code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from deepspeech.frontend.utility import IGNORE_ID
from deepspeech.io.utility import pad_sequence
from deepspeech.utils.log import Log
__all__ = ["SpeechCollator"]
logger = Log(__name__).getlog()
class SpeechCollator():
    def __init__(self, keep_transcription_text=True):
        """Collate (audio, text) examples into padded batch arrays.

        Audio features are zero-padded to the longest utterance in the
        batch.  When ``keep_transcription_text`` is True the text is a raw
        string and each character is encoded as its unicode code point;
        otherwise the text is assumed to already be token ids.
        """
        self._keep_transcription_text = keep_transcription_text

    def __call__(self, batch):
        """Batch a list of examples.

        Args:
            batch (List[Tuple]): each element is (audio, text), where
                audio is an np.ndarray of shape (D, T) and text is either
                a str or a sequence of token ids of shape (U,).

        Returns:
            tuple(audio, text, audio_lens, text_lens): batched data.
                audio : (B, Tmax, D)
                audio_lens: (B)
                text : (B, Umax)
                text_lens: (B)
        """
        audios, audio_lens = [], []
        texts, text_lens = [], []
        for audio, text in batch:
            # Transpose to (T, D) so padding runs along the time axis.
            audios.append(audio.T)
            audio_lens.append(audio.shape[1])
            if self._keep_transcription_text:
                # Raw transcription string -> unicode code points.
                assert isinstance(text, str), (type(text), text)
                token_ids = [ord(ch) for ch in text]
            else:
                token_ids = text  # already token ids
            if not isinstance(token_ids, np.ndarray):
                token_ids = np.array(token_ids, dtype=np.int64)
            texts.append(token_ids)
            text_lens.append(token_ids.shape[0])
        padded_audios = pad_sequence(
            audios, padding_value=0.0).astype(np.float32)  # [B, T, D]
        padded_texts = pad_sequence(
            texts, padding_value=IGNORE_ID).astype(np.int64)
        return (padded_audios,
                np.array(audio_lens).astype(np.int64),
                padded_texts,
                np.array(text_lens).astype(np.int64))
| [
"numpy.array",
"deepspeech.io.utility.pad_sequence",
"deepspeech.utils.log.Log"
] | [((804, 817), 'deepspeech.utils.log.Log', 'Log', (['__name__'], {}), '(__name__)\n', (807, 817), False, 'from deepspeech.utils.log import Log\n'), ((2330, 2362), 'numpy.array', 'np.array', (['tokens'], {'dtype': 'np.int64'}), '(tokens, dtype=np.int64)\n', (2338, 2362), True, 'import numpy as np\n'), ((2484, 2523), 'deepspeech.io.utility.pad_sequence', 'pad_sequence', (['audios'], {'padding_value': '(0.0)'}), '(audios, padding_value=0.0)\n', (2496, 2523), False, 'from deepspeech.io.utility import pad_sequence\n'), ((2589, 2609), 'numpy.array', 'np.array', (['audio_lens'], {}), '(audio_lens)\n', (2597, 2609), True, 'import numpy as np\n'), ((2650, 2694), 'deepspeech.io.utility.pad_sequence', 'pad_sequence', (['texts'], {'padding_value': 'IGNORE_ID'}), '(texts, padding_value=IGNORE_ID)\n', (2662, 2694), False, 'from deepspeech.io.utility import pad_sequence\n'), ((2745, 2764), 'numpy.array', 'np.array', (['text_lens'], {}), '(text_lens)\n', (2753, 2764), True, 'import numpy as np\n')] |
import asyncio
import logging
import time
from functools import partial
from signal import SIGINT, SIGTERM
from panic import \
datatypes as panic_datatypes
logger = logging.getLogger(__name__)
current_time = None  # cached wall-clock time, refreshed once per second by update_current_time()
def update_current_time(loop):
    """Refresh the cached wall-clock time and re-arm itself.

    Keep-alive handling reads the cached value at the end of every request
    to update the request timeout, which is cheaper than calling
    ``time.time()`` per request.  The callback re-schedules itself on the
    given loop so the cache stays at most one second stale.

    :param loop: event loop providing ``call_later``
    :return: None
    """
    global current_time
    current_time = time.time()
    # Re-arm: run again in one second on the same loop.
    loop.call_later(1, partial(update_current_time, loop))
def serve(params: panic_datatypes.ServerParams) -> None:
    """Start the HTTP server described by *params* and run until stopped.

    Creates the asyncio server, installs SIGINT/SIGTERM handlers for a
    graceful shutdown, then runs the event loop forever.  On stop it
    drains open connections before closing the loop.

    :param params: server configuration (host, port, loop, protocol, ...)
    :return: None
    """
    logger.info(f'Goin\' Fast @ http://{params.host}:{params.port}')
    # The protocol factory is invoked per connection; bind params up front.
    server = partial(params.protocol, params)
    # params.protocol,
    # para
    # loop=params.loop,
    # connections=params.connections,
    # signal=params.signal,
    # request_handler=params.request_handler,
    # error_handler=params.error_handler,
    # request_timeout=params.request_timeout
    #)
    server_coroutine = params.loop.create_server(
        server,
        host=params.host,
        port=params.port,
        reuse_port=params.reuse_port)
        #sock=params.sock)
    # Prime the once-per-second cached-time updater (see update_current_time).
    params.loop.call_soon(partial(update_current_time, params.loop))
    try:
        http_server = params.loop.run_until_complete(server_coroutine)
    except Exception:
        logger.exception("Unable to start server")
        return
    # Register signals for graceful termination
    for _signal in (SIGINT, SIGTERM):
        params.loop.add_signal_handler(_signal, params.loop.stop)
    try:
        params.loop.run_forever()
    finally:
        logger.info("Stop requested, draining connections...")
        # Wait for event loop to finish and all connections to drain
        http_server.close()
        params.loop.run_until_complete(http_server.wait_closed())
        # Complete all tasks on the loop
        params.signal.stopped = True
        for connection in params.connections:
            connection.close_if_idle()
        # Poll until every connection has closed itself.
        while params.connections:
            params.loop.run_until_complete(asyncio.sleep(0.1))
        params.loop.close()
| [
"logging.getLogger",
"functools.partial",
"time.time",
"asyncio.sleep"
] | [((172, 199), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (189, 199), False, 'import logging\n'), ((468, 479), 'time.time', 'time.time', ([], {}), '()\n', (477, 479), False, 'import time\n'), ((677, 709), 'functools.partial', 'partial', (['params.protocol', 'params'], {}), '(params.protocol, params)\n', (684, 709), False, 'from functools import partial\n'), ((503, 537), 'functools.partial', 'partial', (['update_current_time', 'loop'], {}), '(update_current_time, loop)\n', (510, 537), False, 'from functools import partial\n'), ((1149, 1190), 'functools.partial', 'partial', (['update_current_time', 'params.loop'], {}), '(update_current_time, params.loop)\n', (1156, 1190), False, 'from functools import partial\n'), ((1964, 1982), 'asyncio.sleep', 'asyncio.sleep', (['(0.1)'], {}), '(0.1)\n', (1977, 1982), False, 'import asyncio\n')] |
from kivy.app import App
from kivy.uix.gridlayout import GridLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.widget import Widget
from kivy.uix.button import Button, Label
from kivy.properties import ListProperty, ObjectProperty
from game import Game
from player import Player
from helpers import new_targets
class PlayerMenuButtons(GridLayout):
    """2x2 grid of buttons used to choose the number of players (1-4)."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.cols = 2
        # Keep the individual btnN attributes for compatibility with any
        # external references; build and wire them in one pass.
        self.btn1, self.btn2, self.btn3, self.btn4 = buttons = [
            Button(text=str(number)) for number in range(1, 5)
        ]
        for button in buttons:
            button.bind(on_press=self.update)
            self.add_widget(button)

    def update(self, instance):
        """Forward the pressed button to the app root, then refresh the menu."""
        print(instance)
        app = App.get_running_app()
        app.root.configure_players(instance)
        instance.parent.parent.show_current_players()
class PlayerMenu(BoxLayout):
    """Vertical menu that first asks for, then displays, the player count."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.orientation = "vertical"
        self.label = Label(text="Select # of Players:")
        self.add_widget(self.label)
        self.player_menu_buttons = PlayerMenuButtons()
        self.add_widget(self.player_menu_buttons)

    def show_current_players(self):
        """Swap the button grid for a label showing the chosen player count."""
        app = App.get_running_app()
        self.label.text = 'Current Players:'
        self.remove_widget(self.player_menu_buttons)
        # NOTE(review): assumes Root.configure_players has already populated
        # app.root.players -- confirm the call order from PlayerMenuButtons.
        self.current_players_label = Label(text=str(len(app.root.players)))
        self.add_widget(self.current_players_label)
class TurnMenu(BoxLayout):
    """Vertical menu offering turn-level actions (Undo / Clear)."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.orientation = "vertical"
        for caption in ("Undo", "Clear"):
            self.add_widget(Button(text=caption))
class Menu(BoxLayout):
    """Side menu stacking the player menu above the turn menu."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.orientation = "vertical"
        for section in (PlayerMenu(), TurnMenu()):
            self.add_widget(section)
class ScoreColumn(BoxLayout):
    """One vertical score column on the sheet for a single player."""

    def __init__(self, player, **kwargs):
        """
        :param player: Player instance exposing ``name`` and ``targets``,
            where each target is a mapping with a ``'shots'`` key.
        """
        super().__init__(**kwargs)
        self.orientation = "vertical"
        self.player = player
        self.add_widget(Label(text=self.player.name))
        # One label per target showing the shot count for that target.
        for target in self.player.targets:
            self.add_widget(Label(text=str(target['shots'])))
        self.add_widget(Label(text=""))
class TargetsColumn(BoxLayout):
    """Leftmost column labelling the score rows (P header, 20-15, B)."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.orientation = "vertical"
        # Header row is underlined; the rest are the target labels plus a
        # trailing blank row.
        self.add_widget(Label(text="P", underline=True))
        for caption in ("20", "19", "18", "17", "16", "15", "B", ""):
            self.add_widget(Label(text=caption))
class Sheet(BoxLayout):
    """Score sheet: a targets column followed by one column per player."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.orientation = "horizontal"
        self.targets = TargetsColumn()
        self.add_widget(self.targets)

    def sync_players_to_sheet(self, players):
        """Append a score column for each player.

        NOTE(review): columns are only ever added, never removed -- calling
        this twice (e.g. re-configuring players) duplicates columns; confirm
        whether that is intended.
        """
        for player in players:
            self.add_widget(ScoreColumn(player))
class Root(BoxLayout):
    """Top-level widget: instruction label, score sheet and side menu."""

    players = ListProperty([])
    # NOTE(review): Game(players) runs once at class-definition time with the
    # ListProperty object itself, not an instance list -- confirm Game
    # tolerates this; set_game_players() is what wires in the real players.
    game = ObjectProperty(Game(players))

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.orientation = "horizontal"
        self.label = Label(id='instruction', text=self.game.state.value)
        self.add_widget(self.label)
        self.sheet = Sheet()
        self.add_widget(self.sheet)
        self.menu = Menu()
        self.add_widget(self.menu)

    def configure_players(self, instance):
        """Create one Player per count shown on the pressed button.

        :param instance: the Button whose ``text`` is the chosen count.
        """
        self.players = []
        # BUG FIX: print() is not printf-style -- the old call printed the
        # literal "%s" followed by the text; interpolate explicitly.
        print("Set number to %s" % instance.text)
        count = int(instance.text)
        # Players are named "1".."count".
        for index in range(count):
            self.players.append(Player(str(index + 1), 0, new_targets()))
        print(self.players)
        self.sheet.sync_players_to_sheet(self.players)
        self.set_game_players()

    def set_game_players(self):
        """Push the configured players into the game and refresh the label."""
        self.game.set_players(self.players)
        print(self.game.state.value)
        self.label.text = self.game.state.value
class CrayketApp(App):
    """Kivy application entry point; the root widget is Root."""
    def build(self):
        return Root()
CrayketApp().run()  # start the app when this module is executed
| [
"helpers.new_targets",
"kivy.uix.button.Button",
"game.Game",
"kivy.properties.ListProperty",
"kivy.uix.button.Label",
"kivy.app.App.get_running_app"
] | [((3463, 3479), 'kivy.properties.ListProperty', 'ListProperty', (['[]'], {}), '([])\n', (3475, 3479), False, 'from kivy.properties import ListProperty, ObjectProperty\n'), ((472, 488), 'kivy.uix.button.Button', 'Button', ([], {'text': '"""1"""'}), "(text='1')\n", (478, 488), False, 'from kivy.uix.button import Button, Label\n'), ((554, 570), 'kivy.uix.button.Button', 'Button', ([], {'text': '"""2"""'}), "(text='2')\n", (560, 570), False, 'from kivy.uix.button import Button, Label\n'), ((636, 652), 'kivy.uix.button.Button', 'Button', ([], {'text': '"""3"""'}), "(text='3')\n", (642, 652), False, 'from kivy.uix.button import Button, Label\n'), ((718, 734), 'kivy.uix.button.Button', 'Button', ([], {'text': '"""4"""'}), "(text='4')\n", (724, 734), False, 'from kivy.uix.button import Button, Label\n'), ((991, 1012), 'kivy.app.App.get_running_app', 'App.get_running_app', ([], {}), '()\n', (1010, 1012), False, 'from kivy.app import App\n'), ((1271, 1305), 'kivy.uix.button.Label', 'Label', ([], {'text': '"""Select # of Players:"""'}), "(text='Select # of Players:')\n", (1276, 1305), False, 'from kivy.uix.button import Button, Label\n'), ((1498, 1519), 'kivy.app.App.get_running_app', 'App.get_running_app', ([], {}), '()\n', (1517, 1519), False, 'from kivy.app import App\n'), ((3506, 3519), 'game.Game', 'Game', (['players'], {}), '(players)\n', (3510, 3519), False, 'from game import Game\n'), ((3652, 3703), 'kivy.uix.button.Label', 'Label', ([], {'id': '"""instruction"""', 'text': 'self.game.state.value'}), "(id='instruction', text=self.game.state.value)\n", (3657, 3703), False, 'from kivy.uix.button import Button, Label\n'), ((1906, 1925), 'kivy.uix.button.Button', 'Button', ([], {'text': '"""Undo"""'}), "(text='Undo')\n", (1912, 1925), False, 'from kivy.uix.button import Button, Label\n'), ((1951, 1971), 'kivy.uix.button.Button', 'Button', ([], {'text': '"""Clear"""'}), "(text='Clear')\n", (1957, 1971), False, 'from kivy.uix.button import Button, Label\n'), ((2379, 
2407), 'kivy.uix.button.Label', 'Label', ([], {'text': 'self.player.name'}), '(text=self.player.name)\n', (2384, 2407), False, 'from kivy.uix.button import Button, Label\n'), ((2538, 2552), 'kivy.uix.button.Label', 'Label', ([], {'text': '""""""'}), "(text='')\n", (2543, 2552), False, 'from kivy.uix.button import Button, Label\n'), ((2719, 2750), 'kivy.uix.button.Label', 'Label', ([], {'text': '"""P"""', 'underline': '(True)'}), "(text='P', underline=True)\n", (2724, 2750), False, 'from kivy.uix.button import Button, Label\n'), ((2776, 2792), 'kivy.uix.button.Label', 'Label', ([], {'text': '"""20"""'}), "(text='20')\n", (2781, 2792), False, 'from kivy.uix.button import Button, Label\n'), ((2818, 2834), 'kivy.uix.button.Label', 'Label', ([], {'text': '"""19"""'}), "(text='19')\n", (2823, 2834), False, 'from kivy.uix.button import Button, Label\n'), ((2860, 2876), 'kivy.uix.button.Label', 'Label', ([], {'text': '"""18"""'}), "(text='18')\n", (2865, 2876), False, 'from kivy.uix.button import Button, Label\n'), ((2902, 2918), 'kivy.uix.button.Label', 'Label', ([], {'text': '"""17"""'}), "(text='17')\n", (2907, 2918), False, 'from kivy.uix.button import Button, Label\n'), ((2944, 2960), 'kivy.uix.button.Label', 'Label', ([], {'text': '"""16"""'}), "(text='16')\n", (2949, 2960), False, 'from kivy.uix.button import Button, Label\n'), ((2986, 3002), 'kivy.uix.button.Label', 'Label', ([], {'text': '"""15"""'}), "(text='15')\n", (2991, 3002), False, 'from kivy.uix.button import Button, Label\n'), ((3028, 3043), 'kivy.uix.button.Label', 'Label', ([], {'text': '"""B"""'}), "(text='B')\n", (3033, 3043), False, 'from kivy.uix.button import Button, Label\n'), ((3069, 3083), 'kivy.uix.button.Label', 'Label', ([], {'text': '""""""'}), "(text='')\n", (3074, 3083), False, 'from kivy.uix.button import Button, Label\n'), ((4166, 4179), 'helpers.new_targets', 'new_targets', ([], {}), '()\n', (4177, 4179), False, 'from helpers import new_targets\n')] |
import jittor as jt
from jittor import nn
from jittor import Module
from jittor import init
from jittor.contrib import concat
from model.backbone import resnet50, resnet101
from model.backbone import res2net101
Backbone_List = ['resnet50', 'resnet101', 'res2net101']  # supported backbone names
class DeepLab(Module):
    """DeepLabV3+-style segmentation network: backbone -> ASPP -> decoder.

    The output is resized back to the input's spatial size, giving per-pixel
    class logits of shape (B, num_classes, H, W).
    """
    def __init__(self, output_stride=16, num_classes=2, backbone = 'resnet101'):
        super(DeepLab, self).__init__()
        # Fall back to resnet101 on an unrecognized backbone name.
        if not backbone in Backbone_List:
            print('Invalid Backbone! Initialized to resnet101')
            backbone = 'resnet101'
        if backbone == 'resnet50':
            self.backbone = resnet50(output_stride=output_stride)
        elif backbone == 'res2net101':
            self.backbone = res2net101(output_stride=output_stride)
        else:
            self.backbone = resnet101(output_stride=output_stride)
        self.backbone_name = backbone
        self.aspp = ASPP(output_stride)
        self.decoder = Decoder(num_classes)
    def execute(self, input):
        """Forward pass; returns logits at the input resolution."""
        # Backbone returns (low_level_feat, _, _, high_level_feat).
        low_level_feat, _, _, x = self.backbone(input)
        x = self.aspp(x)
        x = self.decoder(x, low_level_feat)
        x = nn.resize(x, size=(input.shape[2], input.shape[3]), mode='bilinear')
        return x
    def get_backbone(self):
        """Return the backbone module (e.g. for per-module learning rates)."""
        return self.backbone
    def get_head(self):
        """Return the head modules (ASPP + decoder)."""
        return [self.aspp, self.decoder]
    def get_loss(self, target, pred, ignore_index=None):
        """Cross-entropy loss between predicted logits and target labels."""
        loss_pred = nn.cross_entropy_loss(pred, target, ignore_index=ignore_index)
        return loss_pred
    def update_params(self, loss, optimizer):
        """One optimization step: zero grads, backprop, apply the update."""
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
class Decoder(nn.Module):
    """DeepLabV3+ decoder: fuse ASPP output with low-level backbone features."""
    def __init__(self, num_classes):
        super(Decoder, self).__init__()
        low_level_inplanes = 256 # mobilenet = 24 resnet / res2net = 256 xception = 128
        # 1x1 conv reduces the low-level features to 48 channels before fusion.
        self.conv1 = nn.Conv(low_level_inplanes, 48, 1, bias=False)
        self.bn1 = nn.BatchNorm(48)
        self.relu = nn.ReLU()
        # 304 = 256 (ASPP output) + 48 (reduced low-level features).
        self.last_conv = nn.Sequential(nn.Conv(304, 256, kernel_size=3, stride=1, padding=1, bias=False),
                                        nn.BatchNorm(256),
                                        nn.ReLU(),
                                        nn.Dropout(0.5),
                                        nn.Conv(256, 256, kernel_size=3, stride=1, padding=1, bias=False),
                                        nn.BatchNorm(256),
                                        nn.ReLU(),
                                        nn.Dropout(0.1),
                                        nn.Conv(256, num_classes, kernel_size=1, stride=1))
    def execute(self, x, low_level_feat):
        """Upsample *x* to the low-level feature size, concat, and classify."""
        low_level_feat = self.conv1(low_level_feat)
        low_level_feat = self.bn1(low_level_feat)
        low_level_feat = self.relu(low_level_feat)
        #print (low_level_feat.shape)
        x = nn.resize(x, size=(low_level_feat.shape[2], low_level_feat.shape[3]) , mode='bilinear')
        x = concat((x, low_level_feat), dim=1)
        x = self.last_conv(x)
        return x
class Single_ASPPModule(Module):
    """One atrous-convolution branch of ASPP: conv -> batchnorm -> ReLU."""

    def __init__(self, inplanes, planes, kernel_size, padding, dilation):
        super(Single_ASPPModule, self).__init__()
        self.atrous_conv = nn.Conv(inplanes, planes, kernel_size=kernel_size,
                                stride=1, padding=padding, dilation=dilation, bias=False)
        self.bn = nn.BatchNorm(planes)
        self.relu = nn.ReLU()

    def execute(self, x):
        out = self.atrous_conv(x)
        out = self.bn(out)
        return self.relu(out)
class ASPP(Module):
    """Atrous Spatial Pyramid Pooling over the backbone's high-level features."""
    def __init__(self, output_stride):
        super(ASPP, self).__init__()
        inplanes = 2048 # mobilnet = 320 resnet = 2048
        # Dilation rates depend on the backbone's output stride.
        if output_stride == 16:
            dilations = [1, 6, 12, 18]
        elif output_stride == 8:
            dilations = [1, 12, 24, 36]
        else:
            raise NotImplementedError
        self.aspp1 = Single_ASPPModule(inplanes, 256, 1, padding=0, dilation=dilations[0])
        self.aspp2 = Single_ASPPModule(inplanes, 256, 3, padding=dilations[1], dilation=dilations[1])
        self.aspp3 = Single_ASPPModule(inplanes, 256, 3, padding=dilations[2], dilation=dilations[2])
        self.aspp4 = Single_ASPPModule(inplanes, 256, 3, padding=dilations[3], dilation=dilations[3])
        # Image-level branch: global average pool + 1x1 conv.
        self.global_avg_pool = nn.Sequential(GlobalPooling(),
                                             nn.Conv(inplanes, 256, 1, stride=1, bias=False),
                                             nn.BatchNorm(256),
                                             nn.ReLU())
        # 1280 = 5 branches x 256 channels.
        self.conv1 = nn.Conv(1280, 256, 1, bias=False)
        self.bn1 = nn.BatchNorm(256)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.5)
    def execute(self, x):
        """Run all five branches, concatenate, and project back to 256 channels."""
        x1 = self.aspp1(x)
        x2 = self.aspp2(x)
        x3 = self.aspp3(x)
        x4 = self.aspp4(x)
        x5 = self.global_avg_pool(x)
        # Broadcast the pooled (N, C, 1, 1) branch to the spatial size of x4.
        x5 = x5.broadcast((1,1,x4.shape[2],x4.shape[3]))
        x = concat((x1, x2, x3, x4, x5), dim=1)
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.dropout(x)
        return x
class GlobalPooling (Module):
    """Global average pooling over the spatial dimensions (keeps dims)."""
    def __init__(self):
        super(GlobalPooling, self).__init__()
    def execute (self, x):
        # Mean over H and W; output shape (N, C, 1, 1).
        return jt.mean(x, dims=[2,3], keepdims=1)
def main():
    """Smoke test: build DeepLab, run a dummy batch, report parameter counts."""
    model = DeepLab(backbone = 'resnet101')
    x = jt.ones([2, 3, 512, 512])
    y = model(x)
    print (y.shape)
    # Force evaluation of the lazy computation graph.
    _ = y.data
    # Find total parameters and trainable parameters
    total_params = sum(p.numel() for p in model.parameters())
    print(f'{total_params:,} total parameters.')
    total_trainable_params = sum(
        p.numel() for p in model.parameters() if p.requires_grad)
    print(f'{total_trainable_params:,} training parameters.')
'''
DeepLab
59,572,610 total parameters.
59,462,946 training parameters.
'''
if __name__ == '__main__':
    main()
"jittor.nn.Conv",
"model.backbone.res2net101",
"model.backbone.resnet101",
"jittor.nn.cross_entropy_loss",
"jittor.nn.Dropout",
"model.backbone.resnet50",
"jittor.nn.resize",
"jittor.nn.ReLU",
"jittor.mean",
"jittor.ones",
"jittor.contrib.concat",
"jittor.nn.BatchNorm"
] | [((5397, 5422), 'jittor.ones', 'jt.ones', (['[2, 3, 512, 512]'], {}), '([2, 3, 512, 512])\n', (5404, 5422), True, 'import jittor as jt\n'), ((1138, 1206), 'jittor.nn.resize', 'nn.resize', (['x'], {'size': '(input.shape[2], input.shape[3])', 'mode': '"""bilinear"""'}), "(x, size=(input.shape[2], input.shape[3]), mode='bilinear')\n", (1147, 1206), False, 'from jittor import nn\n'), ((1430, 1492), 'jittor.nn.cross_entropy_loss', 'nn.cross_entropy_loss', (['pred', 'target'], {'ignore_index': 'ignore_index'}), '(pred, target, ignore_index=ignore_index)\n', (1451, 1492), False, 'from jittor import nn\n'), ((1867, 1913), 'jittor.nn.Conv', 'nn.Conv', (['low_level_inplanes', '(48)', '(1)'], {'bias': '(False)'}), '(low_level_inplanes, 48, 1, bias=False)\n', (1874, 1913), False, 'from jittor import nn\n'), ((1933, 1949), 'jittor.nn.BatchNorm', 'nn.BatchNorm', (['(48)'], {}), '(48)\n', (1945, 1949), False, 'from jittor import nn\n'), ((1970, 1979), 'jittor.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1977, 1979), False, 'from jittor import nn\n'), ((2858, 2949), 'jittor.nn.resize', 'nn.resize', (['x'], {'size': '(low_level_feat.shape[2], low_level_feat.shape[3])', 'mode': '"""bilinear"""'}), "(x, size=(low_level_feat.shape[2], low_level_feat.shape[3]), mode=\n 'bilinear')\n", (2867, 2949), False, 'from jittor import nn\n'), ((2958, 2992), 'jittor.contrib.concat', 'concat', (['(x, low_level_feat)'], {'dim': '(1)'}), '((x, low_level_feat), dim=1)\n', (2964, 2992), False, 'from jittor.contrib import concat\n'), ((3227, 3340), 'jittor.nn.Conv', 'nn.Conv', (['inplanes', 'planes'], {'kernel_size': 'kernel_size', 'stride': '(1)', 'padding': 'padding', 'dilation': 'dilation', 'bias': '(False)'}), '(inplanes, planes, kernel_size=kernel_size, stride=1, padding=\n padding, dilation=dilation, bias=False)\n', (3234, 3340), False, 'from jittor import nn\n'), ((3398, 3418), 'jittor.nn.BatchNorm', 'nn.BatchNorm', (['planes'], {}), '(planes)\n', (3410, 3418), False, 'from jittor import nn\n'), 
((3439, 3448), 'jittor.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3446, 3448), False, 'from jittor import nn\n'), ((4617, 4650), 'jittor.nn.Conv', 'nn.Conv', (['(1280)', '(256)', '(1)'], {'bias': '(False)'}), '(1280, 256, 1, bias=False)\n', (4624, 4650), False, 'from jittor import nn\n'), ((4670, 4687), 'jittor.nn.BatchNorm', 'nn.BatchNorm', (['(256)'], {}), '(256)\n', (4682, 4687), False, 'from jittor import nn\n'), ((4708, 4717), 'jittor.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4715, 4717), False, 'from jittor import nn\n'), ((4741, 4756), 'jittor.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (4751, 4756), False, 'from jittor import nn\n'), ((4998, 5033), 'jittor.contrib.concat', 'concat', (['(x1, x2, x3, x4, x5)'], {'dim': '(1)'}), '((x1, x2, x3, x4, x5), dim=1)\n', (5004, 5033), False, 'from jittor.contrib import concat\n'), ((5297, 5332), 'jittor.mean', 'jt.mean', (['x'], {'dims': '[2, 3]', 'keepdims': '(1)'}), '(x, dims=[2, 3], keepdims=1)\n', (5304, 5332), True, 'import jittor as jt\n'), ((618, 655), 'model.backbone.resnet50', 'resnet50', ([], {'output_stride': 'output_stride'}), '(output_stride=output_stride)\n', (626, 655), False, 'from model.backbone import resnet50, resnet101\n'), ((2019, 2084), 'jittor.nn.Conv', 'nn.Conv', (['(304)', '(256)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(304, 256, kernel_size=3, stride=1, padding=1, bias=False)\n', (2026, 2084), False, 'from jittor import nn\n'), ((2125, 2142), 'jittor.nn.BatchNorm', 'nn.BatchNorm', (['(256)'], {}), '(256)\n', (2137, 2142), False, 'from jittor import nn\n'), ((2183, 2192), 'jittor.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2190, 2192), False, 'from jittor import nn\n'), ((2233, 2248), 'jittor.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (2243, 2248), False, 'from jittor import nn\n'), ((2289, 2354), 'jittor.nn.Conv', 'nn.Conv', (['(256)', '(256)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(256, 256, 
kernel_size=3, stride=1, padding=1, bias=False)\n', (2296, 2354), False, 'from jittor import nn\n'), ((2395, 2412), 'jittor.nn.BatchNorm', 'nn.BatchNorm', (['(256)'], {}), '(256)\n', (2407, 2412), False, 'from jittor import nn\n'), ((2453, 2462), 'jittor.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2460, 2462), False, 'from jittor import nn\n'), ((2503, 2518), 'jittor.nn.Dropout', 'nn.Dropout', (['(0.1)'], {}), '(0.1)\n', (2513, 2518), False, 'from jittor import nn\n'), ((2559, 2609), 'jittor.nn.Conv', 'nn.Conv', (['(256)', 'num_classes'], {'kernel_size': '(1)', 'stride': '(1)'}), '(256, num_classes, kernel_size=1, stride=1)\n', (2566, 2609), False, 'from jittor import nn\n'), ((4427, 4474), 'jittor.nn.Conv', 'nn.Conv', (['inplanes', '(256)', '(1)'], {'stride': '(1)', 'bias': '(False)'}), '(inplanes, 256, 1, stride=1, bias=False)\n', (4434, 4474), False, 'from jittor import nn\n'), ((4521, 4538), 'jittor.nn.BatchNorm', 'nn.BatchNorm', (['(256)'], {}), '(256)\n', (4533, 4538), False, 'from jittor import nn\n'), ((4585, 4594), 'jittor.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4592, 4594), False, 'from jittor import nn\n'), ((723, 762), 'model.backbone.res2net101', 'res2net101', ([], {'output_stride': 'output_stride'}), '(output_stride=output_stride)\n', (733, 762), False, 'from model.backbone import res2net101\n'), ((805, 843), 'model.backbone.resnet101', 'resnet101', ([], {'output_stride': 'output_stride'}), '(output_stride=output_stride)\n', (814, 843), False, 'from model.backbone import resnet50, resnet101\n')] |
# Generated by Django 2.2 on 2020-03-30 15:03
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the SeoUrl model.

    ``url`` is the primary key; ``head_title`` and ``meta_description``
    are optional SEO fields.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='SeoUrl',
            fields=[
                ('head_title', models.CharField(blank=True, max_length=55, verbose_name='head title')),
                ('meta_description', models.TextField(blank=True, max_length=160, verbose_name='meta description')),
                ('url', models.CharField(max_length=255, primary_key=True, serialize=False, unique=True, verbose_name='URL')),
            ],
            options={
                'verbose_name': 'SEO URL',
                'verbose_name_plural': 'SEO URLs',
            },
        ),
    ]
| [
"django.db.models.TextField",
"django.db.models.CharField"
] | [((308, 378), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(55)', 'verbose_name': '"""head title"""'}), "(blank=True, max_length=55, verbose_name='head title')\n", (324, 378), False, 'from django.db import migrations, models\n'), ((418, 495), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'max_length': '(160)', 'verbose_name': '"""meta description"""'}), "(blank=True, max_length=160, verbose_name='meta description')\n", (434, 495), False, 'from django.db import migrations, models\n'), ((522, 627), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'primary_key': '(True)', 'serialize': '(False)', 'unique': '(True)', 'verbose_name': '"""URL"""'}), "(max_length=255, primary_key=True, serialize=False, unique=\n True, verbose_name='URL')\n", (538, 627), False, 'from django.db import migrations, models\n')] |
import os
from shutil import rmtree
from ..settings import DATA_PATH
class Thumbnail(object):
    """Local/remote thumbnail locations for a model row.

    The on-disk location is derived from the model's table name, its
    module id and its own id; the remote location comes from
    ``model.thumbnail_url``.
    """

    def __init__(self, model):
        self.model = model
        self.module = model.module
        self.filename = os.path.join(
            'thumbnails', model.__tablename__, self.module.id, model.id)
        self.full_filename = os.path.join(DATA_PATH, self.filename)
        self.url = model.thumbnail_url

    def __repr__(self):
        return '<Thumbnail "%s">' % self.filename

    def __bool__(self):
        # Truthy when any path (local or remote) is known.
        return bool(self.path)

    @property
    def path(self):
        """Relative local path if cached on disk, else the remote URL, else ''."""
        if self.local_path:
            return self.filename
        return self.online_path or ''

    @property
    def local_path(self):
        """Absolute path of the cached file, or None when absent."""
        if os.path.exists(self.full_filename):
            return self.full_filename

    @property
    def online_path(self):
        """Remote URL when one is configured, else None."""
        if self.url:
            return self.url

    def download(self, options=None):
        """Fetch the thumbnail via the module, defaulting to the model's options."""
        if options is None:
            options = getattr(self.model, 'options', None)
        if self.url:
            self.module.get_thumbnail(self.full_filename, self.url, options)

    def remove(self):
        """Delete the cached file if present."""
        target = self.local_path
        if target:
            os.remove(target)
def get_media_path(item):
    """Return the directory that stores *item*'s media files on disk."""
    parts = (DATA_PATH, 'medias', item.module_id, item.id)
    return os.path.join(*parts)
def get_medias(item):
    """Return the set of Media objects for *item* (empty when no media dir)."""
    try:
        filenames = os.listdir(get_media_path(item))
    except FileNotFoundError:
        return set()
    return {Media(item, filename) for filename in filenames}
def remove_medias(item):
    """Delete *item*'s entire media directory; a missing directory is fine."""
    media_dir = get_media_path(item)
    try:
        rmtree(media_dir)
    except FileNotFoundError:
        pass
class Media(object):
    """A single media file of an item, addressed relative to DATA_PATH."""

    def __init__(self, item, media_filename):
        self.item = item
        self.media_filename = media_filename
        self.filename = os.path.join(
            'medias', item.module_id, item.id, media_filename)
        self.full_filename = os.path.join(DATA_PATH, self.filename)

    def __repr__(self):
        return '<Media "%s:%s" "%s">' % (
            self.item.module_id, self.item.id, self.media_filename)

    def remove(self):
        """Delete the file from disk if it exists."""
        target = self.full_filename
        if os.path.exists(target):
            os.remove(target)
| [
"os.path.exists",
"os.path.join",
"os.remove"
] | [((1302, 1360), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""medias"""', 'item.module_id', 'item.id'], {}), "(DATA_PATH, 'medias', item.module_id, item.id)\n", (1314, 1360), False, 'import os\n'), ((214, 287), 'os.path.join', 'os.path.join', (['"""thumbnails"""', 'model.__tablename__', 'self.module.id', 'model.id'], {}), "('thumbnails', model.__tablename__, self.module.id, model.id)\n", (226, 287), False, 'import os\n'), ((317, 355), 'os.path.join', 'os.path.join', (['DATA_PATH', 'self.filename'], {}), '(DATA_PATH, self.filename)\n', (329, 355), False, 'import os\n'), ((757, 791), 'os.path.exists', 'os.path.exists', (['self.full_filename'], {}), '(self.full_filename)\n', (771, 791), False, 'import os\n'), ((1812, 1875), 'os.path.join', 'os.path.join', (['"""medias"""', 'item.module_id', 'item.id', 'media_filename'], {}), "('medias', item.module_id, item.id, media_filename)\n", (1824, 1875), False, 'import os\n'), ((1905, 1943), 'os.path.join', 'os.path.join', (['DATA_PATH', 'self.filename'], {}), '(DATA_PATH, self.filename)\n', (1917, 1943), False, 'import os\n'), ((2143, 2172), 'os.path.exists', 'os.path.exists', (['full_filename'], {}), '(full_filename)\n', (2157, 2172), False, 'import os\n'), ((1241, 1262), 'os.remove', 'os.remove', (['local_path'], {}), '(local_path)\n', (1250, 1262), False, 'import os\n'), ((2186, 2210), 'os.remove', 'os.remove', (['full_filename'], {}), '(full_filename)\n', (2195, 2210), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 15 11:43:21 2021
@author: Sander
"""
import random
# TODO: '/poll vote 88' has no output
# : invalid poll id gives no output
# Poll datastructure
#
# {
# "number or name of person":
# {
# "__id" : unique id for every poll
# "__name" : name,
# "__votees" :
# [
# ["votee", "option"],
# ["votee", "option"],
# ["votee", "option"],
# ...
# ]
# "Option 1" : votes,
# "Option 2" : votes,
# "Option 3" : votes,
# ...
# },
#
# "number or name of person":
# {
# "__id" : unique id for every poll
# "__name" : name,
# "__votees" :
# [
# ["votee", "option"],
# ["votee", "option"],
# ["votee", "option"],
# ...
# ]
# "Option 1" : votes,
# "Option 2" : votes,
# "Option 3" : votes,
# ...
# },
#
# ...
# }
# List of possible commands:
# /poll create <name with spaces> [<options 1>/<option 2>/...]
# >>> Creates a poll with that name in that persons account name
# >>> A poll can't have these '-[]/' characters in either its name nor its options.
# >>> Neither can a poll have __name of __votees as an option.
#
# /poll remove <id>
# >>> Removes the poll associated with that person
#
# /poll vote <name> -<option>
# >>> Adds a vote to that poll.
# --> Check if person already voted.
# TODO: Check if the voter votes for an existing option
polls = {}  # poll storage: {creator: {"__id", "__name", "__votees", <option>: votes}}
def is_option(poll_id: str, option: str):
    """Return True when *option* is a votable option of poll *poll_id*.

    Internal keys (anything starting with "__") never count as options,
    and an unknown poll id yields False.
    """
    if option.startswith("__"):
        return False
    for creator in polls:
        poll = polls[creator]
        if poll["__id"] == int(poll_id):
            return option in poll
    return False
def vote(poll_id, person, option):
    """Register *person*'s vote for *option* on the poll with id *poll_id*.

    A voter may vote once; voting again for a different option moves the
    vote, and voting for the same option again is rejected.  Returns a
    user-facing message (rendered poll, notice, or error).
    """
    for creator in polls:
        poll = polls[creator]
        if poll["__id"] != int(poll_id):
            continue
        # Existing voter: either reject a duplicate or move the vote.
        for entry in poll["__votees"]:
            if entry[0] == person:
                if entry[1] == option:
                    return "Can't vote for the same item twice, you clapped jonko"
                poll[entry[1]] -= 1
                poll[option] += 1
                entry[1] = option
                return "Changed vote"
        # First vote from this person.
        poll["__votees"].append([person, option])
        poll[option] += 1
        return show_poll_by_id(int(poll_id))
    # BUG FIX: an unknown poll id previously returned None (no output),
    # per the TODO at the top of this module.
    return "Error 404: Poll not found"
def id_generator(name):
    """Return a random poll id in 1-999 unused by any poll other than *name*'s."""
    taken = {polls[creator]["__id"] for creator in polls if creator != name}
    while True:
        candidate = random.randint(1, 999)
        if candidate not in taken:
            return candidate
def create_poll(command, poll_creator):
    """Create (or replace) the poll owned by *poll_creator*.

    ``command`` looks like ``/poll create <name with spaces> [<opt 1>/<opt 2>/...]``.
    Returns a rendering of the new poll, or an error string.
    """
    # Exactly one pair of brackets must delimit the option list.
    # BUG FIX: previously a malformed command silently returned None.
    if command.count("[") != 1 or command.count("]") != 1:
        return "Error: expected exactly one [<option 1>/<option 2>/...] block"
    # Extract the poll name: everything between the command verb and '['.
    name = command.split(" ", 2)[-1]
    name = name[:name.find("[")]
    # Remove a single trailing space if present.
    # BUG FIX: the original indexed name[-1], crashing on an empty name.
    name = name[:-1] if name.endswith(" ") else name
    # Extract the options from between the brackets.
    options = command[command.find("[")+1:command.find("]")].split("/")
    for option in options:
        if option[:2] == "__":
            # BUG FIX: this error message was previously truncated mid-sentence.
            return ("Error: You can't start a poll option with '__', "
                    "please follow the syntax described in '/poll help'")
    # Replace any existing poll owned by this creator (no-op if none exists;
    # replaces the original bare try/except: pass).
    polls.pop(poll_creator, None)
    # Set default values.
    poll_id = id_generator(poll_creator)
    polls[poll_creator] = {
        "__id": poll_id,
        "__name": name,
        "__votees": [],
    }
    # Insert options, each starting with zero votes.
    for option in options:
        polls[poll_creator][option] = 0
    return show_poll_by_id(poll_id)
def vote_poll(command, poll_votee):
    """Handle ``/poll vote <id> <option>`` issued by *poll_votee*.

    Validates that the poll id exists and that the option is votable, then
    delegates to vote(). Always returns a reply string.
    """
    try:
        # Everything after "/poll vote " is "<id> <option>".
        args = command.split(" ", 2)[-1]
        poll_id, option = args.split(" ", 1)
        for creator in polls:
            if polls[creator]["__id"] == int(poll_id):
                if is_option(poll_id, option):
                    return vote(poll_id, poll_votee, option)
                return "Error: " + option + " isn't an option"
        # BUG FIX: an unknown id previously fell through and returned None.
        return "Error: no poll with id " + poll_id
    except Exception:
        # Malformed input of any kind gets the generic syntax reminder
        # (narrowed from a bare except so Ctrl-C etc. still propagate).
        return "Something went wrong, please make sure you follow the syntax of the command as described in '/poll help'"
def show_poll_by_id(ID):
    """Render the poll whose '__id' equals *ID*, or a 404 message."""
    for creator, poll in polls.items():
        if poll["__id"] != ID:
            continue
        header = ("*Poll by " + creator + " (" + str(poll["__id"]) + "): "
                  + poll["__name"] + "*\n\n")
        body = ""
        # Internal '__'-prefixed keys are bookkeeping, not options.
        for option in list(poll.keys()):
            if option[:2] != "__":
                body += "_" + option + "_ with " + str(poll[option]) + " vote(s)\n"
        return header + body
    return "Error 404: Poll not found"
def get_poll_list():
    """Return a one-string listing of every active poll (name, id, title)."""
    entries = [
        creator + " (" + str(polls[creator]["__id"]) + ") - " + polls[creator]["__name"][:100]
        for creator in list(polls.keys())
    ]
    return "*List of all currently active polls:*\n\n" + "".join(entries)
def handle_poll(command: str, person: str):
    """
    Functionality:
        Dispatch a '/poll ...' chat command issued by *person*.
        Supported: help, list, show <id>, create <name> [opts], vote <id> <option>.
    Parameters
    ----------
    command: The full command string, e.g. "/poll create Name [A/B]".
    person: Account name of the user issuing the command.
    Returns
    -------
    A reply string (never None).
    """
    items = command.split(" ")
    # Handle help command
    if len(items) == 1:
        return "You need to add something, perhaps 'help' to get the syntax help?"
    elif len(items) == 2:
        if items[1] == "help":
            return ("*Syntax help for /poll:*\n"
                    + "\n"
                    + "You can vote for or create a poll:\n"
                    + "\n"
                    + "_Creating a poll:_\n"
                    + "/poll create <name> [<option 1>/<option 2>/...]"
                    + "\n"
                    + "_Voting for a poll:_\n"
                    + "/poll vote <name> <option>\n"
                    + "\n"
                    + "Keep in mind that you need to follow the formatting, else there might be some naming problems.\n"
                    + "You can also list all polls with their IDs with /poll list.\n"
                    + "then show it with /poll show <number or name (for special people ;)>")
        if items[1] == "list":
            return "".join("Poll (" + str(polls[x]["__id"]) + ") from: " + x[:100] + "\n\n" for x in list(polls.keys()))
        # BUG FIX: any other two-word command used to fall through and
        # return None; reply with an explicit error instead.
        return items[1] + " isn't a valid option"
    elif len(items) == 3:
        if items[1].lower() == "show":
            return show_poll_by_id(int(items[2]))
        # BUG FIX: previously returned None for any other three-word command.
        return items[1] + " isn't a valid option"
    else:
        if items[1].lower() == "create":
            return create_poll(command, person)
        if items[1].lower() == "vote":
            return vote_poll(command, person)
        return items[1] + " isn't a valid option"
if __name__ == "__main__":
print("\n\n>" + handle_poll("/poll create Moet die jonko geklapt worden? [Ja/Nee/Im a loser]", "Prive"))
print("\n\n>" + handle_poll("/poll list", "Prive"))
print("\n\n>" + handle_poll("/poll show 111", "Prive"))
print("\n\n>" + handle_poll("/poll vote 111 Ja", "Prive"))
print("\n\n>" + handle_poll("/poll vote 111 Nee", "Prive"))
print("\n\n>" + handle_poll("/poll vote 111 DSADSADSA", "Prive")) | [
"random.randint"
] | [((3314, 3336), 'random.randint', 'random.randint', (['(1)', '(999)'], {}), '(1, 999)\n', (3328, 3336), False, 'import random\n')] |
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %% [markdown]
# # Laboratorio #3 - Predicción de textos
#
# * <NAME> - 17315
# * <NAME> - 17509
# * <NAME> - 17088
# %%
from keras.layers import Embedding
from keras.layers import LSTM
from keras.layers import Dense
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.preprocessing.text import Tokenizer
from numpy import array
import random
import collections
from wordcloud import WordCloud
import matplotlib
from nltk.corpus import stopwords
import pandas as pd
import numpy as np
import re
import nltk
# Make sure the NLTK stopword corpus is available locally.
nltk.download('stopwords')
# Define the stopword list according to nltk.
# NOTE(review): this rebinds the name `stopwords` from the imported module to
# a plain list of English words, shadowing `nltk.corpus.stopwords` from here on.
stopwords = stopwords.words('english')
# Para el modelo
# %% [markdown]
# ## Importación y limpieza de datos
#
# ### 1. Abrir y leer archivos.
#
# Cabe mencionar que todos los archivos fueron convertidos a minúsculas, se quitan los urls y en algunas ocasiones, la mayoría de símbolos que consideramos innecesarios. También se quitan las stopwords, los números y finalmente las apostrophes. Además, se separan oraciones mediante los símbolos de **.**, **!** y **?**. Se debe validar que no hayan espacios vacíos luego de estas oraciones.
#
# #### Caso 1: Blogs
# %%
# Collect cleaned sentences from the blog corpus.
blog = []
with open('./files/en_US.blogs.txt', 'r', encoding='utf-8') as blog_txt:
    for line in blog_txt:
        # Strip the trailing newline and lowercase everything.
        line = line.rstrip('\n').lower()
        # Remove URLs.
        line = re.sub(r'^https?:\/\/.[\r\n]', '', line)
        # Remove every symbol except . ? ! and the apostrophe.
        line = re.sub(r"[^\w.?!\d'\s]", '', line)
        # Remove digits.
        line = re.sub(r'[0-9]', ' ', line)
        # Trim surrounding whitespace.
        line = line.strip(' \t\n\r')
        # Drop stopwords.
        line = ' '.join(word for word in line.split(' ') if word not in stopwords)
        # Finally, remove apostrophes.
        line = line.replace("'", '')
        # Split on the first sentence terminator that actually splits the line
        # (same priority as before: '.' then '!' then '?').
        # BUG FIX: the original applied .strip() to sentences split on '.' but
        # not to those split on '!' or '?', leaving padded entries in the
        # corpus; stripping is now applied consistently in every branch.
        for terminator in ('.', '!', '?'):
            sentences = line.split(terminator)
            if len(sentences) > 1:
                for sentence in sentences:
                    # Remove remaining punctuation and surrounding spaces.
                    sentence = re.sub(r'[^\w]', ' ', sentence).strip()
                    if len(sentence) > 1:
                        blog.append(sentence)
                break
        else:
            # No terminator present: keep the line itself if it has >1 word.
            if len(line.split(' ')) > 1:
                blog.append(re.sub(r'[^\w]', ' ', line).strip())
# %% [markdown]
# #### Caso 2: Noticias
#
# Este caso tuvo un procedimiento igual al caso 1.
# %%
# Collect cleaned sentences from the news corpus (same pipeline as the blogs).
news = []
with open('./files/en_US.news.txt', 'r', encoding='utf-8') as news_txt:
    for line in news_txt:
        # Strip the trailing newline and lowercase everything.
        line = line.rstrip('\n').lower()
        # Remove URLs.
        line = re.sub(r'^https?:\/\/.[\r\n]', '', line)
        # Remove every symbol except . ? ! and the apostrophe.
        line = re.sub(r"[^\w.?!\d'\s]", '', line)
        # Remove digits.
        line = re.sub(r'[0-9]', ' ', line)
        # Trim surrounding whitespace.
        line = line.strip(' \t\n\r')
        # Drop stopwords.
        line = ' '.join(word for word in line.split(' ') if word not in stopwords)
        # Finally, remove apostrophes.
        line = line.replace("'", '')
        # Split on the first sentence terminator that actually splits the line.
        # BUG FIX: as in the blog loop, .strip() was missing from the '!' and
        # '?' branches; it is now applied consistently.
        for terminator in ('.', '!', '?'):
            sentences = line.split(terminator)
            if len(sentences) > 1:
                for sentence in sentences:
                    # Remove remaining punctuation and surrounding spaces.
                    sentence = re.sub(r'[^\w]', ' ', sentence).strip()
                    if len(sentence) > 1:
                        news.append(sentence)
                break
        else:
            # No terminator present: keep the line itself if it has >1 word.
            if len(line.split(' ')) > 1:
                news.append(re.sub(r'[^\w]', ' ', line).strip())
# %% [markdown]
# #### Caso 3: Twitter
#
# En este caso, se toma cada distinto tweet como una oración. Es necesario quitar emojis y símbolos como #, $, %, !, @, etc. Además, se quitan urls y se permiten los símbolos: **.** **,** **'**
# %%
# Each tweet is treated as a single sentence.
tweets = []
with open('./files/en_US.twitter.txt', 'r', encoding='utf-8') as twitter_txt:
    for raw_line in twitter_txt:
        # Lowercase and drop the newline.
        tweet = raw_line.replace('\n', '').lower()
        # Strip URLs.
        tweet = re.sub(r'^https?:\/\/.[\r\n]', '', tweet)
        # Keep only word characters plus . , and the apostrophe
        # (removes emojis, #, $, %, !, @, etc.).
        tweet = re.sub(r"[^\w.,\d'\s]", '', tweet)
        # Drop free-standing numbers.
        tweet = re.sub('^\d+\s|\s\d+\s|\s\d+$', '', tweet)
        tweets.append(tweet.strip())
# %%
# Merge the three corpora and shuffle so the sample below is representative.
complete_data = blog + news + tweets
random.shuffle(complete_data)
# %%
# Keep only 0.5% of the sentences to keep training time manageable.
data_size = int(len(complete_data)*0.005)
print('Se va a utilizar ' + str(data_size) + ' datos')
data = complete_data[:data_size]
# %% [markdown]
# Create a CSV with the sentences used for training.
# %%
df = pd.DataFrame(data, columns=["oraciones"])
df.to_csv('training.csv', index=False)
# %% [markdown]
# Se genera un tokenizer lo cual es una representacion de enteros de cada palabra en nuestra data.
# %%
# The tokenizer maps each word in the corpus to an integer id.
tokenizer = Tokenizer()
# NOTE(review): `data` is a list of sentences, so `[data]` hands the tokenizer
# a list containing one list -- confirm this matches the intended usage.
tokenizer.fit_on_texts([data])
encoded = tokenizer.texts_to_sequences([data])[0]
# %%
# Get the size of our vocabulary (+1 so every tokenizer index, which starts
# at 1, fits inside the embedding/one-hot range).
vocab_size = len(tokenizer.word_index) + 1
# %%
# Map two context words to the following word (trigram windows).
sequences = list()
for i in range(2, len(encoded)):
    sequence = encoded[i-2:i+1]
    sequences.append(sequence)
# Every window built above has length 3, so this padding is effectively a no-op.
max_length = max([len(seq) for seq in sequences])
sequences = pad_sequences(sequences, maxlen=max_length, padding='pre')
# %% [markdown]
# Split each window into input elements (X) and the output word (y).
#
# %%
sequences = array(sequences)
X, y = sequences[:, :-1], sequences[:, -1]
# One-hot encode the target word over the whole vocabulary.
y = to_categorical(y, num_classes=vocab_size)
# %% [markdown]
# Define the model.
# %%
model = Sequential()
model.add(Embedding(vocab_size, 10, input_length=max_length-1))
model.add(LSTM(50))
model.add(Dense(vocab_size, activation='softmax'))
print(model.summary())
# %% [markdown]
# Compile the model.
# %%
model.compile(loss='categorical_crossentropy',
              optimizer='adam', metrics=['accuracy'])
# %%
# Train the model.
model.fit(X, y, epochs=150, verbose=2)
# %%
model.save_weights('deep_no_stopwords')
| [
"keras.preprocessing.text.Tokenizer",
"random.shuffle",
"nltk.corpus.stopwords.words",
"nltk.download",
"keras.utils.to_categorical",
"keras.models.Sequential",
"numpy.array",
"keras.layers.LSTM",
"keras.layers.Dense",
"pandas.DataFrame",
"re.sub",
"keras.preprocessing.sequence.pad_sequences",... | [((684, 710), 'nltk.download', 'nltk.download', (['"""stopwords"""'], {}), "('stopwords')\n", (697, 710), False, 'import nltk\n'), ((767, 793), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (782, 793), False, 'from nltk.corpus import stopwords\n'), ((6048, 6077), 'random.shuffle', 'random.shuffle', (['complete_data'], {}), '(complete_data)\n', (6062, 6077), False, 'import random\n'), ((6305, 6346), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['oraciones']"}), "(data, columns=['oraciones'])\n", (6317, 6346), True, 'import pandas as pd\n'), ((6520, 6531), 'keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {}), '()\n', (6529, 6531), False, 'from keras.preprocessing.text import Tokenizer\n'), ((6928, 6986), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['sequences'], {'maxlen': 'max_length', 'padding': '"""pre"""'}), "(sequences, maxlen=max_length, padding='pre')\n", (6941, 6986), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((7070, 7086), 'numpy.array', 'array', (['sequences'], {}), '(sequences)\n', (7075, 7086), False, 'from numpy import array\n'), ((7134, 7175), 'keras.utils.to_categorical', 'to_categorical', (['y'], {'num_classes': 'vocab_size'}), '(y, num_classes=vocab_size)\n', (7148, 7175), False, 'from keras.utils import to_categorical\n'), ((7229, 7241), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (7239, 7241), False, 'from keras.models import Sequential\n'), ((7252, 7306), 'keras.layers.Embedding', 'Embedding', (['vocab_size', '(10)'], {'input_length': '(max_length - 1)'}), '(vocab_size, 10, input_length=max_length - 1)\n', (7261, 7306), False, 'from keras.layers import Embedding\n'), ((7316, 7324), 'keras.layers.LSTM', 'LSTM', (['(50)'], {}), '(50)\n', (7320, 7324), False, 'from keras.layers import LSTM\n'), ((7336, 7375), 'keras.layers.Dense', 'Dense', (['vocab_size'], {'activation': 
'"""softmax"""'}), "(vocab_size, activation='softmax')\n", (7341, 7375), False, 'from keras.layers import Dense\n'), ((1595, 1638), 're.sub', 're.sub', (['"""^https?:\\\\/\\\\/.[\\\\r\\\\n]"""', '""""""', 'line'], {}), "('^https?:\\\\/\\\\/.[\\\\r\\\\n]', '', line)\n", (1601, 1638), False, 'import re\n'), ((1721, 1757), 're.sub', 're.sub', (['"""[^\\\\w.?!\\\\d\'\\\\s]"""', '""""""', 'line'], {}), '("[^\\\\w.?!\\\\d\'\\\\s]", \'\', line)\n', (1727, 1757), False, 'import re\n'), ((1796, 1822), 're.sub', 're.sub', (['"""[0-9]"""', '""" """', 'line'], {}), "('[0-9]', ' ', line)\n", (1802, 1822), False, 'import re\n'), ((3561, 3604), 're.sub', 're.sub', (['"""^https?:\\\\/\\\\/.[\\\\r\\\\n]"""', '""""""', 'line'], {}), "('^https?:\\\\/\\\\/.[\\\\r\\\\n]', '', line)\n", (3567, 3604), False, 'import re\n'), ((3687, 3723), 're.sub', 're.sub', (['"""[^\\\\w.?!\\\\d\'\\\\s]"""', '""""""', 'line'], {}), '("[^\\\\w.?!\\\\d\'\\\\s]", \'\', line)\n', (3693, 3723), False, 'import re\n'), ((3762, 3788), 're.sub', 're.sub', (['"""[0-9]"""', '""" """', 'line'], {}), "('[0-9]', ' ', line)\n", (3768, 3788), False, 'import re\n'), ((5669, 5712), 're.sub', 're.sub', (['"""^https?:\\\\/\\\\/.[\\\\r\\\\n]"""', '""""""', 'line'], {}), "('^https?:\\\\/\\\\/.[\\\\r\\\\n]', '', line)\n", (5675, 5712), False, 'import re\n'), ((5793, 5828), 're.sub', 're.sub', (['"""[^\\\\w.,\\\\d\'\\\\s]"""', '""""""', 'line'], {}), '("[^\\\\w.,\\\\d\'\\\\s]", \'\', line)\n', (5799, 5828), False, 'import re\n'), ((5885, 5933), 're.sub', 're.sub', (['"""^\\\\d+\\\\s|\\\\s\\\\d+\\\\s|\\\\s\\\\d+$"""', '""""""', 'line'], {}), "('^\\\\d+\\\\s|\\\\s\\\\d+\\\\s|\\\\s\\\\d+$', '', line)\n", (5891, 5933), False, 'import re\n'), ((2760, 2791), 're.sub', 're.sub', (['"""[^\\\\w]"""', '""" """', 'sentence'], {}), "('[^\\\\w]', ' ', sentence)\n", (2766, 2791), False, 'import re\n'), ((4726, 4757), 're.sub', 're.sub', (['"""[^\\\\w]"""', '""" """', 'sentence'], {}), "('[^\\\\w]', ' ', sentence)\n", (4732, 4757), False, 
'import re\n'), ((2534, 2565), 're.sub', 're.sub', (['"""[^\\\\w]"""', '""" """', 'sentence'], {}), "('[^\\\\w]', ' ', sentence)\n", (2540, 2565), False, 'import re\n'), ((2978, 3009), 're.sub', 're.sub', (['"""[^\\\\w]"""', '""" """', 'sentence'], {}), "('[^\\\\w]', ' ', sentence)\n", (2984, 3009), False, 'import re\n'), ((4500, 4531), 're.sub', 're.sub', (['"""[^\\\\w]"""', '""" """', 'sentence'], {}), "('[^\\\\w]', ' ', sentence)\n", (4506, 4531), False, 'import re\n'), ((4944, 4975), 're.sub', 're.sub', (['"""[^\\\\w]"""', '""" """', 'sentence'], {}), "('[^\\\\w]', ' ', sentence)\n", (4950, 4975), False, 'import re\n'), ((3149, 3176), 're.sub', 're.sub', (['"""[^\\\\w]"""', '""" """', 'line'], {}), "('[^\\\\w]', ' ', line)\n", (3155, 3176), False, 'import re\n'), ((5115, 5142), 're.sub', 're.sub', (['"""[^\\\\w]"""', '""" """', 'line'], {}), "('[^\\\\w]', ' ', line)\n", (5121, 5142), False, 'import re\n')] |
import hashlib
import json
import math
import os
import dill
import base64
from sys import exit
import requests
from bson import ObjectId
from Crypto.Cipher import PKCS1_OAEP
from Crypto.Hash import SHA256
from Crypto.PublicKey import RSA
#from cryptography.hazmat.primitives.asymmetric import padding
#from cryptography.hazmat.primitives import serialization, hashes
from tqdm import tqdm
from requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor
from datetime import datetime
from .Model import Model
from .DataLoader import DataLoader
from .Dataset import Dataset
from .saving.saving import save_data, determine_model, TF_str, mxnet_str, pytorch_str
from .web.urls import TOKEN_URL, HASH_URL, UPLOAD_DATA_URL
# Verbosity levels: 1 = minimal, 2 = intermediate, 3 = full logging.
VERBOSITY = 1
MIN_VERBOSITY = 1
MID_VERBOSITY = 2
FULL_VERBOSITY = 3
# Module-level session state, populated by api().
_token = ""
_project = ""
_deployed = False
# Alias for the current-UTC-time factory used by utcnow_formatted().
utcnow = datetime.utcnow
# Load the bundled RSA public key; check_data() uses it to encrypt hub
# dataset tokens before they leave the machine.
with open(os.path.join(os.path.dirname(__file__), "pub_cred_key.pub"), "rb") as key_file:
    #pub_key_encryption = serialization.load_pem_public_key(key_file.read())
    pub_key_encryption = PKCS1_OAEP.new(RSA.importKey(key_file.read()), SHA256)
# from SO
class bcolors:
    # ANSI escape codes used to colour terminal output.
    PURPLE = '\033[95m'
    OKBLUE = '\033[94m'
    OKCYAN = '\033[96m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    ORANGE = '\33[38;5;208m'

# Colour used for each log level by npu_print().
levels = {"WARNING": bcolors.ORANGE, "INFO": bcolors.PURPLE, "ERROR": bcolors.FAIL}
# Prefix attached to every log line.
NEURO_AI_STR = f"[{bcolors.OKBLUE}Neuro Ai{bcolors.ENDC}]"
def api(token_, project_name, verbosity, deployed):
    """Authenticate against the API and select *project_name*.

    Falls back to the NPU_API_TOKEN environment variable when *token_* is
    empty, stores the session settings in module globals, and returns the
    authentication response. Raises ValueError when the token is rejected.
    """
    global _token, _project, VERBOSITY, _deployed
    if token_ == "":
        token_ = os.environ.get("NPU_API_TOKEN", "")
    _token = token_
    VERBOSITY = verbosity
    verbose_print(f"Verbosity level set to {VERBOSITY}", MID_VERBOSITY)
    _deployed = deployed
    if _deployed:
        npu_print("DEPLOYMENT MODE")
    response = post(TOKEN_URL, json={"token": _token, "project_name": project_name})
    # Guard clause: anything but 200 means the token was rejected.
    if response.status_code != 200:
        raise ValueError(response.text)
    npu_print("Token successfully authenticated")
    _project = response.json()
    npu_print(f"Using project: {project_name}")
    return response
# "API token not valid"
def getToken():
    """Return the API token stored by api()."""
    return _token

def auth_header():
    """Return the bearer-token Authorization header for API requests."""
    return {"authorization": "Bearer " + getToken()}

def get_verbosity():
    """Return the current global verbosity level."""
    return VERBOSITY

def get_project():
    """Return the project payload selected by api()."""
    return _project

def is_deployed():
    """Return True when the session runs in deployment mode."""
    return _deployed
def get_response(response):
    """Decode *response* as JSON.

    Raises ConnectionError carrying the raw body text when the payload cannot
    be decoded; the original decode error is chained (``from e``) so the real
    cause stays visible in tracebacks (previously ``e`` was captured but
    dropped).
    """
    try:
        return response.json()
    except Exception as e:
        raise ConnectionError("Invalid response received. Error: {}".format(response.text)) from e
# https://stackoverflow.com/questions/5194057/better-way-to-convert-file-sizes-in-python
def convert_size(size_bytes):
    """Format a byte count as a human-readable string ('0B', '1.5 KB', ...)."""
    if size_bytes == 0:
        return "0B"
    units = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
    # Largest power of 1024 that fits into the value.
    exponent = int(math.floor(math.log(size_bytes, 1024)))
    scaled = round(size_bytes / math.pow(1024, exponent), 2)
    return "%s %s" % (scaled, units[exponent])
def add_kwargs_to_params(params, **kwargs):
    """Return a new dict with *kwargs* merged over *params* (input untouched)."""
    merged = dict(params)
    merged.update(kwargs)
    return merged
def read_in_chunks(file_object, chunk_size=1024):
    """Lazily yield *file_object*'s contents, *chunk_size* units at a time.

    Works for both binary and text file objects; stops at EOF.
    """
    chunk = file_object.read(chunk_size)
    while chunk:
        yield chunk
        chunk = file_object.read(chunk_size)
def check_model(model):
    """Raise ValueError unless *model* is a Task, a Model, or a string id."""
    from .Task import Task
    from .Model import Model
    if isinstance(model, (Task, str, Model)):
        return
    raise ValueError("Model is not a valid format. Please make sure you've compiled it first.")
def check_model_type(model, params):
    """Record *model*'s identity into *params* (mutated in place).

    Model objects contribute name + attributes, plain-name strings go under
    'model_name', and anything else non-empty (ids, Mongo ObjectId strings)
    goes under 'modelId'. Task instances are skipped entirely.
    """
    from .Task import Task
    if isinstance(model, Model):
        params["model_name"] = model.name
        params["model_attr"] = model.attr
        return
    if isinstance(model, str) and not ObjectId.is_valid(model):
        params["model_name"] = model
        return
    if model != "" and not isinstance(model, Task):
        params["modelId"] = model
def check_data_type(data, param_name, params):
    """Record *data*'s identity into *params* under *param_name* keys.

    Mirrors check_model_type(): Dataset ids and plain-name strings map to
    '<param>_name', hub metadata and other ids to '<param>Id'. Always sets
    the '<param>_hub_ds' flag.
    """
    from .Task import Task
    name_key = param_name + "_name"
    id_key = param_name + "Id"
    if isinstance(data, Dataset):
        params[name_key] = data.id
    elif isinstance(data, str) and not ObjectId.is_valid(data):
        params[name_key] = data
    elif isinstance(data, HubDataset):
        params[id_key] = data.hub_meta
    elif data != "" and not isinstance(data, Task):
        params[id_key] = data
    params[f"{param_name}_hub_ds"] = isinstance(data, HubDataset)
def check_data(data, name=""):
    """Normalise *data* into something the API can reference.

    Returns one of: the dict unchanged, a HubDataset wrapper, a string
    id / .npy/.npz path, a Dataset, or the server id of freshly-uploaded
    data. Raises ValueError for a non-string *name* and
    ConnectionAbortedError when an upload fails.
    """
    if not isinstance(name, str):
        raise ValueError("Name given is not valid. Please supply a string.")
    # Raw parameter dicts are passed through untouched.
    if isinstance(data, dict):
        return data
    try:
        # Optional integration: only taken when the `hub` package is installed.
        import hub
        hub_meta = {}
        # Objects wrapping a hub dataset (subsets/views) expose extra fields.
        if hasattr(data, "dataset"):
            if hasattr(data, "indexes"):
                hub_meta["indexes"] = data.indexes
            if hasattr(data, "subpath"):
                hub_meta["subpath"] = data.subpath
            data = data.dataset
        if isinstance(data, hub.Dataset):
            # The hub access token is RSA-encrypted with the bundled public
            # key before it is serialized and sent anywhere.
            encrypted_token = base64.b64encode(
                pub_key_encryption.encrypt(
                    json.dumps(data.token).encode()
                )).decode()
            #pub_key_encryption.encrypt(
            #    json.dumps(data.token).encode(),
            #    padding.OAEP(
            #        mgf=padding.MGF1(
            #            algorithm=hashes.SHA256()), algorithm=hashes.SHA256(), label=None))).decode()
            hub_meta = {"url": data.url, "schema": data.schema, "token": encrypted_token, **hub_meta}
            hub_meta = base64.b64encode(dill.dumps(hub_meta)).decode()
            return HubDataset(hub_meta)
    except Exception as e:
        # Best-effort: fall through to the generic paths when hub is absent
        # or the object is not hub-compatible.
        # print(e)
        pass
    # Already-uploaded ids, .npy/.npz paths and "" are used as-is.
    if isinstance(data, str) and (data.endswith(("npy", "npz")) or ObjectId.is_valid(data) or data == ""):
        return data
    elif isinstance(data, Dataset):
        return data
    elif isinstance(data, DataLoader):
        response = upload_data_loader(data, name)
    else:
        response = upload_data(data, name)
    status_code = response.status_code
    if status_code not in (204, 200, 201):
        raise ConnectionAbortedError("Data upload has not worked: {}".format(response.content))
    # 204 means "already known, nothing returned"; otherwise decode the body
    # and unwrap the new server-side id.
    if status_code != 204:
        response = get_response(response)
        if isinstance(response, dict) and status_code == 200:
            message = response.get("message")
            npu_print(message)
            response = response["id"]
    return response
def slice_data(data):
    """Unpack a ``{"id": ..., "indexes": ...}`` dict into (id, start, end).

    When 'indexes' is a slice its bounds are split out; otherwise it is
    treated as a bare start with no end.
    """
    data_id = data["id"]
    indexes = data["indexes"]
    if isinstance(indexes, slice):
        return data_id, indexes.start, indexes.stop
    return data_id, indexes, None
def gen(dl):
    """Yield each chunk of *dl* serialized through save_data()."""
    for chunk in dl.numpy():
        yield save_data(chunk)
def create_callback(encoder):
    """Build a MultipartEncoderMonitor callback driving a tqdm progress bar."""
    total = encoder.len
    progress = tqdm(desc=f"{NEURO_AI_STR} Uploading", unit="B", unit_scale=True,
                    total=total, unit_divisor=1024)

    def callback(monitor):
        progress.n = monitor.bytes_read
        progress.refresh()
        # Close the bar once the final byte has been read.
        if monitor.bytes_read == total:
            progress.close()
    return callback
def get_progress_bar_uploader(file, json):
    """Wrap *file* + *json* metadata in a progress-reporting upload monitor."""
    upload = create_upload(file, json)
    return MultipartEncoderMonitor(upload, create_callback(upload))
def create_upload(file, _json):
    """Build the multipart payload: the raw file plus its JSON metadata."""
    fields = {
        'file': ('file', file, 'application/octet-stream',
                 {'Content-Transfer-Encoding': 'binary'}),
        'json': (None, json.dumps(_json), 'application/json', {}),
    }
    return MultipartEncoder(fields)
def upload_data_loader(dl, name=""):
    """Upload the contents of DataLoader *dl* in chunks, skipping known data.

    The loader is hashed locally first; if the server already has that hash
    the lookup response is returned without re-uploading. Single-chunk
    loaders fall back to upload_data(). Returns the last HTTP response.
    """
    verbose_print("Hashing data locally...", MID_VERBOSITY)
    hash, size, length = dl.hash()
    # 'chunked'/'is_last' drive the server-side chunk reassembly protocol.
    params = {"token": getToken(), "hash": hash, "collection": 1, "chunked": True, "is_last": False, "size": size,
              "given_name": name, "input_shape": dl.shape, "project": get_project()}
    # params = {"token": getToken(), "hash": hash, "collection": 1, "size": size, "given_name": name}
    verbose_print("Checking if data is on servers...", MID_VERBOSITY)
    response = get(HASH_URL, params=params)
    if response.status_code == 200:
        verbose_print("Data already uploaded. Will not reupload.", MID_VERBOSITY)
        return response
    npu_print("Data not on servers. Starting to upload. Total size of data is {}".format(convert_size(size)))
    if length == 1:
        # A single chunk does not need the chunked protocol.
        return upload_data(next(dl.numpy()), name)
    npu_print("{} chunks to upload...".format(length))
    for i, data_part in enumerate(dl.numpy()):
        verbose_print("Uploading chunk {} out of {}...".format(i + 1, length), MID_VERBOSITY)
        # Mark the final chunk so the server can finalize the assembly.
        if i == length - 1:
            params["is_last"] = True
        file = save_data(data_part)
        monitor = get_progress_bar_uploader(file, params)
        response = post(UPLOAD_DATA_URL, data=monitor,
                        headers={'Content-Type': monitor.content_type})
    return response
def upload_data(data, name=""):
    """Upload *data* (array-like, or a file path string) unless already known.

    The payload is hashed first; if the server recognises the hash the lookup
    response is returned without uploading. Returns the HTTP response.
    """
    verbose_print("Saving data locally...", FULL_VERBOSITY)
    # A string is treated as a path to an arbitrary file on disk; anything
    # else is serialized into an in-memory buffer by save_data().
    generic_file = isinstance(data, str)
    file = open(data, "rb") if generic_file else save_data(data)
    # BUG FIX: the original only closed the handle on the early-return path
    # (and for path inputs); wrap in try/finally so it is always released,
    # including when the request raises.
    try:
        verbose_print("Hashing...", FULL_VERBOSITY)
        md5 = hashlib.md5()
        for piece in read_in_chunks(file):
            md5.update(piece)
        size = file.tell()
        hash = md5.hexdigest()
        verbose_print("Checking if data is on servers...", MID_VERBOSITY)
        params = {"token": getToken(), "hash": hash, "collection": 1, "given_name": name, "project": get_project(),
                  "generic_file": generic_file}
        response = get(HASH_URL, params=params, json=params)
        if response.status_code == 200:
            verbose_print("Data already on servers. Returning result...", MID_VERBOSITY)
            return response
        npu_print("Data not found on servers. Total size of data is {}. Uploading now...".format(convert_size(size)))
        # Rewind: hashing consumed the stream.
        file.seek(0)
        monitor = get_progress_bar_uploader(file=file, json=params)
        return post(UPLOAD_DATA_URL, data=monitor,
                    headers={'Content-Type': monitor.content_type})
    finally:
        file.close()
def upload_sample(data, params):
    # NOTE(review): this helper looks unfinished -- it decides whether a
    # sample is required and truncates `data` to the first 10 items, but then
    # never uploads or returns the sample; the only value ever returned is
    # False (when no sample is required). `params` is unused.
    required = (len(data[0]) if isinstance(data, (tuple, list)) else len(data)) > 10
    if not required:
        return False
    data = [d[:10] for d in data] if isinstance(data, (tuple, list)) else data[:10]
def hash_file(file):
    """Return the MD5 hex digest of *file*'s remaining contents."""
    digest = hashlib.md5()
    for block in read_in_chunks(file):
        digest.update(block)
    # break
    return digest.hexdigest()
def validate_model(model, data):
    """Smoke-test *model* by calling it on an all-ones tensor shaped like *data*.

    Only runs for frameworks we can build a placeholder for (PyTorch, MXNet,
    TensorFlow/Keras); string data (server ids / paths) is skipped entirely.
    """
    library = determine_model(model)
    if isinstance(data, str):
        return
    # data = convert_to_numpy(data)
    # Pick the framework-appropriate `ones` factory.
    if library == pytorch_str:
        from torch import ones
    elif library == mxnet_str:
        from mxnet import nd
        ones = nd.ones
    elif library == TF_str:
        # Keras models accept plain numpy arrays.
        from numpy import ones
    else:
        # Unknown library: nothing we can validate.
        return
        # raise ValueError("Cannot validate library: {} .".format(library))
    # NOTE(review): assumes `data` exposes a `.shape` attribute -- confirm
    # callers never pass plain Python lists here.
    placeholder_data = ones(data.shape)
    model(placeholder_data)
def determine_data(data):
    """Normalise *data* into a (data, name, start, end) tuple.

    Slice-descriptor dicts are unpacked via slice_data(); Dataset objects
    contribute their id as the name.
    """
    start = None
    end = None
    name = ""
    if isinstance(data, dict):
        data, start, end = slice_data(data)
    if isinstance(data, Dataset):
        name = data.id
    return data, name, start, end
def npu_print(val, level="INFO"):
    """Print *val* with the Neuro Ai prefix, a timestamp and a coloured level tag."""
    tag = f"{levels[level]}{level}{bcolors.ENDC}"
    print(f"{NEURO_AI_STR} {utcnow_formatted()} - [{tag}]: {val}")
def verbose_print(str, verbosity):
    """Forward *str* to npu_print() only when the global verbosity allows it."""
    if verbosity <= VERBOSITY:
        npu_print(str)
def utcnow_formatted():
    """Return the current UTC time as 'HH:MM:SS'."""
    return "{:%H:%M:%S}".format(utcnow())
def make_request(request_type_function, url, data, headers, json, params, **kwargs):
    """Issue an HTTP request through *request_type_function* with auth headers.

    None arguments are normalised to empty dicts, the bearer-token header is
    merged in, and raise_for_status() is enforced. On failure the server's
    'error'/'message' field (when a body is available) is logged via
    npu_print and a generic Exception is raised.
    """
    if params is None:
        params = {}
    if json is None:
        json = {}
    if data is None:
        data = {}
    if headers is None:
        headers = {}
    response = None
    try:
        response = request_type_function(url, data=data, headers={**headers, **auth_header()}, json=json,
                                         params=params, **kwargs)
        response.raise_for_status()
        return response
    except requests.exceptions.RequestException as err:
        # BUG FIX: when the request itself failed (connection error, timeout)
        # there is no response object at all; the original then crashed with
        # UnboundLocalError on `response.json()`, masking the real failure.
        if response is not None:
            try:
                body = response.json()
            except ValueError:
                # Non-JSON error body: nothing useful to report from it.
                body = {}
            if "error" in body:
                npu_print(f"Error: {body['error']}", level="ERROR")
            elif "message" in body:
                npu_print(f"Error: {body['message']}", level="ERROR")
        raise Exception from err
        # exit(1)
def post(url, data=None, headers=None, json=None, params=None, **kwargs):
    """Authenticated POST wrapper around make_request()."""
    return make_request(requests.post, url, data, headers, json, params, **kwargs)

def get(url, data=None, headers=None, json=None, params=None, **kwargs):
    """Authenticated GET wrapper around make_request()."""
    return make_request(requests.get, url, data, headers, json, params, **kwargs)
class HubDataset:
    """Lightweight marker wrapping serialized Hub dataset metadata.

    ``hub_meta`` is the base64-encoded, dill-serialized metadata blob
    produced by check_data() (url, schema, encrypted token, optional
    indexes/subpath), or an already-prepared metadata payload.
    """

    def __init__(self, hub_meta):
        self.hub_meta = hub_meta

    def __repr__(self):
        # Added for debuggability; the class previously had no repr.
        return f"HubDataset(hub_meta={self.hub_meta!r})"
| [
"bson.ObjectId.is_valid",
"numpy.ones",
"hashlib.md5",
"requests_toolbelt.MultipartEncoderMonitor",
"math.pow",
"tqdm.tqdm",
"os.environ.get",
"json.dumps",
"math.log",
"os.path.dirname",
"dill.dumps"
] | [((3041, 3058), 'math.pow', 'math.pow', (['(1024)', 'i'], {}), '(1024, i)\n', (3049, 3058), False, 'import math\n'), ((7038, 7146), 'tqdm.tqdm', 'tqdm', ([], {'desc': 'f"""{NEURO_AI_STR} Uploading"""', 'unit': '"""B"""', 'unit_scale': '(True)', 'total': 'encoder_len', 'unit_divisor': '(1024)'}), "(desc=f'{NEURO_AI_STR} Uploading', unit='B', unit_scale=True, total=\n encoder_len, unit_divisor=1024)\n", (7042, 7146), False, 'from tqdm import tqdm\n'), ((7457, 7499), 'requests_toolbelt.MultipartEncoderMonitor', 'MultipartEncoderMonitor', (['encoder', 'callback'], {}), '(encoder, callback)\n', (7480, 7499), False, 'from requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor\n'), ((9442, 9455), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (9453, 9455), False, 'import hashlib\n'), ((10688, 10701), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (10699, 10701), False, 'import hashlib\n'), ((11309, 11325), 'numpy.ones', 'ones', (['data.shape'], {}), '(data.shape)\n', (11313, 11325), False, 'from numpy import ones\n'), ((1702, 1737), 'os.environ.get', 'os.environ.get', (['"""NPU_API_TOKEN"""', '""""""'], {}), "('NPU_API_TOKEN', '')\n", (1716, 1737), False, 'import os\n'), ((892, 917), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (907, 917), False, 'import os\n'), ((3004, 3030), 'math.log', 'math.log', (['size_bytes', '(1024)'], {}), '(size_bytes, 1024)\n', (3012, 3030), False, 'import math\n'), ((5998, 6021), 'bson.ObjectId.is_valid', 'ObjectId.is_valid', (['data'], {}), '(data)\n', (6015, 6021), False, 'from bson import ObjectId\n'), ((3999, 4023), 'bson.ObjectId.is_valid', 'ObjectId.is_valid', (['model'], {}), '(model)\n', (4016, 4023), False, 'from bson import ObjectId\n'), ((4346, 4369), 'bson.ObjectId.is_valid', 'ObjectId.is_valid', (['data'], {}), '(data)\n', (4363, 4369), False, 'from bson import ObjectId\n'), ((7707, 7724), 'json.dumps', 'json.dumps', (['_json'], {}), '(_json)\n', (7717, 7724), False, 'import json\n'), 
((5801, 5821), 'dill.dumps', 'dill.dumps', (['hub_meta'], {}), '(hub_meta)\n', (5811, 5821), False, 'import dill\n'), ((5315, 5337), 'json.dumps', 'json.dumps', (['data.token'], {}), '(data.token)\n', (5325, 5337), False, 'import json\n')] |
import asyncio
import os
import traceback
from datetime import datetime

import discord
from discord.ext import commands
import tasks
# Command bot that handles the '/'-prefixed text commands below.
bot = commands.Bot(command_prefix='/')
# The bot token must be provided via the environment (KeyError if unset).
token = os.environ['DISCORD_BOT_TOKEN']
# Create the object needed for the connection.
# NOTE(review): `discord` itself must be imported (not only
# `discord.ext.commands`) for this name to resolve.
client = discord.Client()
# Date/times (minute precision) at which to post.
dateTimeList = [
    '2019/11/19 18:09',
    '2019/11/19 18:15',
    '2019/11/19 18:20',
]
# Runs once the client has finished connecting to Discord.
@client.event
async def on_ready():
    print('ready')
# Coroutine that runs at the scheduled times.
async def SendMessage():
    # NOTE(review): neither `get_channel` nor `ctx` is defined anywhere in
    # this script, so awaiting this coroutine raises NameError; a concrete
    # channel lookup (e.g. client.get_channel(<channel_id>)) is needed here.
    channel = get_channel(ctx)
    await channel.send('時間だよ')
# Loop that fires once every 30 seconds.
@tasks.loop(seconds=30)
async def time_check():
    sleepTime = 0  # NOTE(review): unused leftover
    # Current local time, truncated to minute precision.
    now = datetime.now().strftime('%Y/%m/%d %H:%M')
    if now in dateTimeList :
        print(now)
        await SendMessage()
        # If the time matched, wait an extra 30 s so the same
        # minute is not posted twice.
        await asyncio.sleep(30)
@bot.command()
async def ping(ctx):
    """Reply 'pong' to '/ping' (liveness check)."""
    await ctx.send('pong')
# Start the 30-second scheduler loop.
time_check.start()
# Start the bot and connect to the Discord server.
# NOTE(review): client.run() blocks until the client disconnects, so the
# bot.run() call below is effectively unreachable and the commands.Bot
# (with its /ping command) never comes online while the client runs.
client.run(token)
bot.run(token)
| [
"discord.ext.commands.Bot",
"datetime.datetime.now",
"tasks.loop"
] | [((111, 143), 'discord.ext.commands.Bot', 'commands.Bot', ([], {'command_prefix': '"""/"""'}), "(command_prefix='/')\n", (123, 143), False, 'from discord.ext import commands\n'), ((501, 523), 'tasks.loop', 'tasks.loop', ([], {'seconds': '(30)'}), '(seconds=30)\n', (511, 523), False, 'import tasks\n'), ((588, 602), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (600, 602), False, 'from datetime import datetime\n')] |
""" Code to implement ScaleFactor:: decorator supported
in gtlike.
The gtlike feature is documented here:
https://confluence.slac.stanford.edu/display/ST/Science+Tools+Development+Notes?focusedCommentId=103582318#comment-103582318
Author: <NAME>
"""
import operator
from copy import deepcopy
import numpy as np
from uw.like.Models import PowerLaw, PowerLawFlux, FileFunction, PLSuperExpCutoff, Gaussian, Constant, CompositeModel
from uw.darkmatter.spectral import DMFitFunction
def build_scale_factor(model_class):
    """ Build a ScaleFactor::<model> class wrapping *model_class* with a
    multiplicative Constant prefactor named 'ScaleFactor'.

    First, create the ScaleFactorPowerLaw and a comparison PowerLaw
        >>> scale = 3.133141
        >>> sfpl=ScaleFactorPowerLaw(ScaleFactor=scale)
        >>> pl = PowerLaw()
        >>> print sfpl.name
        ScaleFactorPowerLaw
        >>> print sfpl.gtlike['name']
        ScaleFactor::PowerLaw
        >>> print sfpl.pretty_name
        ScaleFactor::PowerLaw
        >>> print sfpl.full_name()
        ScaleFactor::PowerLaw, e0=1000, ScaleFactor=3.133141
        >>> print sfpl.e0 == pl.e0
        True
        >>> sfpl.default_extra_params == pl.default_extra_params
        True
        >>> np.all(sfpl.default_p == [1] + pl.default_p)
        True
        >>> print sfpl.param_names == ['ScaleFactor'] + pl.param_names
        True
        >>> print np.all(sfpl.default_mappers == Constant.default_mappers + PowerLaw.default_mappers)
        True
        >>> sfpl.default_extra_params == pl.default_extra_params
        True
        >>> sfpl.default_extra_attrs == sfpl.default_extra_attrs
        True
        >>> print sfpl.default_oomp_limits == ['ScaleFactor'] + PowerLaw.default_oomp_limits
        True

    Make sure that default_limits acts correclty
        >>> dl=sfpl.default_limits
        >>> dl['Norm'] == pl.default_limits['Norm']
        True
        >>> dl['Index'] == pl.default_limits['Index']
        True
        >>> dl['ScaleFactor'] == Constant.default_limits['Scale']
        True

    Make sure the __call__ function is correct
        >>> energies=np.logspace(1,5,100)
        >>> np.all(sfpl(energies) == scale*pl(energies))
        True

    And that the gradient follows the chain rule:
        >>> grad = sfpl.external_gradient(energies)
        >>> np.all(grad[0] == pl(energies))
        True
        >>> np.all(grad[1:] == scale*pl.external_gradient(energies))
        True

    Note, we can set default limits for ScaleFactor objects (necessary for XML creation):
        >>> print sfpl.mappers == Constant.default_mappers + PowerLaw.default_mappers
        True
        >>> print np.all(sfpl.default_mappers == Constant.default_mappers + PowerLaw.default_mappers)
        True
        >>> sfpl.set_default_limits()
        >>> sfpl.mappers == [Constant.default_limits['Scale'],PowerLaw.default_limits['Norm'],PowerLaw.default_limits['Index']]
        True

    Also, you can obtain the unfit parameters either as values of the object or with getp/setp
        >>> sfpl.e0 == pl.e0 and sfpl['e0'] == pl.e0 and sfpl.getp('e0') == pl.e0
        True

    We can create ScaleFactor object for other models. For PowerLawFlux:
        >>> sfpl2=ScaleFactorPowerLawFlux(ScaleFactor=scale)
        >>> pl2 = PowerLawFlux()
        >>> print sfpl2.name
        ScaleFactorPowerLawFlux
        >>> print sfpl2.gtlike['name']
        ScaleFactor::PowerLaw2
        >>> sfpl2.emax == pl2.emax and sfpl2.emax == pl2.emax
        True

    And, of course, the values are just scaled
        >>> np.all(sfpl2(energies) == scale*pl2(energies))
        True

    There is also a ScaleFactorFileFunction object, which acts just like a FileFunction.
        >>> from tempfile import NamedTemporaryFile
        >>> temp = NamedTemporaryFile()
        >>> filename = temp.name
        >>> sfpl2.save_profile(filename, emin=1, emax=1e5)
        >>> temp.seek(0)
        >>> sfff = ScaleFactorFileFunction(ScaleFactor=5.5, normalization=1, file=filename)
        >>> np.allclose(sfff(energies),5.5*sfpl2(energies),rtol=1e-10, atol=1e-10)
        True

    Note, it sets default_extra_attrs correctly:
        >>> sfff.default_extra_attrs == FileFunction.default_extra_attrs
        True
        >>> sfff.file == filename
        True
    """
    # For a description of creating classes on the fly, see:
    # http://jjinux.blogspot.com/2005/03/python-create-new-class-on-fly.html
    c = type('ScaleFactor' + model_class.__name__, (CompositeModel,), {})
    # Note, default_p, param_names, default_mappers, automatically taken care of by CompositeModel
    c.default_extra_params = model_class.default_extra_params
    c.default_extra_attrs = model_class.default_extra_attrs
    c.gtlike = deepcopy(model_class.gtlike)
    c.gtlike['name'] = 'ScaleFactor::%s' % c.gtlike['name']
    c.gtlike['param_names'].insert(0, 'ScaleFactor')
    c.gtlike['topointlike'].insert(0, operator.pos)
    c.gtlike['togtlike'].insert(0, operator.pos)
    def __init__(self, **kwargs):
        # The prefactor is modeled as a Constant renamed 'ScaleFactor'.
        scale = Constant(name='ScaleFactor')
        scale.default_oomp_limits = ['ScaleFactor']
        if 'ScaleFactor' in kwargs:
            scale['ScaleFactor'] = kwargs.pop('ScaleFactor')
        m = model_class(**kwargs)
        super(c, self).__init__(scale, m)
        self.scale = scale
        self.model = m
    for p in list(c.default_extra_params.keys()) + list(c.default_extra_attrs.keys()):
        # Allow getting and setting the default_extra_params and
        # default_extra_attrs directly through the self.model object.
        # Bug fix: bind ``p`` through a default argument. The original
        # lambdas closed over the loop variable, so every generated
        # property read/wrote the *last* name in the loop (late binding).
        get = lambda self, _p=p: getattr(self.model, _p)
        set = lambda self, value, _p=p: setattr(self.model, _p, value)
        # Bug fix: property() takes (fget, fset, fdel, doc); the original
        # passed the name string as fdel, which would crash on ``del``.
        setattr(c, p, property(get, set, None, p))
    c.__init__ = __init__
    c.__call__ = lambda self, e: self.scale.__call__(e)*self.model.__call__(e)
    c.pretty_name = property(lambda self: 'ScaleFactor::%s' % self.model.pretty_name)
    c.full_name = lambda self: 'ScaleFactor::%s, ScaleFactor=%s' % (self.model.full_name(), self['ScaleFactor'])
    def external_gradient(self, e):
        # Chain rule: gradient w.r.t. the scale is the bare model value;
        # gradients w.r.t. the model parameters are scaled by the prefactor.
        a = self.scale.external_gradient(e)*self.model.__call__(e)
        b = self.scale.__call__(e)*self.model.external_gradient(e)
        return np.concatenate((a, b), axis=0)
    c.external_gradient = external_gradient
    return c
# Pre-built ScaleFactor::X wrappers, one per supported spectral model.
ScaleFactorPowerLaw=build_scale_factor(PowerLaw)
ScaleFactorPowerLawFlux=build_scale_factor(PowerLawFlux)
ScaleFactorFileFunction=build_scale_factor(FileFunction)
ScaleFactorDMFitFunction=build_scale_factor(DMFitFunction)
ScaleFactorPLSuperExpCutoff=build_scale_factor(PLSuperExpCutoff)
ScaleFactorGaussian=build_scale_factor(Gaussian)
if __name__ == "__main__":
    # Run the doctests embedded in build_scale_factor's docstring.
    import doctest
    doctest.testmod()
| [
"doctest.testmod",
"uw.like.Models.Constant",
"numpy.concatenate",
"copy.deepcopy"
] | [((5021, 5049), 'copy.deepcopy', 'deepcopy', (['model_class.gtlike'], {}), '(model_class.gtlike)\n', (5029, 5049), False, 'from copy import deepcopy\n'), ((6931, 6948), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (6946, 6948), False, 'import doctest\n'), ((5311, 5339), 'uw.like.Models.Constant', 'Constant', ([], {'name': '"""ScaleFactor"""'}), "(name='ScaleFactor')\n", (5319, 5339), False, 'from uw.like.Models import PowerLaw, PowerLawFlux, FileFunction, PLSuperExpCutoff, Gaussian, Constant, CompositeModel\n'), ((6455, 6485), 'numpy.concatenate', 'np.concatenate', (['(a, b)'], {'axis': '(0)'}), '((a, b), axis=0)\n', (6469, 6485), True, 'import numpy as np\n')] |
import RPi.GPIO as GPIO
import time
s2 = 26  # GPIO pin (BCM numbering, see setup()) driving the sensor's S2 select line
s3 = 27  # GPIO pin driving the sensor's S3 select line
signal = 17  # input pin carrying the sensor's frequency output
NUM_CYCLES = 10  # number of falling edges timed per colour measurement
def setup():
    """Configure GPIO: BCM numbering, s2/s3 as outputs, signal as a
    pulled-up input."""
    GPIO.setmode(GPIO.BCM)
    for pin in (s2, s3):
        GPIO.setup(pin, GPIO.OUT)
    GPIO.setup(signal, GPIO.IN, pull_up_down=GPIO.PUD_UP)
    print("\n")
def _read_frequency(s2_level, s3_level):
    """Select a colour filter via the (s2, s3) levels, let the output
    settle, then return the measured output frequency in Hz, timed over
    NUM_CYCLES falling edges."""
    GPIO.output(s2, s2_level)
    GPIO.output(s3, s3_level)
    time.sleep(0.3)  # settle time after switching filters
    start = time.time()
    for _ in range(NUM_CYCLES):
        GPIO.wait_for_edge(signal, GPIO.FALLING)
    duration = time.time() - start  # seconds for NUM_CYCLES pulses
    return NUM_CYCLES / duration

def loop():
    """Continuously sample the red/blue/green channel frequencies and
    print the dominant colour (threshold 12500 Hz).

    The original repeated the measurement code three times inline; it is
    factored into _read_frequency() with identical pin levels, sleeps,
    and output.
    """
    while 1:
        red = _read_frequency(GPIO.LOW, GPIO.LOW)
        print("red value - ", red)
        blue = _read_frequency(GPIO.LOW, GPIO.HIGH)
        print("blue value - ", blue)
        green = _read_frequency(GPIO.HIGH, GPIO.HIGH)
        print("green value - ", green)
        time.sleep(2)
        # Priority order preserved from the original: green, red, blue.
        if green > 12500:
            print("Green")
        elif red > 12500:
            print("Red")
        elif blue > 12500:
            print("Blue")
def endprogram():
    """Release all GPIO resources on shutdown."""
    GPIO.cleanup()
if __name__=='__main__':
    setup()
    try:
        loop()
    except KeyboardInterrupt:
        # Ctrl+C: restore the GPIO state before exiting.
        endprogram()
"RPi.GPIO.cleanup",
"RPi.GPIO.setup",
"RPi.GPIO.output",
"RPi.GPIO.wait_for_edge",
"time.sleep",
"time.time",
"RPi.GPIO.setmode"
] | [((99, 121), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BCM'], {}), '(GPIO.BCM)\n', (111, 121), True, 'import RPi.GPIO as GPIO\n'), ((124, 177), 'RPi.GPIO.setup', 'GPIO.setup', (['signal', 'GPIO.IN'], {'pull_up_down': 'GPIO.PUD_UP'}), '(signal, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n', (134, 177), True, 'import RPi.GPIO as GPIO\n'), ((179, 203), 'RPi.GPIO.setup', 'GPIO.setup', (['s2', 'GPIO.OUT'], {}), '(s2, GPIO.OUT)\n', (189, 203), True, 'import RPi.GPIO as GPIO\n'), ((205, 229), 'RPi.GPIO.setup', 'GPIO.setup', (['s3', 'GPIO.OUT'], {}), '(s3, GPIO.OUT)\n', (215, 229), True, 'import RPi.GPIO as GPIO\n'), ((1410, 1424), 'RPi.GPIO.cleanup', 'GPIO.cleanup', ([], {}), '()\n', (1422, 1424), True, 'import RPi.GPIO as GPIO\n'), ((292, 317), 'RPi.GPIO.output', 'GPIO.output', (['s2', 'GPIO.LOW'], {}), '(s2, GPIO.LOW)\n', (303, 317), True, 'import RPi.GPIO as GPIO\n'), ((321, 346), 'RPi.GPIO.output', 'GPIO.output', (['s3', 'GPIO.LOW'], {}), '(s3, GPIO.LOW)\n', (332, 346), True, 'import RPi.GPIO as GPIO\n'), ((350, 365), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (360, 365), False, 'import time\n'), ((378, 389), 'time.time', 'time.time', ([], {}), '()\n', (387, 389), False, 'import time\n'), ((623, 648), 'RPi.GPIO.output', 'GPIO.output', (['s2', 'GPIO.LOW'], {}), '(s2, GPIO.LOW)\n', (634, 648), True, 'import RPi.GPIO as GPIO\n'), ((652, 678), 'RPi.GPIO.output', 'GPIO.output', (['s3', 'GPIO.HIGH'], {}), '(s3, GPIO.HIGH)\n', (663, 678), True, 'import RPi.GPIO as GPIO\n'), ((682, 697), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (692, 697), False, 'import time\n'), ((710, 721), 'time.time', 'time.time', ([], {}), '()\n', (719, 721), False, 'import time\n'), ((918, 944), 'RPi.GPIO.output', 'GPIO.output', (['s2', 'GPIO.HIGH'], {}), '(s2, GPIO.HIGH)\n', (929, 944), True, 'import RPi.GPIO as GPIO\n'), ((948, 974), 'RPi.GPIO.output', 'GPIO.output', (['s3', 'GPIO.HIGH'], {}), '(s3, GPIO.HIGH)\n', (959, 974), True, 'import RPi.GPIO as GPIO\n'), ((978, 993), 
'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (988, 993), False, 'import time\n'), ((1006, 1017), 'time.time', 'time.time', ([], {}), '()\n', (1015, 1017), False, 'import time\n'), ((1216, 1229), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (1226, 1229), False, 'import time\n'), ((440, 480), 'RPi.GPIO.wait_for_edge', 'GPIO.wait_for_edge', (['signal', 'GPIO.FALLING'], {}), '(signal, GPIO.FALLING)\n', (458, 480), True, 'import RPi.GPIO as GPIO\n'), ((496, 507), 'time.time', 'time.time', ([], {}), '()\n', (505, 507), False, 'import time\n'), ((772, 812), 'RPi.GPIO.wait_for_edge', 'GPIO.wait_for_edge', (['signal', 'GPIO.FALLING'], {}), '(signal, GPIO.FALLING)\n', (790, 812), True, 'import RPi.GPIO as GPIO\n'), ((828, 839), 'time.time', 'time.time', ([], {}), '()\n', (837, 839), False, 'import time\n'), ((1068, 1108), 'RPi.GPIO.wait_for_edge', 'GPIO.wait_for_edge', (['signal', 'GPIO.FALLING'], {}), '(signal, GPIO.FALLING)\n', (1086, 1108), True, 'import RPi.GPIO as GPIO\n'), ((1124, 1135), 'time.time', 'time.time', ([], {}), '()\n', (1133, 1135), False, 'import time\n')] |
import datetime
import peewee as p
from breeze import App, Resource, Serializable
db = p.SqliteDatabase('users.db')
class UserModel(p.Model):
    # Peewee ORM model backing the User resource below.
    username = p.CharField(unique=True)  # login name, enforced unique
    password = p.CharField()  # NOTE(review): stored as a plain CharField; presumably hashed upstream -- confirm
    email = p.CharField()
    join_date = p.DateTimeField(default=datetime.datetime.now)  # set at row creation
    class Meta:
        # Bind this model to the module-level SQLite database.
        database = db
class User(Serializable, Resource):
    """API-facing user resource serialized from UserModel rows.

    Only non-sensitive fields (email, username, join_date) are exposed.
    """
    email = Serializable.String()
    username = Serializable.String()
    join_date = Serializable.DateTime()
    @classmethod
    def from_model(cls, model):
        """Build a User resource from a UserModel row.

        Bug fix: the original returned ``cls.__init__(...)``, which calls
        the initializer unbound (missing ``self``) and would in any case
        yield ``None``; the instance must be built via ``cls(...)``.
        """
        return cls(
            email=model.email,
            username=model.username,
            join_date=model.join_date
        )
    @classmethod
    def list(cls, filter_options):
        """Return one page of users (peewee's paginate is 1-indexed,
        hence the ``page + 1``)."""
        return [
            cls.from_model(u) for u in
            UserModel.select().paginate(
                filter_options.page + 1,
                filter_options.size
            )
        ]
# Open the database connection and mount the User resource under /api/v1/.
db.connect()
app = App(User, prefix='/api/v1/', debug=True)
if __name__ == '__main__':
    app.serve()
| [
"peewee.CharField",
"peewee.SqliteDatabase",
"breeze.Serializable.DateTime",
"breeze.Serializable.String",
"peewee.DateTimeField",
"breeze.App"
] | [((91, 119), 'peewee.SqliteDatabase', 'p.SqliteDatabase', (['"""users.db"""'], {}), "('users.db')\n", (107, 119), True, 'import peewee as p\n'), ((960, 1000), 'breeze.App', 'App', (['User'], {'prefix': '"""/api/v1/"""', 'debug': '(True)'}), "(User, prefix='/api/v1/', debug=True)\n", (963, 1000), False, 'from breeze import App, Resource, Serializable\n'), ((163, 187), 'peewee.CharField', 'p.CharField', ([], {'unique': '(True)'}), '(unique=True)\n', (174, 187), True, 'import peewee as p\n'), ((203, 216), 'peewee.CharField', 'p.CharField', ([], {}), '()\n', (214, 216), True, 'import peewee as p\n'), ((229, 242), 'peewee.CharField', 'p.CharField', ([], {}), '()\n', (240, 242), True, 'import peewee as p\n'), ((259, 305), 'peewee.DateTimeField', 'p.DateTimeField', ([], {'default': 'datetime.datetime.now'}), '(default=datetime.datetime.now)\n', (274, 305), True, 'import peewee as p\n'), ((395, 416), 'breeze.Serializable.String', 'Serializable.String', ([], {}), '()\n', (414, 416), False, 'from breeze import App, Resource, Serializable\n'), ((432, 453), 'breeze.Serializable.String', 'Serializable.String', ([], {}), '()\n', (451, 453), False, 'from breeze import App, Resource, Serializable\n'), ((470, 493), 'breeze.Serializable.DateTime', 'Serializable.DateTime', ([], {}), '()\n', (491, 493), False, 'from breeze import App, Resource, Serializable\n')] |
#This is a class because it stores its model parameters and has a 'prediction' function which returns predictions for input data
import numpy as np
from baseModel import baseModel, ModellingError as me
from datetime import datetime
import pandas as pd
class ModellingError(me): pass
class ConstantMonthlyModel(baseModel):
    """
    A constant consumption model: consumption is estimated as the average of all input data
    Input_data must respond to the method call 'consumption'
    """
    # Number of fitted parameters (the mean); kept for API parity with
    # sibling baseModel subclasses.
    n_parameters = 1
    def __init__(self, data):
        # With too few samples the monthly statistics are meaningless, so
        # mark the fit invalid with NaNs instead of raising. Note the code
        # still falls through and attempts the fit below.
        if len(data) <= 11:#(self.n_parameters + 2):
            self.mean = np.nan
            self.std = np.nan
            #raise ModellingError, "Not enough input data"
        if 'temperature' in data.dtype.names:
            # Record the temperature range seen during fitting
            # (presumably for later extrapolation checks -- confirm).
            x = data['temperature']
            self.xrange = [min(x), max(x)]
        # Re-index the records by their timestamp so month selection works.
        data_pd = pd.DataFrame.from_records(data)
        data_pd['ts'] = data_pd['timestamp'].apply(datetime.fromtimestamp)
        data_pd = data_pd.set_index(pd.DatetimeIndex(data_pd['ts']))
        data_pd.sort_index(inplace=True)
        # Month *after* the most recent observation, wrapping Dec -> Jan.
        # NOTE(review): ``data_pd[-1:].index.month`` is array-valued, so
        # the comparison and the ``+1`` operate element-wise -- verify.
        last_month = data_pd[-1:].index.month+1 if data_pd[-1:].index.month != 12 else 1
        # Fit = statistics of consumption restricted to that calendar month.
        self.mean = data_pd[data_pd.index.month==last_month]['consumption'].mean()
        self.std = data_pd[data_pd.index.month==last_month]['consumption'].std()
    def prediction(self, independent_data):
        # Constant model: every prediction is the fitted mean.
        return np.array([self.mean] * len(independent_data))
    def simulation(self, independent_data):
        # Gaussian noise around the fitted mean, one draw per input point.
        return self.std * np.random.randn(independent_data.size) + self.mean
    def parameters(self):
        # Expose the fitted parameters by name.
        return {'mean': self.mean, 'std': self.std}
"pandas.DataFrame.from_records",
"pandas.DatetimeIndex",
"numpy.random.randn"
] | [((823, 854), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['data'], {}), '(data)\n', (848, 854), True, 'import pandas as pd\n'), ((958, 989), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (["data_pd['ts']"], {}), "(data_pd['ts'])\n", (974, 989), True, 'import pandas as pd\n'), ((1453, 1491), 'numpy.random.randn', 'np.random.randn', (['independent_data.size'], {}), '(independent_data.size)\n', (1468, 1491), True, 'import numpy as np\n')] |
# Linear-regression feature importance: fit an ordinary least-squares model
# on synthetic data and rank features by their learned coefficients.
from sklearn.datasets import make_regression
from sklearn.linear_model import LinearRegression
from matplotlib import pyplot
# Synthetic regression problem: 10 features, 5 of them informative.
X, y = make_regression(n_samples=1000, n_features=10, n_informative=5, random_state=1)
# Fit the model to the data.
model = LinearRegression()
model.fit(X, y)
# For a linear model, the coefficients double as importance scores.
importance = model.coef_
for idx, score in enumerate(importance):
    print('Feature: %0d, Score: %.5f' % (idx, score))
# Visualise the scores as a bar chart.
pyplot.bar(range(len(importance)), importance)
pyplot.show()
| [
"sklearn.datasets.make_regression",
"sklearn.linear_model.LinearRegression",
"matplotlib.pyplot.show"
] | [((188, 267), 'sklearn.datasets.make_regression', 'make_regression', ([], {'n_samples': '(1000)', 'n_features': '(10)', 'n_informative': '(5)', 'random_state': '(1)'}), '(n_samples=1000, n_features=10, n_informative=5, random_state=1)\n', (203, 267), False, 'from sklearn.datasets import make_regression\n'), ((295, 313), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (311, 313), False, 'from sklearn.linear_model import LinearRegression\n'), ((583, 596), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (594, 596), False, 'from matplotlib import pyplot\n')] |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import logging
import os
import os.path
import sys
from termcolor import colored
import shutil
# Public API; logging method/level names are appended below at import time.
__all__ = ['set_dir', 'get_dir', 'set_level']
# globals: logger file and directory:
LOG_DIR = None  # directory currently receiving the log file (None until set_dir)
_FILE_HANDLER = None  # active logging.FileHandler, if any
def _makedirs(dirname):
assert dirname is not None
if dirname == '' or os.path.isdir(dirname):
return
try:
os.makedirs(dirname)
except OSError as e:
if e.errno != errno.EEXIST:
raise e
class _Formatter(logging.Formatter):
    """Log formatter that colours the ``[time thread @file:line]`` prefix
    (plus a WRN/ERR/DEBUG tag) with termcolor, per record level."""

    def format(self, record):
        msg = '%(message)s'
        if record.levelno == logging.WARNING:
            date = colored(
                '[%(asctime)s %(threadName)s @%(filename)s:%(lineno)d]',
                'yellow')
            fmt = date + ' ' + colored(
                'WRN', 'yellow', attrs=['blink']) + ' ' + msg
        elif record.levelno == logging.ERROR or record.levelno == logging.CRITICAL:
            date = colored(
                '[%(asctime)s %(threadName)s @%(filename)s:%(lineno)d]', 'red')
            # Bug fix: the original first built a stray yellow 'WRN'
            # format string here (copy-paste) and immediately overwrote
            # it; only the red 'ERR' tag is kept.
            fmt = date + ' ' + colored(
                'ERR', 'red', attrs=['blink', 'underline']) + ' ' + msg
        elif record.levelno == logging.DEBUG:
            date = colored(
                '[%(asctime)s %(threadName)s @%(filename)s:%(lineno)d]',
                'blue')
            fmt = date + ' ' + colored(
                'DEBUG', 'blue', attrs=['blink']) + ' ' + msg
        else:
            date = colored(
                '[%(asctime)s %(threadName)s @%(filename)s:%(lineno)d]',
                'green')
            fmt = date + ' ' + msg
        if hasattr(self, '_style'):
            # Python3 compatibility: the format string lives on _style.
            self._style._fmt = fmt
        self._fmt = fmt
        return super(_Formatter, self).format(record)
def _getlogger():
    """Build the package-level 'PARL' logger: DEBUG level, propagation
    disabled, and a single coloured handler writing to stdout."""
    log = logging.getLogger('PARL')
    log.propagate = False
    log.setLevel(logging.DEBUG)
    stream = logging.StreamHandler(sys.stdout)
    stream.setFormatter(_Formatter(datefmt='%m-%d %H:%M:%S'))
    log.addHandler(stream)
    return log
# The singleton module logger; its methods are re-exported below.
_logger = _getlogger()
_LOGGING_METHOD = [
    'info', 'warning', 'error', 'critical', 'warn', 'exception', 'debug',
    'setLevel'
]
# export logger functions
for func in _LOGGING_METHOD:
    # Re-export each logger method as a module-level function.
    locals()[func] = getattr(_logger, func)
    __all__.append(func)
# export Level information
_LOGGING_LEVEL = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']
for level in _LOGGING_LEVEL:
    # Mirror the standard logging level constants at module scope.
    locals()[level] = getattr(logging, level)
    __all__.append(level)
def _set_file(path):
    """Attach a file handler writing to *path*, best-effort deleting any
    existing file there first (mode='w' truncates anyway)."""
    global _FILE_HANDLER
    if os.path.isfile(path):
        try:
            os.remove(path)
        except OSError:
            # Best effort only: failure to delete is harmless.
            pass
    hdl = logging.FileHandler(filename=path, encoding='utf-8', mode='w')
    hdl.setFormatter(_Formatter(datefmt='%m-%d %H:%M:%S'))
    _FILE_HANDLER = hdl
    _logger.addHandler(hdl)
def set_level(level):
    """Change the logger level, recreating the file handler first.

    NOTE(review): assumes set_dir() has already run (it does at import
    time when __main__ has a __file__); otherwise get_dir() is None.
    """
    # To set level, need create new handler
    set_dir(get_dir())
    _logger.setLevel(level)
def set_dir(dirname):
    """Direct file logging to *dirname*/log.log, creating the directory
    if needed and dropping any previously attached file handler."""
    global LOG_DIR, _FILE_HANDLER
    if _FILE_HANDLER:
        # unload and close the old file handler, so that we may safely delete the logger directory
        _logger.removeHandler(_FILE_HANDLER)
        del _FILE_HANDLER
    if not os.path.isdir(dirname):
        _makedirs(dirname)
    LOG_DIR = dirname
    _set_file(os.path.join(dirname, 'log.log'))
def get_dir():
    """Return the directory currently receiving log files (None if unset)."""
    return LOG_DIR
# Will save log to log_dir/main_file_name/log.log by default
mod = sys.modules['__main__']
if hasattr(mod, '__file__'):
    basename = os.path.basename(mod.__file__)
    auto_dirname = os.path.join('log_dir', basename[:basename.rfind('.')])
    # Start each run with a clean per-script log directory.
    shutil.rmtree(auto_dirname, ignore_errors=True)
    set_dir(auto_dirname)
_logger.info("Argv: " + ' '.join(sys.argv))
| [
"logging.getLogger",
"logging.StreamHandler",
"termcolor.colored",
"os.makedirs",
"os.path.join",
"os.path.isfile",
"os.path.isdir",
"logging.FileHandler",
"os.path.basename",
"shutil.rmtree",
"os.remove"
] | [((2526, 2551), 'logging.getLogger', 'logging.getLogger', (['"""PARL"""'], {}), "('PARL')\n", (2543, 2551), False, 'import logging\n'), ((2630, 2663), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (2651, 2663), False, 'import logging\n'), ((3287, 3307), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (3301, 3307), False, 'import os\n'), ((3401, 3463), 'logging.FileHandler', 'logging.FileHandler', ([], {'filename': 'path', 'encoding': '"""utf-8"""', 'mode': '"""w"""'}), "(filename=path, encoding='utf-8', mode='w')\n", (3420, 3463), False, 'import logging\n'), ((4251, 4281), 'os.path.basename', 'os.path.basename', (['mod.__file__'], {}), '(mod.__file__)\n', (4267, 4281), False, 'import os\n'), ((4361, 4408), 'shutil.rmtree', 'shutil.rmtree', (['auto_dirname'], {'ignore_errors': '(True)'}), '(auto_dirname, ignore_errors=True)\n', (4374, 4408), False, 'import shutil\n'), ((924, 946), 'os.path.isdir', 'os.path.isdir', (['dirname'], {}), '(dirname)\n', (937, 946), False, 'import os\n'), ((980, 1000), 'os.makedirs', 'os.makedirs', (['dirname'], {}), '(dirname)\n', (991, 1000), False, 'import os\n'), ((3957, 3979), 'os.path.isdir', 'os.path.isdir', (['dirname'], {}), '(dirname)\n', (3970, 3979), False, 'import os\n'), ((4044, 4076), 'os.path.join', 'os.path.join', (['dirname', '"""log.log"""'], {}), "(dirname, 'log.log')\n", (4056, 4076), False, 'import os\n'), ((1244, 1318), 'termcolor.colored', 'colored', (['"""[%(asctime)s %(threadName)s @%(filename)s:%(lineno)d]"""', '"""yellow"""'], {}), "('[%(asctime)s %(threadName)s @%(filename)s:%(lineno)d]', 'yellow')\n", (1251, 1318), False, 'from termcolor import colored\n'), ((3334, 3349), 'os.remove', 'os.remove', (['path'], {}), '(path)\n', (3343, 3349), False, 'import os\n'), ((1557, 1628), 'termcolor.colored', 'colored', (['"""[%(asctime)s %(threadName)s @%(filename)s:%(lineno)d]"""', '"""red"""'], {}), "('[%(asctime)s %(threadName)s @%(filename)s:%(lineno)d]', 
'red')\n", (1564, 1628), False, 'from termcolor import colored\n'), ((1925, 1997), 'termcolor.colored', 'colored', (['"""[%(asctime)s %(threadName)s @%(filename)s:%(lineno)d]"""', '"""blue"""'], {}), "('[%(asctime)s %(threadName)s @%(filename)s:%(lineno)d]', 'blue')\n", (1932, 1997), False, 'from termcolor import colored\n'), ((2166, 2239), 'termcolor.colored', 'colored', (['"""[%(asctime)s %(threadName)s @%(filename)s:%(lineno)d]"""', '"""green"""'], {}), "('[%(asctime)s %(threadName)s @%(filename)s:%(lineno)d]', 'green')\n", (2173, 2239), False, 'from termcolor import colored\n'), ((1383, 1424), 'termcolor.colored', 'colored', (['"""WRN"""', '"""yellow"""'], {'attrs': "['blink']"}), "('WRN', 'yellow', attrs=['blink'])\n", (1390, 1424), False, 'from termcolor import colored\n'), ((1677, 1718), 'termcolor.colored', 'colored', (['"""WRN"""', '"""yellow"""'], {'attrs': "['blink']"}), "('WRN', 'yellow', attrs=['blink'])\n", (1684, 1718), False, 'from termcolor import colored\n'), ((1779, 1830), 'termcolor.colored', 'colored', (['"""ERR"""', '"""red"""'], {'attrs': "['blink', 'underline']"}), "('ERR', 'red', attrs=['blink', 'underline'])\n", (1786, 1830), False, 'from termcolor import colored\n'), ((2062, 2103), 'termcolor.colored', 'colored', (['"""DEBUG"""', '"""blue"""'], {'attrs': "['blink']"}), "('DEBUG', 'blue', attrs=['blink'])\n", (2069, 2103), False, 'from termcolor import colored\n')] |
#!/usr/bin/env python
"""
juc2/examples/example_02.py
Move a rectangle across the terminal. <3
"""
from juc2 import art, Stage
# A framed 80x40 stage with a rectangle starting near the top-left corner.
stage = Stage(height=40, width=80, frame=True)
rectangle = art.Shapes.Rectangle(width=10, height=5, x=5, y=5)
# Redraw at 4 FPS, sliding the rectangle right until it reaches x == 60.
while True:
    stage.draw(rectangle, FPS=4)
    if rectangle.x < 60:
        rectangle.x += 1
"juc2.Stage",
"juc2.art.Shapes.Rectangle"
] | [((139, 177), 'juc2.Stage', 'Stage', ([], {'height': '(40)', 'width': '(80)', 'frame': '(True)'}), '(height=40, width=80, frame=True)\n', (144, 177), False, 'from juc2 import art, Stage\n'), ((190, 240), 'juc2.art.Shapes.Rectangle', 'art.Shapes.Rectangle', ([], {'width': '(10)', 'height': '(5)', 'x': '(5)', 'y': '(5)'}), '(width=10, height=5, x=5, y=5)\n', (210, 240), False, 'from juc2 import art, Stage\n')] |
r"""
This module is a ITK Web server application.
The following command line illustrates how to use it::
$ python .../server/itk-tube.py --data /.../path-to-your-data-file
--data
Path to file to load.
Any WSLink executable script comes with a set of standard arguments that can be overriden if need be::
--port 8080
Port number on which the HTTP server will listen.
--content /path-to-web-content/
Directory that you want to serve as static web content.
By default, this variable is empty which means that we rely on another
server to deliver the static content and the current process only
focuses on the WebSocket connectivity of clients.
"""
# import to process args
import os
import argparse
from json import JSONEncoder
import numpy as np
# import itk modules.
import itk
from itkTypes import itkCType
import ctypes
import sys
# Python 3 removed ``long``; alias it so the GetBufferPointer() cast in
# openVolume works on both major versions.
if sys.version_info > (3,0):
    long = int
# import Twisted reactor for later callback
from twisted.internet import reactor
# import Web connectivity
from wslink import register
from wslink import server
from wslink.websocket import LinkProtocol
# import tube utils
from tubeutils import GetTubePoints
# maps itk ctype to other types
# Each entry: itk ctype -> (ctypes type, JS typed-array name,
# bytes per element, kind character 'i'/'f').
itkCTypeToOthers = {
    itk.B: (ctypes.c_bool, 'UInt8Array', 1, 'i'),
    itk.D: (ctypes.c_double, 'Float64Array', 8, 'f'),
    itk.F: (ctypes.c_float, 'Float32Array', 4, 'f'),
    itk.LD: (ctypes.c_longdouble, 'Float64Array', 8, 'f'),
    itk.SC: (ctypes.c_char, 'Int8Array', 1, 'i'),
    itk.SI: (ctypes.c_int, 'Int32Array', 4, 'i'),
    itk.SL: (ctypes.c_long, 'Int32Array', 4, 'i'),
    itk.SLL: (ctypes.c_longlong, 'Int32Array', 4, 'i'),
    itk.SS: (ctypes.c_short, 'Int16Array', 2, 'i'),
    itk.UC: (ctypes.c_ubyte, 'UInt8Array', 1, 'i'),
    itk.UI: (ctypes.c_uint, 'UInt32Array', 4, 'i'),
    itk.UL: (ctypes.c_ulong, 'UInt32Array', 4, 'i'),
    itk.ULL: (ctypes.c_ulonglong, 'UInt32Array', 4, 'i'),
    itk.US: (ctypes.c_ushort, 'UInt16Array', 2, 'i'),
}
# preload itk modules here so we don't incur lazy load
# on user request.
itk.TranslationTransform
itk.CompositeTransform
itk.ScaleTransform
itk.SegmentTubes
itk.Image
itk.ImageFileReader
itk.ImageIOFactory
itk.SpatialObjectReader
# Monotonically increasing tube id source.
__id = 0
def get_next_id():
    '''Hand out sequential ids starting at 1.'''
    global __id
    __id = __id + 1
    return __id
def reset_id_counter():
    '''Restart the id sequence at zero (used per loaded volume).'''
    global __id
    __id = 0
class Tube(JSONEncoder):
    """A segmented tube: identity, extraction parameters, status, colour,
    the underlying ITK tube object, and a lazily built centerline mesh."""

    def __init__(self, _id=-1, parent=-1, params=None, status='pending', color=None, **kwargs):
        super(Tube, self).__init__(**kwargs)
        self.id = _id
        self.parent = parent
        self.params = params or dict()
        self.status = status
        self.color = color or [1, 0, 0]  # falls back to red
        self.tube = None     # ITK tube object, attached after extraction
        self._mesh = None    # cached centerline, built on first access

    @property
    def mesh(self):
        """Centerline as [(xyz, radius), ...] in world coordinates, or
        None until an ITK tube is attached; computed once and cached."""
        if not self.tube:
            return None
        if self._mesh:
            return self._mesh
        pts = GetTubePoints(self.tube)
        # Map tube-local points (and radii) into world coordinates.
        self.tube.ComputeObjectToWorldTransform()
        xform = self.tube.GetIndexToWorldTransform()
        diag = [xform.GetMatrix()(i, i) for i in range(3)]
        avg_scale = sum(diag) / len(diag)
        for idx, (pt, radius) in enumerate(pts):
            pts[idx] = (list(xform.TransformPoint(pt)), radius * avg_scale)
        self._mesh = pts
        return self._mesh

    def copyfrom(self, obj):
        '''Copies certain properties from a given dictionary.'''
        if type(obj) is not dict:
            raise Exception('Given object is not a dict!')
        for attr in ('id', 'parent', 'params', 'status', 'color'):
            setattr(self, attr, obj.get(attr, getattr(self, attr)))

    def serialize(self):
        """Return a JSON-friendly dict view of this tube, mesh included."""
        return {
            'id': self.id,
            'parent': self.parent,
            'params': self.params,
            'status': self.status,
            'color': self.color,
            'mesh': self.mesh,
        }
# =============================================================================
# Create Web Server to handle requests
# =============================================================================
class ItkTubeProtocol(LinkProtocol):
timelapse = 0.1 # Time in seconds
def __init__(self):
self.idToSpatialObject = dict()
# NOTE maybe not the most memory-efficient cache since we store points
# in array form here?
self.tubeCache = {}
self.pendingTubes = []
def loadDataFile(self, filename):
# Load file in ITK
self.loadItkImage(filename)
# setup image to world transform, since segmenttubes
# will use the world coords.
self.imageToWorldTransform = itk.CompositeTransform[itk.D, 3].New()
translate = itk.TranslationTransform[itk.D, 3].New()
translate.Translate(self.itkImage.GetOrigin())
scale = itk.ScaleTransform[itk.D, 3].New()
scale.Scale(self.itkImage.GetSpacing())
self.imageToWorldTransform.AppendTransform(translate)
self.imageToWorldTransform.AppendTransform(scale)
# setup segmenter
imgType = itk.Image[self.itkPixelType, self.dimensions]
self.segmentTubes = itk.SegmentTubes[imgType].New()
self.segmentTubes.SetInputImage(self.itkImage)
self.segmentTubes.SetDebug(True)
scaleVector = self.itkImage.GetSpacing()
offsetVector = self.itkImage.GetOrigin()
self.segmentTubes.GetTubeGroup().GetObjectToParentTransform() \
.SetScale(scaleVector)
self.segmentTubes.GetTubeGroup().GetObjectToParentTransform() \
.SetOffset(offsetVector)
self.segmentTubes.GetTubeGroup().GetObjectToParentTransform() \
.SetMatrix(self.itkImage.GetDirection())
self.segmentTubes.GetTubeGroup().ComputeObjectToWorldTransform()
# reset id counter between segments
reset_id_counter()
def scheduleQueueProcessing(self):
if len(self.pendingTubes) > 0:
reactor.callLater(ItkTubeProtocol.timelapse, self.processQueue)
def processQueue(self):
if len(self.pendingTubes) == 0:
return
itemToProcess = self.pendingTubes.pop(0)
# extract tube
seed = itk.Point[itk.D, self.dimensions](itemToProcess['position'])
index = self.itkImage.TransformPhysicalPointToContinuousIndex(seed)
scaleNorm = self.itkImage.GetSpacing()[0]
if itemToProcess['params']['scale']/scaleNorm < 0.3:
raise Exception('scale/scaleNorm < 0.3')
self.segmentTubes.SetRadius(itemToProcess['params']['scale']/scaleNorm)
tubeObj = self.segmentTubes.ExtractTube(index, itemToProcess['id'], True)
itemToProcess['status'] = 'done'
tube = Tube()
tube.copyfrom(itemToProcess)
if tubeObj:
self.segmentTubes.AddTube(tubeObj)
tube.tube = tubeObj
self.tubeCache[tube.id] = tube
# Publish any update
self.publish('itk.tube.mesh', tube.serialize())
# Reschedule ourself
self.scheduleQueueProcessing()
def loadItkImage(self, filename):
base = itk.ImageIOFactory.CreateImageIO(filename, itk.ImageIOFactory.ReadMode)
base.SetFileName(filename)
base.ReadImageInformation()
componentType = base.GetComponentType()
itkctype = itkCType.GetCType("float")
imageType = itk.Image[itkctype, base.GetNumberOfDimensions()]
reader = itk.ImageFileReader[imageType].New()
reader.SetFileName(filename)
reader.Update()
self.itkImage = reader.GetOutput()
self.itkPixelType = itkctype
self.dimensions = base.GetNumberOfDimensions()
@register('itk.volume.open')
def openVolume(self, filename):
self.loadDataFile(str(filename))
# Get ITK image data
imgCType, imgJsArrType, pixelSize, pixelDType = itkCTypeToOthers[self.itkPixelType]
pointer = long(self.itkImage.GetBufferPointer())
imageBuffer = ctypes.cast(pointer, ctypes.POINTER(imgCType))
size = self.itkImage.GetLargestPossibleRegion().GetSize()
length = size[0]*size[1]*size[2]
imgArray = np.ctypeslib.as_array(
(imgCType * length).from_address(ctypes.addressof(imageBuffer.contents)))
# Send data to client
return {
"extent": (0, size[0]-1, 0, size[1]-1, 0, size[2]-1),
"origin": list(self.itkImage.GetOrigin()),
"spacing": list(self.itkImage.GetSpacing()),
"typedArray": imgJsArrType,
"scalars": self.addAttachment(imgArray.tobytes()),
}
@register('itk.tube.save')
def saveTubes(self, filename):
dim = 3
tubeGroup = self.segmentTubes.GetTubeGroup()
writer = itk.SpatialObjectWriter[dim].New()
writer.SetFileName(str(filename))
writer.SetInput(tubeGroup)
writer.Update()
@register('itk.tube.generate')
def generateTube(self, coords, params):
coords = list(self.imageToWorldTransform.TransformPoint(coords))
itemToProcess = {
'id': get_next_id(),
'parent': -1, # denotes this tube's parent as not a tube
'position': coords,
'params': params,
'status': 'pending',
'color': [1, 0, 0], # default to red
}
self.pendingTubes.append(itemToProcess)
self.scheduleQueueProcessing()
return itemToProcess
@register('itk.tube.delete')
def deleteTube(self, tubeId):
tube = self.tubeCache[tubeId]
self.segmentTubes.DeleteTube(tube.tube)
del self.tubeCache[tubeId]
    @register('itk.tube.setcolor')
    def setTubeColor(self, tubeId, color):
        """RPC endpoint: set the display color of a cached tube.

        *color* is stored as-is on the tube record (e.g. ``[1, 0, 0]`` for
        red, matching the default used in generateTube).
        """
        self.tubeCache[tubeId].color = color
@register('itk.tube.reparent')
def reparentTubes(self, parent, children):
if type(parent) is not int or type(children) is not list:
raise Exception('Invalid arguments')
if parent in children:
raise Exception('Cannot have tube be parent of itself')
parentTube = self.tubeCache[parent].tube
for child in children:
# reparents child tube to parent tube
parentTube.AddSpatialObject(self.tubeCache[child].tube)
self.tubeCache[child].parent = parent
| [
"ctypes.addressof",
"ctypes.POINTER",
"itkTypes.itkCType.GetCType",
"wslink.register",
"tubeutils.GetTubePoints",
"itk.ImageIOFactory.CreateImageIO",
"twisted.internet.reactor.callLater"
] | [((8054, 8081), 'wslink.register', 'register', (['"""itk.volume.open"""'], {}), "('itk.volume.open')\n", (8062, 8081), False, 'from wslink import register\n'), ((8992, 9017), 'wslink.register', 'register', (['"""itk.tube.save"""'], {}), "('itk.tube.save')\n", (9000, 9017), False, 'from wslink import register\n'), ((9282, 9311), 'wslink.register', 'register', (['"""itk.tube.generate"""'], {}), "('itk.tube.generate')\n", (9290, 9311), False, 'from wslink import register\n'), ((9833, 9860), 'wslink.register', 'register', (['"""itk.tube.delete"""'], {}), "('itk.tube.delete')\n", (9841, 9860), False, 'from wslink import register\n'), ((10022, 10051), 'wslink.register', 'register', (['"""itk.tube.setcolor"""'], {}), "('itk.tube.setcolor')\n", (10030, 10051), False, 'from wslink import register\n'), ((10146, 10175), 'wslink.register', 'register', (['"""itk.tube.reparent"""'], {}), "('itk.tube.reparent')\n", (10154, 10175), False, 'from wslink import register\n'), ((3091, 3115), 'tubeutils.GetTubePoints', 'GetTubePoints', (['self.tube'], {}), '(self.tube)\n', (3104, 3115), False, 'from tubeutils import GetTubePoints\n'), ((7488, 7559), 'itk.ImageIOFactory.CreateImageIO', 'itk.ImageIOFactory.CreateImageIO', (['filename', 'itk.ImageIOFactory.ReadMode'], {}), '(filename, itk.ImageIOFactory.ReadMode)\n', (7520, 7559), False, 'import itk\n'), ((7699, 7725), 'itkTypes.itkCType.GetCType', 'itkCType.GetCType', (['"""float"""'], {}), "('float')\n", (7716, 7725), False, 'from itkTypes import itkCType\n'), ((6329, 6392), 'twisted.internet.reactor.callLater', 'reactor.callLater', (['ItkTubeProtocol.timelapse', 'self.processQueue'], {}), '(ItkTubeProtocol.timelapse, self.processQueue)\n', (6346, 6392), False, 'from twisted.internet import reactor\n'), ((8381, 8405), 'ctypes.POINTER', 'ctypes.POINTER', (['imgCType'], {}), '(imgCType)\n', (8395, 8405), False, 'import ctypes\n'), ((8606, 8644), 'ctypes.addressof', 'ctypes.addressof', (['imageBuffer.contents'], {}), 
'(imageBuffer.contents)\n', (8622, 8644), False, 'import ctypes\n')] |
# file eulxml/xmlmap/cerp.py
#
# Copyright 2010,2011 Emory University Libraries
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import codecs
import datetime
import email
import logging
import os
import six
from eulxml import xmlmap
from eulxml.utils.compat import u
# Module-level logger; used by Message.from_email_message for warnings.
logger = logging.getLogger(__name__)
# CERP is described at http://siarchives.si.edu/cerp/ . XML spec available at
# http://www.records.ncdcr.gov/emailpreservation/mail-account/mail-account_docs.html
# schema resolves but appears to be empty as of April 2016
# Current schema : http://www.history.ncdcr.gov/SHRAB/ar/emailpreservation/mail-account/mail-account.xsd
# internally-reused and general-utility objects
#
class _BaseCerp(xmlmap.XmlObject):
    'Common CERP namespace declarations'
    # Every CERP element lives in the mail-account XML namespace; the 'xm'
    # prefix is used by all XPath expressions in this module.
    ROOT_NS = 'http://www.archives.ncdcr.gov/mail-account'
    ROOT_NAMESPACES = { 'xm': ROOT_NS }
class Parameter(_BaseCerp):
    """A single name/value pair, e.g. a MIME content-type parameter."""
    ROOT_NAME = 'Parameter'
    name = xmlmap.StringField('xm:Name')
    value = xmlmap.StringField('xm:Value')

    def __str__(self):
        return '{0}={1}'.format(self.name, self.value)

    def __repr__(self):
        return '<{0} {1}>'.format(type(self).__name__, self)
class Header(_BaseCerp):
    """A single email header: a name, a value, and optional comments."""
    ROOT_NAME = 'Header'
    name = xmlmap.StringField('xm:Name')
    value = xmlmap.StringField('xm:Value')
    comments = xmlmap.StringListField('xm:Comments')

    def __str__(self):
        return '{0}: {1}'.format(self.name, self.value)

    def __repr__(self):
        return '<{0} {1}>'.format(type(self).__name__, self.name)
class _BaseBody(_BaseCerp):
    '''Common MIME body metadata elements (content type, encoding,
    disposition) shared by single- and multi-part message bodies.'''
    # Content-Type and its parameters/comments
    content_type_list = xmlmap.StringListField('xm:ContentType')
    charset_list = xmlmap.StringListField('xm:Charset')
    content_name_list = xmlmap.StringListField('xm:ContentName')
    content_type_comments_list = xmlmap.StringListField('xm:ContentTypeComments')
    content_type_param_list = xmlmap.NodeListField('xm:ContentTypeParam', Parameter)
    # Content-Transfer-Encoding
    transfer_encoding_list = xmlmap.StringListField('xm:TransferEncoding')
    transfer_encoding_comments_list = xmlmap.StringListField('xm:TransferEncodingComments')
    # Content-Id and Description
    content_id_list = xmlmap.StringListField('xm:ContentId')
    content_id_comments_list = xmlmap.StringListField('xm:ContentIdComments')
    description_list = xmlmap.StringListField('xm:Description')
    description_comments_list = xmlmap.StringListField('xm:DescriptionComments')
    # Content-Disposition and its parameters
    disposition_list = xmlmap.StringListField('xm:Disposition')
    disposition_file_name_list = xmlmap.StringListField('xm:DispositionFileName')
    disposition_comments_list = xmlmap.StringListField('xm:DispositionComments')
    disposition_params = xmlmap.NodeListField('xm:DispositionParams', Parameter)
    # Any MIME headers not covered by the fields above
    other_mime_headers = xmlmap.NodeListField('xm:OtherMimeHeader', Header)
class Hash(_BaseCerp):
    """A checksum value together with the hash function that produced it."""
    ROOT_NAME = 'Hash'
    HASH_FUNCTION_CHOICES = ['MD5', 'WHIRLPOOL', 'SHA1', 'SHA224',
                             'SHA256', 'SHA384', 'SHA512', 'RIPEMD160']
    value = xmlmap.StringField('xm:Value')
    function = xmlmap.StringField('xm:Function', choices=HASH_FUNCTION_CHOICES)

    def __str__(self):
        return self.value

    def __repr__(self):
        return '<{0} {1}>'.format(type(self).__name__, self.function)
class _BaseExternal(_BaseCerp):
    '''Common external entity reference elements'''
    # Line-ending convention of the referenced external file.
    EOL_CHOICES = [ 'CR', 'LF', 'CRLF' ]
    # Path to the external file, relative to the account/folder container.
    rel_path = xmlmap.StringField('xm:RelPath')
    eol = xmlmap.StringField('xm:Eol', choices=EOL_CHOICES)
    # Checksum of the referenced file (see Hash above).
    hash = xmlmap.NodeField('xm:Hash', Hash)
class _BaseContent(_BaseCerp):
    '''Common content encoding elements'''
    # NOTE: element name here is 'xm:CharSet' (capital S), unlike the
    # 'xm:Charset' used in _BaseBody.
    charset_list = xmlmap.StringListField('xm:CharSet')
    transfer_encoding_list = xmlmap.StringListField('xm:TransferEncoding')
#
# messages and bodies
#
class BodyContent(_BaseContent):
    '''Message body content stored inline in the XML document.'''
    ROOT_NAME = 'BodyContent'
    content = xmlmap.StringField('xm:Content')
class ExtBodyContent(_BaseExternal, _BaseContent):
    '''Message body content stored in an external file (see _BaseExternal
    for the file reference fields).'''
    ROOT_NAME = 'ExtBodyContent'
    local_id = xmlmap.IntegerField('xm:LocalId')
    # Whether the external file wraps the content in XML ('1') or not ('0').
    xml_wrapped = xmlmap.SimpleBooleanField('xm:XMLWrapped',
            true='1', false='0')
class SingleBody(_BaseBody):
    '''A non-multipart message body; content may be inline, external, or a
    nested child message.'''
    ROOT_NAME = 'SingleBody'
    body_content = xmlmap.NodeField('xm:BodyContent', BodyContent)
    ext_body_content = xmlmap.NodeField('xm:ExtBodyContent', ExtBodyContent)
    child_message = xmlmap.NodeField('xm:ChildMessage', None) # this will be fixed below
    @property
    def content(self):
        # Exactly one of the three content variants is expected; return
        # whichever is populated.
        return self.body_content or \
               self.ext_body_content or \
               self.child_message
    # Placeholder noting content intentionally omitted from the archive.
    phantom_body = xmlmap.StringField('xm:PhantomBody')
class MultiBody(_BaseCerp):
    '''A multipart message body; may nest further MultiBody elements.'''
    ROOT_NAME = 'MultiBody'
    preamble = xmlmap.StringField('xm:Preamble')
    epilogue = xmlmap.StringField('xm:Epilogue')
    single_body = xmlmap.NodeField('xm:SingleBody', SingleBody)
    # Recursive mapping: a MultiBody may directly contain another MultiBody.
    multi_body = xmlmap.NodeField('xm:MultiBody', 'self')
    @property
    def body(self):
        # Return whichever body variant is populated.
        return self.single_body or self.multi_body
class Incomplete(_BaseCerp):
    """Marker recording that a message could not be fully processed."""
    ROOT_NAME = 'Incomplete'
    error_type = xmlmap.StringField('xm:ErrorType')
    error_location = xmlmap.StringField('xm:ErrorLocation')

    def __repr__(self):
        return '<{0} {1}>'.format(type(self).__name__, self.error_type)
class _BaseMessage(_BaseCerp):
    '''Common message elements shared by Message and ChildMessage.'''
    local_id = xmlmap.IntegerField('xm:LocalId')
    message_id = xmlmap.StringField('xm:MessageId')
    # '@Supplied' attribute: whether the Message-Id came from the original
    # message (true='1') or was generated.
    message_id_supplied = xmlmap.SimpleBooleanField('xm:MessageId/@Supplied',
            true='1', false=None)
    mime_version = xmlmap.StringField('xm:MimeVersion')
    orig_date_list = xmlmap.StringListField('xm:OrigDate') # FIXME: really datetime
    # NOTE: eulxml.xmlmap.DateTimeField supports specifying format,
    # but we might need additional work since %z only works with
    # strftime, not strptime
    # Standard rfc822 address/reference headers, one list entry per header.
    from_list = xmlmap.StringListField('xm:From')
    sender_list = xmlmap.StringListField('xm:Sender')
    to_list = xmlmap.StringListField('xm:To')
    cc_list = xmlmap.StringListField('xm:Cc')
    bcc_list = xmlmap.StringListField('xm:Bcc')
    in_reply_to_list = xmlmap.StringListField('xm:InReplyTo')
    references_list = xmlmap.StringListField('xm:References')
    subject_list = xmlmap.StringListField('xm:Subject')
    comments_list = xmlmap.StringListField('xm:Comments')
    keywords_list = xmlmap.StringListField('xm:Keywords')
    headers = xmlmap.NodeListField('xm:Header', Header)
    single_body = xmlmap.NodeField('xm:SingleBody', SingleBody)
    multi_body = xmlmap.NodeField('xm:MultiBody', MultiBody)
    @property
    def body(self):
        # Return whichever body variant is populated.
        return self.single_body or self.multi_body
    # NOTE(review): named *_list but mapped as a single NodeField -- confirm
    # whether this should be a NodeListField.
    incomplete_list = xmlmap.NodeField('xm:Incomplete', Incomplete)
    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__,
                self.message_id or self.local_id or '(no id)')
class Message(_BaseMessage, _BaseExternal):
    """A single email message in a :class:`Folder`."""
    ROOT_NAME = 'Message'
    STATUS_FLAG_CHOICES = [ 'Seen', 'Answered', 'Flagged', 'Deleted',
                   'Draft', 'Recent']
    status_flags = xmlmap.StringListField('xm:StatusFlag',
            choices=STATUS_FLAG_CHOICES)

    @classmethod
    def from_email_message(cls, message, local_id=None):
        '''
        Convert an :class:`email.message.Message` or compatible message
        object into a CERP XML :class:`eulxml.xmlmap.cerp.Message`. If an
        id is specified, it will be stored in the Message <LocalId>.

        :param message: `email.message.Message` object
        :param local_id: optional message id to be set as `local_id`

        :returns: :class:`eulxml.xmlmap.cerp.Message` instance populated
            with message information
        '''

        result = cls()
        if local_id is not None:
            # BUGFIX: this previously assigned the builtin ``id`` function
            # instead of the ``local_id`` argument.
            result.local_id = local_id
        message_id = message.get('Message-Id')
        if message_id:
            result.message_id_supplied = True
            result.message_id = message_id
        result.mime_version = message.get('MIME-Version')

        dates = message.get_all('Date', [])
        result.orig_date_list.extend([parse_mail_date(d) for d in dates])
        result.from_list.extend(message.get_all('From', []))
        # NOTE(review): sender_list is populated from the 'From' header, not
        # 'Sender'; preserved as-is -- confirm whether this is intentional.
        result.sender_list.extend(message.get_all('From', []))
        try:
            result.to_list.extend(message.get_all('To', []))
        except UnicodeError:
            print(repr(message['To']))
            raise
        result.cc_list.extend(message.get_all('Cc', []))
        result.bcc_list.extend(message.get_all('Bcc', []))
        result.in_reply_to_list.extend(message.get_all('In-Reply-To', []))
        result.references_list.extend(message.get_all('References', []))
        result.subject_list.extend(message.get_all('Subject', []))
        result.comments_list.extend(message.get_all('Comments', []))
        result.keywords_list.extend(message.get_all('Keywords', []))
        # Mirror every raw header as a generic <Header> element.
        headers = [ Header(name=key, value=val) for key, val in message.items() ]
        result.headers.extend(headers)

        # FIXME: skip multipart messages for now
        if not message.is_multipart():
            result.create_single_body()

            # FIXME: this is a small subset of the actual elements CERP allows.
            # we should add the rest of them, too.

            # message.get_content_type() always returns something. only
            # put it in the CERP if a Content-Type was explicitly specified.
            if message['Content-Type']:
                result.single_body.content_type_list.append(message.get_content_type())
            if message.get_content_charset():
                result.single_body.charset_list.append(message.get_content_charset())
            if message.get_filename():
                result.single_body.content_name_list.append(message.get_filename())

            # FIXME: attaching the body_content only makes sense for text
            # content types. we'll eventually need a better solution for
            # non-text messages
            result.single_body.create_body_content()
            payload = message.get_payload(decode=False)

            # if not unicode, attempt to convert
            if isinstance(payload, six.binary_type):
                charset = message.get_charset()
                # decode according to the specified character set, if any
                if charset is not None:
                    charset_decoder = codecs.getdecoder(str(charset))
                    payload, length = charset_decoder(payload)

                # otherwise, just try to convert
                else:
                    payload = u(payload)

            # remove any control characters not allowed in XML
            control_char_map = dict.fromkeys(range(32))
            for i in [9, 10, 13]: # preserve horizontal tab, line feed, carriage return
                del control_char_map[i]

            payload = u(payload).translate(control_char_map)

            result.single_body.body_content.content = payload

        else:
            # TODO: handle multipart
            # logger.warn is a deprecated alias for logger.warning
            logger.warning('CERP conversion does not yet handle multipart')

        # assume we've normalized newlines:
        result.eol = EOLMAP[os.linesep]

        return result
class ChildMessage(_BaseMessage):
    '''A message nested inside another message's body (e.g. an attached
    rfc822 message); adds nothing beyond _BaseMessage.'''
    ROOT_NAME = 'ChildMessage'
    # no additional elements
# Patch-up from above. FIXME: This is necessary because of recursive
# NodeFields. eulxml.xmlmap.NodeField doesn't currently support these
# ChildMessage is defined after SingleBody, so the node class is bound here.
SingleBody.child_message.node_class = ChildMessage
#
# accounts and folders
#
class Mbox(_BaseExternal):
    '''Reference to an external mbox file (fields inherited from
    _BaseExternal).'''
    ROOT_NAME = 'Mbox'
    # no additional fields
class Folder(_BaseCerp):
    """A single email folder in an :class:`Account`, composed of multiple
    :class:`Message` objects and associated metadata."""
    ROOT_NAME = 'Folder'
    name = xmlmap.StringField('xm:Name')
    messages = xmlmap.NodeListField('xm:Message', Message)
    # Folders nest recursively.
    subfolders = xmlmap.NodeListField('xm:Folder', 'self')
    mboxes = xmlmap.NodeListField('xm:Mbox', Mbox)

    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__, self.name)
class ReferencesAccount(_BaseCerp):
    '''A link from one account to related content in another account.'''
    ROOT_NAME = 'ReferencesAccount'
    REF_TYPE_CHOICES = [ 'PreviousContent', 'SubsequentContent',
                         'Supplemental', 'SeeAlso', 'SeeInstead' ]
    href = xmlmap.StringField('xm:Href')
    email_address = xmlmap.StringField('xm:EmailAddress')
    reference_type = xmlmap.StringField('xm:RefType',
            choices=REF_TYPE_CHOICES)
class Account(_BaseCerp):
    """A single email account associated with a single email address and
    composed of multiple :class:`Folder` objects and additional metadata."""
    ROOT_NAME = 'Account'
    XSD_SCHEMA = 'http://www.history.ncdcr.gov/SHRAB/ar/emailpreservation/mail-account/mail-account.xsd'
    email_address = xmlmap.StringField('xm:EmailAddress')
    global_id = xmlmap.StringField('xm:GlobalId')
    references_accounts = xmlmap.NodeListField('xm:ReferencesAccount',
            ReferencesAccount)
    folders = xmlmap.NodeListField('xm:Folder', Folder)

    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__,
                self.global_id or self.email_address or '(no id)')
def parse_mail_date(datestr):
    """Convert an rfc822 date string to ISO 8601.

    Helper used by :meth:`Message.from_email_message`.  If *datestr*
    cannot be parsed as an rfc822 date, it is returned unchanged.

    :param datestr: string containing a date in rfc822 format
    :returns: string with the date in ISO 8601 format
    """
    parsed = email.utils.parsedate_tz(datestr)
    if parsed is None:
        # not an rfc822 date; pass the original string through untouched
        return datestr
    timestamp = email.utils.mktime_tz(parsed)
    return datetime.datetime.fromtimestamp(timestamp).isoformat()
# Map os.linesep values to the CERP <Eol> vocabulary (see _BaseExternal).
EOLMAP = {
    '\r': 'CR',
    '\n': 'LF',
    '\r\n': 'CRLF',
}
| [
"logging.getLogger",
"email.utils.parsedate_tz",
"eulxml.xmlmap.StringListField",
"eulxml.xmlmap.SimpleBooleanField",
"eulxml.xmlmap.NodeListField",
"eulxml.xmlmap.NodeField",
"eulxml.xmlmap.IntegerField",
"email.utils.mktime_tz",
"eulxml.utils.compat.u",
"eulxml.xmlmap.StringField"
] | [((838, 865), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (855, 865), False, 'import logging\n'), ((1489, 1518), 'eulxml.xmlmap.StringField', 'xmlmap.StringField', (['"""xm:Name"""'], {}), "('xm:Name')\n", (1507, 1518), False, 'from eulxml import xmlmap\n'), ((1531, 1561), 'eulxml.xmlmap.StringField', 'xmlmap.StringField', (['"""xm:Value"""'], {}), "('xm:Value')\n", (1549, 1561), False, 'from eulxml import xmlmap\n'), ((1787, 1816), 'eulxml.xmlmap.StringField', 'xmlmap.StringField', (['"""xm:Name"""'], {}), "('xm:Name')\n", (1805, 1816), False, 'from eulxml import xmlmap\n'), ((1829, 1859), 'eulxml.xmlmap.StringField', 'xmlmap.StringField', (['"""xm:Value"""'], {}), "('xm:Value')\n", (1847, 1859), False, 'from eulxml import xmlmap\n'), ((1875, 1912), 'eulxml.xmlmap.StringListField', 'xmlmap.StringListField', (['"""xm:Comments"""'], {}), "('xm:Comments')\n", (1897, 1912), False, 'from eulxml import xmlmap\n'), ((2169, 2209), 'eulxml.xmlmap.StringListField', 'xmlmap.StringListField', (['"""xm:ContentType"""'], {}), "('xm:ContentType')\n", (2191, 2209), False, 'from eulxml import xmlmap\n'), ((2229, 2265), 'eulxml.xmlmap.StringListField', 'xmlmap.StringListField', (['"""xm:Charset"""'], {}), "('xm:Charset')\n", (2251, 2265), False, 'from eulxml import xmlmap\n'), ((2290, 2330), 'eulxml.xmlmap.StringListField', 'xmlmap.StringListField', (['"""xm:ContentName"""'], {}), "('xm:ContentName')\n", (2312, 2330), False, 'from eulxml import xmlmap\n'), ((2364, 2412), 'eulxml.xmlmap.StringListField', 'xmlmap.StringListField', (['"""xm:ContentTypeComments"""'], {}), "('xm:ContentTypeComments')\n", (2386, 2412), False, 'from eulxml import xmlmap\n'), ((2443, 2497), 'eulxml.xmlmap.NodeListField', 'xmlmap.NodeListField', (['"""xm:ContentTypeParam"""', 'Parameter'], {}), "('xm:ContentTypeParam', Parameter)\n", (2463, 2497), False, 'from eulxml import xmlmap\n'), ((2527, 2572), 'eulxml.xmlmap.StringListField', 'xmlmap.StringListField', 
(['"""xm:TransferEncoding"""'], {}), "('xm:TransferEncoding')\n", (2549, 2572), False, 'from eulxml import xmlmap\n'), ((2611, 2664), 'eulxml.xmlmap.StringListField', 'xmlmap.StringListField', (['"""xm:TransferEncodingComments"""'], {}), "('xm:TransferEncodingComments')\n", (2633, 2664), False, 'from eulxml import xmlmap\n'), ((2687, 2725), 'eulxml.xmlmap.StringListField', 'xmlmap.StringListField', (['"""xm:ContentId"""'], {}), "('xm:ContentId')\n", (2709, 2725), False, 'from eulxml import xmlmap\n'), ((2757, 2803), 'eulxml.xmlmap.StringListField', 'xmlmap.StringListField', (['"""xm:ContentIdComments"""'], {}), "('xm:ContentIdComments')\n", (2779, 2803), False, 'from eulxml import xmlmap\n'), ((2827, 2867), 'eulxml.xmlmap.StringListField', 'xmlmap.StringListField', (['"""xm:Description"""'], {}), "('xm:Description')\n", (2849, 2867), False, 'from eulxml import xmlmap\n'), ((2900, 2948), 'eulxml.xmlmap.StringListField', 'xmlmap.StringListField', (['"""xm:DescriptionComments"""'], {}), "('xm:DescriptionComments')\n", (2922, 2948), False, 'from eulxml import xmlmap\n'), ((2972, 3012), 'eulxml.xmlmap.StringListField', 'xmlmap.StringListField', (['"""xm:Disposition"""'], {}), "('xm:Disposition')\n", (2994, 3012), False, 'from eulxml import xmlmap\n'), ((3046, 3094), 'eulxml.xmlmap.StringListField', 'xmlmap.StringListField', (['"""xm:DispositionFileName"""'], {}), "('xm:DispositionFileName')\n", (3068, 3094), False, 'from eulxml import xmlmap\n'), ((3127, 3175), 'eulxml.xmlmap.StringListField', 'xmlmap.StringListField', (['"""xm:DispositionComments"""'], {}), "('xm:DispositionComments')\n", (3149, 3175), False, 'from eulxml import xmlmap\n'), ((3202, 3257), 'eulxml.xmlmap.NodeListField', 'xmlmap.NodeListField', (['"""xm:DispositionParams"""', 'Parameter'], {}), "('xm:DispositionParams', Parameter)\n", (3222, 3257), False, 'from eulxml import xmlmap\n'), ((3283, 3333), 'eulxml.xmlmap.NodeListField', 'xmlmap.NodeListField', (['"""xm:OtherMimeHeader"""', 'Header'], {}), 
"('xm:OtherMimeHeader', Header)\n", (3303, 3333), False, 'from eulxml import xmlmap\n'), ((3536, 3566), 'eulxml.xmlmap.StringField', 'xmlmap.StringField', (['"""xm:Value"""'], {}), "('xm:Value')\n", (3554, 3566), False, 'from eulxml import xmlmap\n'), ((3582, 3646), 'eulxml.xmlmap.StringField', 'xmlmap.StringField', (['"""xm:Function"""'], {'choices': 'HASH_FUNCTION_CHOICES'}), "('xm:Function', choices=HASH_FUNCTION_CHOICES)\n", (3600, 3646), False, 'from eulxml import xmlmap\n'), ((3945, 3977), 'eulxml.xmlmap.StringField', 'xmlmap.StringField', (['"""xm:RelPath"""'], {}), "('xm:RelPath')\n", (3963, 3977), False, 'from eulxml import xmlmap\n'), ((3988, 4037), 'eulxml.xmlmap.StringField', 'xmlmap.StringField', (['"""xm:Eol"""'], {'choices': 'EOL_CHOICES'}), "('xm:Eol', choices=EOL_CHOICES)\n", (4006, 4037), False, 'from eulxml import xmlmap\n'), ((4049, 4082), 'eulxml.xmlmap.NodeField', 'xmlmap.NodeField', (['"""xm:Hash"""', 'Hash'], {}), "('xm:Hash', Hash)\n", (4065, 4082), False, 'from eulxml import xmlmap\n'), ((4178, 4214), 'eulxml.xmlmap.StringListField', 'xmlmap.StringListField', (['"""xm:CharSet"""'], {}), "('xm:CharSet')\n", (4200, 4214), False, 'from eulxml import xmlmap\n'), ((4244, 4289), 'eulxml.xmlmap.StringListField', 'xmlmap.StringListField', (['"""xm:TransferEncoding"""'], {}), "('xm:TransferEncoding')\n", (4266, 4289), False, 'from eulxml import xmlmap\n'), ((4396, 4428), 'eulxml.xmlmap.StringField', 'xmlmap.StringField', (['"""xm:Content"""'], {}), "('xm:Content')\n", (4414, 4428), False, 'from eulxml import xmlmap\n'), ((4530, 4563), 'eulxml.xmlmap.IntegerField', 'xmlmap.IntegerField', (['"""xm:LocalId"""'], {}), "('xm:LocalId')\n", (4549, 4563), False, 'from eulxml import xmlmap\n'), ((4582, 4645), 'eulxml.xmlmap.SimpleBooleanField', 'xmlmap.SimpleBooleanField', (['"""xm:XMLWrapped"""'], {'true': '"""1"""', 'false': '"""0"""'}), "('xm:XMLWrapped', true='1', false='0')\n", (4607, 4645), False, 'from eulxml import xmlmap\n'), ((4738, 4785), 
'eulxml.xmlmap.NodeField', 'xmlmap.NodeField', (['"""xm:BodyContent"""', 'BodyContent'], {}), "('xm:BodyContent', BodyContent)\n", (4754, 4785), False, 'from eulxml import xmlmap\n'), ((4809, 4862), 'eulxml.xmlmap.NodeField', 'xmlmap.NodeField', (['"""xm:ExtBodyContent"""', 'ExtBodyContent'], {}), "('xm:ExtBodyContent', ExtBodyContent)\n", (4825, 4862), False, 'from eulxml import xmlmap\n'), ((4883, 4924), 'eulxml.xmlmap.NodeField', 'xmlmap.NodeField', (['"""xm:ChildMessage"""', 'None'], {}), "('xm:ChildMessage', None)\n", (4899, 4924), False, 'from eulxml import xmlmap\n'), ((5123, 5159), 'eulxml.xmlmap.StringField', 'xmlmap.StringField', (['"""xm:PhantomBody"""'], {}), "('xm:PhantomBody')\n", (5141, 5159), False, 'from eulxml import xmlmap\n'), ((5233, 5266), 'eulxml.xmlmap.StringField', 'xmlmap.StringField', (['"""xm:Preamble"""'], {}), "('xm:Preamble')\n", (5251, 5266), False, 'from eulxml import xmlmap\n'), ((5282, 5315), 'eulxml.xmlmap.StringField', 'xmlmap.StringField', (['"""xm:Epilogue"""'], {}), "('xm:Epilogue')\n", (5300, 5315), False, 'from eulxml import xmlmap\n'), ((5335, 5380), 'eulxml.xmlmap.NodeField', 'xmlmap.NodeField', (['"""xm:SingleBody"""', 'SingleBody'], {}), "('xm:SingleBody', SingleBody)\n", (5351, 5380), False, 'from eulxml import xmlmap\n'), ((5398, 5438), 'eulxml.xmlmap.NodeField', 'xmlmap.NodeField', (['"""xm:MultiBody"""', '"""self"""'], {}), "('xm:MultiBody', 'self')\n", (5414, 5438), False, 'from eulxml import xmlmap\n'), ((5601, 5635), 'eulxml.xmlmap.StringField', 'xmlmap.StringField', (['"""xm:ErrorType"""'], {}), "('xm:ErrorType')\n", (5619, 5635), False, 'from eulxml import xmlmap\n'), ((5657, 5695), 'eulxml.xmlmap.StringField', 'xmlmap.StringField', (['"""xm:ErrorLocation"""'], {}), "('xm:ErrorLocation')\n", (5675, 5695), False, 'from eulxml import xmlmap\n'), ((5873, 5906), 'eulxml.xmlmap.IntegerField', 'xmlmap.IntegerField', (['"""xm:LocalId"""'], {}), "('xm:LocalId')\n", (5892, 5906), False, 'from eulxml import xmlmap\n'), 
((5924, 5958), 'eulxml.xmlmap.StringField', 'xmlmap.StringField', (['"""xm:MessageId"""'], {}), "('xm:MessageId')\n", (5942, 5958), False, 'from eulxml import xmlmap\n'), ((5985, 6058), 'eulxml.xmlmap.SimpleBooleanField', 'xmlmap.SimpleBooleanField', (['"""xm:MessageId/@Supplied"""'], {'true': '"""1"""', 'false': 'None'}), "('xm:MessageId/@Supplied', true='1', false=None)\n", (6010, 6058), False, 'from eulxml import xmlmap\n'), ((6090, 6126), 'eulxml.xmlmap.StringField', 'xmlmap.StringField', (['"""xm:MimeVersion"""'], {}), "('xm:MimeVersion')\n", (6108, 6126), False, 'from eulxml import xmlmap\n'), ((6148, 6185), 'eulxml.xmlmap.StringListField', 'xmlmap.StringListField', (['"""xm:OrigDate"""'], {}), "('xm:OrigDate')\n", (6170, 6185), False, 'from eulxml import xmlmap\n'), ((6389, 6422), 'eulxml.xmlmap.StringListField', 'xmlmap.StringListField', (['"""xm:From"""'], {}), "('xm:From')\n", (6411, 6422), False, 'from eulxml import xmlmap\n'), ((6441, 6476), 'eulxml.xmlmap.StringListField', 'xmlmap.StringListField', (['"""xm:Sender"""'], {}), "('xm:Sender')\n", (6463, 6476), False, 'from eulxml import xmlmap\n'), ((6491, 6522), 'eulxml.xmlmap.StringListField', 'xmlmap.StringListField', (['"""xm:To"""'], {}), "('xm:To')\n", (6513, 6522), False, 'from eulxml import xmlmap\n'), ((6537, 6568), 'eulxml.xmlmap.StringListField', 'xmlmap.StringListField', (['"""xm:Cc"""'], {}), "('xm:Cc')\n", (6559, 6568), False, 'from eulxml import xmlmap\n'), ((6584, 6616), 'eulxml.xmlmap.StringListField', 'xmlmap.StringListField', (['"""xm:Bcc"""'], {}), "('xm:Bcc')\n", (6606, 6616), False, 'from eulxml import xmlmap\n'), ((6640, 6678), 'eulxml.xmlmap.StringListField', 'xmlmap.StringListField', (['"""xm:InReplyTo"""'], {}), "('xm:InReplyTo')\n", (6662, 6678), False, 'from eulxml import xmlmap\n'), ((6701, 6740), 'eulxml.xmlmap.StringListField', 'xmlmap.StringListField', (['"""xm:References"""'], {}), "('xm:References')\n", (6723, 6740), False, 'from eulxml import xmlmap\n'), ((6760, 6796), 
'eulxml.xmlmap.StringListField', 'xmlmap.StringListField', (['"""xm:Subject"""'], {}), "('xm:Subject')\n", (6782, 6796), False, 'from eulxml import xmlmap\n'), ((6817, 6854), 'eulxml.xmlmap.StringListField', 'xmlmap.StringListField', (['"""xm:Comments"""'], {}), "('xm:Comments')\n", (6839, 6854), False, 'from eulxml import xmlmap\n'), ((6875, 6912), 'eulxml.xmlmap.StringListField', 'xmlmap.StringListField', (['"""xm:Keywords"""'], {}), "('xm:Keywords')\n", (6897, 6912), False, 'from eulxml import xmlmap\n'), ((6927, 6968), 'eulxml.xmlmap.NodeListField', 'xmlmap.NodeListField', (['"""xm:Header"""', 'Header'], {}), "('xm:Header', Header)\n", (6947, 6968), False, 'from eulxml import xmlmap\n'), ((6988, 7033), 'eulxml.xmlmap.NodeField', 'xmlmap.NodeField', (['"""xm:SingleBody"""', 'SingleBody'], {}), "('xm:SingleBody', SingleBody)\n", (7004, 7033), False, 'from eulxml import xmlmap\n'), ((7051, 7094), 'eulxml.xmlmap.NodeField', 'xmlmap.NodeField', (['"""xm:MultiBody"""', 'MultiBody'], {}), "('xm:MultiBody', MultiBody)\n", (7067, 7094), False, 'from eulxml import xmlmap\n'), ((7203, 7248), 'eulxml.xmlmap.NodeField', 'xmlmap.NodeField', (['"""xm:Incomplete"""', 'Incomplete'], {}), "('xm:Incomplete', Incomplete)\n", (7219, 7248), False, 'from eulxml import xmlmap\n'), ((7654, 7722), 'eulxml.xmlmap.StringListField', 'xmlmap.StringListField', (['"""xm:StatusFlag"""'], {'choices': 'STATUS_FLAG_CHOICES'}), "('xm:StatusFlag', choices=STATUS_FLAG_CHOICES)\n", (7676, 7722), False, 'from eulxml import xmlmap\n'), ((12369, 12398), 'eulxml.xmlmap.StringField', 'xmlmap.StringField', (['"""xm:Name"""'], {}), "('xm:Name')\n", (12387, 12398), False, 'from eulxml import xmlmap\n'), ((12414, 12457), 'eulxml.xmlmap.NodeListField', 'xmlmap.NodeListField', (['"""xm:Message"""', 'Message'], {}), "('xm:Message', Message)\n", (12434, 12457), False, 'from eulxml import xmlmap\n'), ((12475, 12516), 'eulxml.xmlmap.NodeListField', 'xmlmap.NodeListField', (['"""xm:Folder"""', '"""self"""'], {}), 
"('xm:Folder', 'self')\n", (12495, 12516), False, 'from eulxml import xmlmap\n'), ((12530, 12567), 'eulxml.xmlmap.NodeListField', 'xmlmap.NodeListField', (['"""xm:Mbox"""', 'Mbox'], {}), "('xm:Mbox', Mbox)\n", (12550, 12567), False, 'from eulxml import xmlmap\n'), ((12875, 12904), 'eulxml.xmlmap.StringField', 'xmlmap.StringField', (['"""xm:Href"""'], {}), "('xm:Href')\n", (12893, 12904), False, 'from eulxml import xmlmap\n'), ((12925, 12962), 'eulxml.xmlmap.StringField', 'xmlmap.StringField', (['"""xm:EmailAddress"""'], {}), "('xm:EmailAddress')\n", (12943, 12962), False, 'from eulxml import xmlmap\n'), ((12984, 13042), 'eulxml.xmlmap.StringField', 'xmlmap.StringField', (['"""xm:RefType"""'], {'choices': 'REF_TYPE_CHOICES'}), "('xm:RefType', choices=REF_TYPE_CHOICES)\n", (13002, 13042), False, 'from eulxml import xmlmap\n'), ((13386, 13423), 'eulxml.xmlmap.StringField', 'xmlmap.StringField', (['"""xm:EmailAddress"""'], {}), "('xm:EmailAddress')\n", (13404, 13423), False, 'from eulxml import xmlmap\n'), ((13440, 13473), 'eulxml.xmlmap.StringField', 'xmlmap.StringField', (['"""xm:GlobalId"""'], {}), "('xm:GlobalId')\n", (13458, 13473), False, 'from eulxml import xmlmap\n'), ((13500, 13563), 'eulxml.xmlmap.NodeListField', 'xmlmap.NodeListField', (['"""xm:ReferencesAccount"""', 'ReferencesAccount'], {}), "('xm:ReferencesAccount', ReferencesAccount)\n", (13520, 13563), False, 'from eulxml import xmlmap\n'), ((13590, 13631), 'eulxml.xmlmap.NodeListField', 'xmlmap.NodeListField', (['"""xm:Folder"""', 'Folder'], {}), "('xm:Folder', Folder)\n", (13610, 13631), False, 'from eulxml import xmlmap\n'), ((14067, 14100), 'email.utils.parsedate_tz', 'email.utils.parsedate_tz', (['datestr'], {}), '(datestr)\n', (14091, 14100), False, 'import email\n'), ((14192, 14225), 'email.utils.mktime_tz', 'email.utils.mktime_tz', (['time_tuple'], {}), '(time_tuple)\n', (14213, 14225), False, 'import email\n'), ((11163, 11173), 'eulxml.utils.compat.u', 'u', (['payload'], {}), '(payload)\n', 
(11164, 11173), False, 'from eulxml.utils.compat import u\n'), ((11445, 11455), 'eulxml.utils.compat.u', 'u', (['payload'], {}), '(payload)\n', (11446, 11455), False, 'from eulxml.utils.compat import u\n')] |
# Plot the waveform of librosa's bundled example audio clip.
import matplotlib.pyplot as plt
import librosa.display

plt.rcParams.update({'font.size': 16})

# Load the demo recording: y is the sample buffer, sr the sample rate.
audio_path = librosa.util.example_audio_file()
y, sr = librosa.load(audio_path)

plt.figure(figsize=(18, 7))
librosa.display.waveplot(y, sr=sr, x_axis='s')
print(sr)
# NOTE(review): the y-axis of a waveform plot shows amplitude; the
# 'Sampling Rate' label text is preserved as-is from the original.
plt.ylabel('Sampling Rate', fontsize=32)
plt.xlabel('Time (s)', fontsize=32)
plt.show()
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show"
] | [((55, 93), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 16}"], {}), "({'font.size': 16})\n", (74, 93), True, 'import matplotlib.pyplot as plt\n'), ((151, 178), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(18, 7)'}), '(figsize=(18, 7))\n', (161, 178), True, 'import matplotlib.pyplot as plt\n'), ((236, 276), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Sampling Rate"""'], {'fontsize': '(32)'}), "('Sampling Rate', fontsize=32)\n", (246, 276), True, 'import matplotlib.pyplot as plt\n'), ((276, 311), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (s)"""'], {'fontsize': '(32)'}), "('Time (s)', fontsize=32)\n", (286, 311), True, 'import matplotlib.pyplot as plt\n'), ((311, 321), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (319, 321), True, 'import matplotlib.pyplot as plt\n')] |
# -*- coding: utf-8 -*-
'''
Autor: <NAME>, <NAME>, <NAME>, <NAME>
Version: 1.3
Server fuer das hosten des FaSta-Dashboards
Copyright 2018 The Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================
'''
import sys
import dash
import dash_auth
import dash_core_components
import dash_core_components as dcc
import dash_html_components as html
import dash_table_experiments as dt
import flask
import pandas as pd
import plotly.graph_objs as go
import pymongo
import threading
from dash.dependencies import Input, Output
import os
import collections
from pprint import pprint
from pymongo.command_cursor import CommandCursor
from datetime import datetime
from apscheduler.schedulers.blocking import BlockingScheduler
from types import *
import pandas as pd
import numpy as np
from pandas import DataFrame
sys.path.append('./Clients')
import folium
from geopy.geocoders import Nominatim
#from sqlalchemy import create_engine
import psycopg2
########################################################################## #############################################################################################################################################
########################################################################## Web Application #############################################################################################################################################
########################################################################## #############################################################################################################################################
# Constants (deployment configuration)
MONGO_URL = os.environ.get('MONGO_URI')  # MongoDB connection string; None if the env var is unset
POSTGRESS_URL = os.environ.get('POSTGRES_URL')  # NOTE(review): read but never used in this chunk — confirm before removing
HOST_ID = '0.0.0.0'  # bind address — presumably passed to the server runner (not visible here)
PORT = '37002'  # bind port, kept as a string
print('Fasta Server initialisiert!')
def createGraphDataForEscalatorPage(numberOfLastEntries: int):
    """Build the per-day outage counts shown on the escalator page.

    For every facility the *latest* day on which it was reported INACTIVE is
    determined; the result counts how many facilities share each such day,
    restricted to the last ``numberOfLastEntries`` rows.

    Args:
        numberOfLastEntries: number of trailing rows (days) to return.

    Returns:
        pd.DataFrame with columns ['Datum', 'Anzahl_Ausfälle'].

    Notes:
        Reads the module-global ``facilities`` Mongo collection. The original
        O(days x facilities) nested counting loop is replaced with a single
        ``groupby(...).max()`` + ``value_counts`` pass.
    """
    records = pd.DataFrame(list(facilities.find({})))
    records = records[['equipmentnumber', 'datetime', 'state']]
    records.columns = ['ID', 'Datum', 'Status']
    # Normalise the raw timestamp string to a plain day, e.g. '2018-05-01'.
    records['Datum'] = pd.to_datetime(records['Datum'], format="%Y-%m-%d_%H-%M-%S").dt.strftime('%Y-%m-%d')
    inactive = records[records['Status'] == 'INACTIVE']
    # Latest INACTIVE day per facility, then how many facilities share each day.
    latest_per_facility = inactive.groupby('ID')['Datum'].max()
    outages_per_day = latest_per_facility.value_counts()
    # Preserve the original row order: first appearance of each day in the data.
    days = inactive['Datum'].drop_duplicates()
    ergDF = pd.DataFrame({
        'Datum': days.values,
        'Anzahl_Ausfälle': [int(outages_per_day.get(day, 0)) for day in days],
    })
    return ergDF.iloc[-numberOfLastEntries:]
def getDesiredState(listWithStates, state):
    """Count how many entries in *listWithStates* report the given *state*.

    Each entry is expected to be a mapping with a 'state' key.
    """
    return sum(1 for entry in listWithStates if entry['state'] == state)
def getDesiredStateExplanation(listWithStates, state, stateExplanation):
    """Count entries matching both the given *state* and *stateExplanation*.

    Each entry is expected to be a mapping with 'state' and
    'stateExplanation' keys.
    """
    return sum(
        1
        for entry in listWithStates
        if entry['state'] == state and entry['stateExplanation'] == stateExplanation
    )
def createOverview(givenType: str):
    """Return (active, inactive, unknown) counts over the latest known state
    of every facility of the given type.

    Reads the module-global ``facilities`` Mongo collection; the aggregation
    keeps only the most recent state per equipment number.
    """
    pipeline = [
        {'$match': {'type': givenType}},
        {'$group': {
            '_id': '$equipmentnumber',
            'lastStateChangeDate': {'$last': '$datetime'},
            'state': {'$last': '$state'},
        }}
    ]
    latest_states = list(facilities.aggregate(pipeline))
    return (
        getDesiredState(latest_states, 'ACTIVE'),
        getDesiredState(latest_states, 'INACTIVE'),
        getDesiredState(latest_states, 'UNKNOWN'),
    )
def createReasonsForInactivity(givenType: str):
    """Return two parallel lists (reasons, counts): every distinct
    stateExplanation currently affecting at least one INACTIVE facility of
    the given type, and how many facilities report it.

    Reads the module-global ``facilities`` Mongo collection.
    """
    all_explanations = facilities.distinct("stateExplanation")
    pipeline = [
        {'$match': {'type': givenType}},
        {'$group': {
            '_id': '$equipmentnumber',
            'lastStateChangeDate': {'$last': '$datetime'},
            'state': {'$last': '$state'},
            'stateExplanation': {'$last': '$stateExplanation'}
        }}
    ]
    latest_entries = list(facilities.aggregate(pipeline))
    key_array = []
    value_array = []
    # Keep only explanations that actually occur on an INACTIVE facility.
    for explanation in all_explanations:
        occurrences = getDesiredStateExplanation(latest_entries, 'INACTIVE', str(explanation))
        if occurrences != 0:
            key_array.append(str(explanation))
            value_array.append(occurrences)
    return key_array, value_array
def createInitialData():
    """Open the MongoDB facilities collection and load elevator master data.

    Returns:
        tuple: ``(facilities, aufzüge)`` — the pymongo collection of facility
        state events, and a cleaned pandas DataFrame of elevator master data
        from Postgres.

    Fixes over the original: the Postgres cursor and connection were never
    closed (resource leak); they are now released via try/finally.
    """
    client = pymongo.MongoClient(MONGO_URL, maxPoolSize=50)
    dbeva = client.eva_dev
    facilities = dbeva['facilities']
    # Load elevator master data from Postgres.
    conn = psycopg2.connect(host='station-db', user='postgres', password='<PASSWORD>', dbname='eva_dev', port=5432)
    try:
        cur = conn.cursor()
        try:
            cur.execute('select * from "elevator"')
            stammdaten_liste = cur.fetchall()
        finally:
            cur.close()
    finally:
        conn.close()
    aufzüge = pd.DataFrame(stammdaten_liste)
    columns = ['ID','Standort Equipment', 'TechnPlatzBezeichng', 'Equipment', 'Equipmentname', 'Ort', 'Wirtschaftseinheit',
               'Hersteller',
               'Baujahr', 'ANTRIEBSART', 'ANZAHL_HALTESTELLEN', 'ANZAHL_TUEREN_KABINE', 'ANZAHL_TUEREN_SCHACHT',
               'FOERDERGESCHWINDIGKEIT',
               'FOERDERHOEHE', 'LAGE', 'TRAGKRAFT', 'ERWEITERTE_ORTSANGABE', 'MIN_TUERBREITE', 'KABINENTIEFE',
               'KABINENBREITE',
               'KABINENHOEHE', 'TUERHOHE', 'FABRIKNUMMER', 'TUERART', 'GEOKOORDINATERECHTSWERT',
               'GEOKOORDINATEHOCHWERT', 'AUSFTEXTLICHEBESCHREIBUNG']
    aufzüge.columns = columns
    # Drop the first row — presumably a repeated header in the raw dump (TODO confirm).
    aufzüge = aufzüge.drop(0)
    aufzüge['Equipment'] = aufzüge['Equipment'].astype(str).astype('int64')
    aufzüge = aufzüge.drop_duplicates(['Equipment'])
    aufzüge = aufzüge.drop(columns=['ID'])
    aufzüge = aufzüge.fillna(value=np.nan)
    # Non-numeric build years become NaN rather than raising.
    aufzüge['Baujahr'] = pd.to_numeric(aufzüge['Baujahr'], errors='coerce')
    print('Anzahl Aufzüge: ', len(aufzüge))
    return facilities, aufzüge
def createMap(givenType: str):
    """Return (inactive_rows, active_rows, geolocator) for map rendering.

    The two DataFrames hold the latest state and coordinates of every
    facility of the given type; the Nominatim geolocator resolves city names
    to coordinates. Reads the module-global ``facilities`` Mongo collection.
    """
    pipeline = [
        {'$match': {'type': givenType}},
        {'$group': {
            '_id': '$equipmentnumber',
            'description': {'$last': '$description'},
            'geocoordX': {'$last': '$geocoordX'},
            'geocoordY': {'$last': '$geocoordY'},
            'lastStateChangeDate': {'$last': '$datetime'},
            'state': {'$last': '$state'},
        }}
    ]
    latest = pd.DataFrame(list(facilities.aggregate(pipeline)))
    latest.columns = ['equipmentnumber', 'description', 'geocoordX', 'geocoordY', 'lastStateChangeDate', 'state']
    # Geocoder used to centre the map on a user-supplied city.
    geolocator = Nominatim(user_agent="Eva_Dashboard")
    return latest[latest['state'] == 'INACTIVE'], latest[latest['state'] == 'ACTIVE'], geolocator
#####################################################################
################ Start of code (create initial data) ################
#####################################################################
# All of the following runs once at import time and populates the module-level
# globals that the layouts below embed.
facilities, aufzüge = createInitialData()
############################################################
################ Elevators at a glance #####################
############################################################
elevatorStateCountACTIVE, elevatorStateCountINACTIVE, elevatorStateCountUNKNOWN = createOverview('ELEVATOR')
############################################################
################ Escalators at a glance ####################
############################################################
escalatorStateCountACTIVE, escalatorStateCountINACTIVE, escalatorStateCountUNKNOWN = createOverview('ESCALATOR')
####################################################
###### Reasons for inactivity of elevators    ######
####################################################
elevator_key_array, elevator_value_array = createReasonsForInactivity('ELEVATOR')
####################################################
###### Reasons for inactivity of escalators   ######
####################################################
escalator_key_array, escalator_value_array = createReasonsForInactivity('ESCALATOR')
####################################################
###### Routine for refreshing the data        ######
####################################################
def updateValues():
    """Recompute every module-level aggregate from the databases.

    Rebinds the module globals read by the callbacks. NOTE(review): the page
    layouts are built once at import time from these globals, so already-built
    layout text will not reflect the refreshed values — confirm intent.
    """
    global facilities, aufzüge, elevatorStateCountACTIVE, elevatorStateCountINACTIVE, elevatorStateCountUNKNOWN
    global escalatorStateCountACTIVE, escalatorStateCountINACTIVE, escalatorStateCountUNKNOWN
    global elevator_key_array, elevator_value_array
    global escalator_key_array, escalator_value_array
    facilities, aufzüge = createInitialData()
    elevatorStateCountACTIVE, elevatorStateCountINACTIVE, elevatorStateCountUNKNOWN = createOverview('ELEVATOR')
    escalatorStateCountACTIVE, escalatorStateCountINACTIVE, escalatorStateCountUNKNOWN = createOverview('ESCALATOR')
    elevator_key_array, elevator_value_array = createReasonsForInactivity('ELEVATOR')
    escalator_key_array, escalator_value_array = createReasonsForInactivity('ESCALATOR')
# Data is refreshed periodically in a background thread.
# (Original comment claimed "every hour"; the actual interval below is 5 minutes.)
scheduler = BlockingScheduler()
scheduler.add_job(updateValues, 'interval', minutes=5)

class UpdateValue(threading.Thread):
    """Background thread that runs the blocking scheduler loop."""

    def __init__(self):
        threading.Thread.__init__(self)
        # BlockingScheduler.start() never returns; without the daemon flag the
        # original non-daemon thread kept the process alive forever on shutdown.
        self.daemon = True

    def run(self):
        scheduler.start()

print('Thread zum Updaten der Werte gestartet!')
tread = UpdateValue()
tread.start()
####################################
###### Did you know?          ######
####################################
# Oldest elevator (row whose build year equals the minimum build year).
aeltesteAufzug_datensatz = aufzüge[aufzüge['Baujahr'] == int(aufzüge['Baujahr'].min())]
aeltesteAufzug_ort = aeltesteAufzug_datensatz['Ort'].values[0]
aeltesteAufzug_jahr = int(aeltesteAufzug_datensatz['Baujahr'].values[0])
# Station with the most elevators (count of rows per 'Ort').
uniquelist_orte = aufzüge['Ort'].unique()
df_anzahlProStation = pd.DataFrame(columns=['Ort', 'Anzahl_Aufzüge'])
for i in uniquelist_orte:
    tmp = len(aufzüge[aufzüge['Ort'] == i])
    df_anzahlProStation.loc[i] = i,tmp
df_anzahlProStation = df_anzahlProStation.sort_values(by=['Anzahl_Aufzüge'], ascending=False)
####################################
###### Aggregated values      ######
####################################
# Counts per drive type.
anzahl_seilAufzüge = len(aufzüge[aufzüge['ANTRIEBSART'] == 'SEIL'])
anzahl_hydraulischAufzüge = len(aufzüge[aufzüge['ANTRIEBSART'] == 'HYDRAULISCH'])
# Top manufacturers (count of rows per 'Hersteller').
uniquelist_hersteller = aufzüge['Hersteller'].unique()
df_anzahlAufzüge = pd.DataFrame(columns=['Hersteller', 'Anzahl_Aufzüge'])
for i in uniquelist_hersteller:
    tmp = len(aufzüge[aufzüge['Hersteller'] == i])
    df_anzahlAufzüge.loc[i] = i,tmp
df_anzahlAufzüge = df_anzahlAufzüge.sort_values(by=['Anzahl_Aufzüge'], ascending=False)
# Total outages per facility: count of INACTIVE events per equipment number.
df_anzahlAusfälle = pd.DataFrame(columns=['Aufzug_ID', 'Anzahl_Ausfälle'])
temp_count = facilities.aggregate( [
    { '$match': { 'state': 'INACTIVE' } },
    {
        '$group': {
            '_id': "$equipmentnumber",
            'count': { '$sum': 1 }
        }
    }
] )
for i in temp_count:
    df_anzahlAusfälle.loc[i['_id']] = i['_id'], i['count']
df_anzahlAusfälle = df_anzahlAusfälle.sort_values(by=['Anzahl_Ausfälle'], ascending=False)
# Facility with the most outages and its outage count.
aufzug_aggregiert, anzahl_aggregiert = df_anzahlAusfälle['Aufzug_ID'].iloc[0], df_anzahlAusfälle['Anzahl_Ausfälle'].iloc[0]
###############################
###### Map for elevators ######
###############################
inactive, active, geolocator = createMap('ELEVATOR')
###################################
###### Map for escalators    ######
###################################
escalator_inactive, escalator_active, escalator_geolocator = createMap('ESCALATOR')
###################################
###### Data for escalators   ######
###################################
graphDataEscalator = createGraphDataForEscalatorPage(14)
####################################
######          APP           ######
####################################
# Keep the passwords out of the source repository; store them in a file or a
# database instead. (Security note: these are placeholders in this dump.)
VALID_USERNAME_PASSWORD_PAIRS = [
    ['Josh', '<PASSWORD>'],
    ['Sophie', '<PASSWORD>'],
    ['Phil', '<PASSWORD>'],
    ['Bart', '<PASSWORD>']
]
server = flask.Flask('EVA Dashboard')
app = dash.Dash('EVA Dashboard', server=server)
app.title = 'EVA Dashboard'
# HTTP basic auth in front of the whole dashboard.
auth = dash_auth.BasicAuth(
    app,
    VALID_USERNAME_PASSWORD_PAIRS
)
# Explanation:
# Since we're adding callbacks to elements that don't exist in the app.layout, Dash will raise an exception to warn us
# that we might be doing something wrong. In this case, we're adding the elements through a callback, so we can ignore the exception.
app.config.suppress_callback_exceptions = True
# Shell layout: URL router target plus a hidden DataTable so the dash_table
# assets get loaded even before a page containing a table is shown.
app.layout = html.Div([
    dcc.Location(id='url', refresh=False),
    html.Div(id='page-content'),
    html.Div(dt.DataTable(rows=[{}]), style={'display': 'none'})
])
# Layout of the escalator page ("Der Rolltreppenwärter").
page_rolltreppen = html.Div(children=[
    # Main heading
    html.Div([
        html.H1(style={'margin-left': 'auto', 'margin-right': 'auto', 'text-align': 'center', 'width': '15em'},
                children='EVA Dashboard'),
    ]),
    # Sub-heading
    html.Div([
        html.Hr(),
        html.H1(style={'margin-left': 'auto', 'margin-right': 'auto', 'text-align': 'center', 'width': '15em',
                       'color': '#000099'}, children='Der Rolltreppenwärter'),
        dcc.Markdown('''
**Informationen rund um Rolltreppen in Bahnhöfen der DB Station & Service AG**
'''.replace(' ', ''), className='beschreibung',
                     containerProps={
                         'style': {'maxWidth': '650px', 'color': '#000099', 'margin-left': 'auto',
                                   'margin-right': 'auto', 'text-align': 'center'}})
    ]),
    html.Div([
        dcc.Link('Go to Page Aufzüge', href='/page_aufzuege')
    ], style={'text-align': 'left'}),
    # Main section
    html.Div([
        # Charts
        html.Div([dcc.Graph(
            id='diagramm_status',
            figure={
                'data': [
                    {'x': ['aktiv', 'inaktiv', 'keine Information'],
                     'y': [escalatorStateCountACTIVE, escalatorStateCountINACTIVE, escalatorStateCountUNKNOWN],
                     'type': 'bar', 'name': 'Rolltreppen',
                     'marker': dict(color=['green', 'red', 'orange'])
                     },
                ],
                'layout': {
                    'title': 'Die Rolltreppen im Überblick',
                    'width': '35%',
                    'align': 'left'
                }
            }
        )], style={'width': '35%', 'text-align': 'left', 'display': 'inline-block', 'padding-top': 10,
                   'padding-left': 140, 'padding-bottom': 10}),
        html.Div([dcc.Graph(
            id='diagramm_inaktive',
            figure={
                'data': [
                    {'values': escalator_value_array, 'type': 'pie', 'name': 'GründeInaktivität',
                     'marker': dict(colors=['#DCDCDC', '#778899', '#C0C0C0']), 'labels': escalator_key_array
                     },
                ],
                'layout': {
                    'title': 'Gründe für Inaktivität',
                    'width': '35%',
                    'align': 'right'
                }
            }
        )],
            style={'width': '40%', 'text-align': 'right', 'display': 'inline-block', 'padding-left': 10,
                   'padding-bottom': 10}),
        html.Hr(),
        # Daily outage counts from createGraphDataForEscalatorPage(14).
        html.Div([dcc.Graph(
            figure=go.Figure(
                data=[
                    go.Bar(
                        x=graphDataEscalator['Datum'],
                        y=graphDataEscalator['Anzahl_Ausfälle'],
                        name='Anzahl Ausfälle',
                        marker=go.Marker(
                            color='rgb(55, 83, 109)'
                        )
                    )
                ],
                layout=go.Layout(
                    title='Anzahl der Ausfälle von Rolltreppen auf Tagesebene',
                    showlegend=True,
                    legend=go.Legend(
                        x=0,
                        y=1.0
                    ),
                    margin=go.Margin(l=40, r=0, t=40, b=30)
                )
            ),
            style={'height': 300, 'width': 800},
            id='escalator_mid_graph'
        )], style={'width': '60%', 'text-align': 'left', 'display': 'inline-block', 'padding-top': 10,
                   'padding-left': 140, 'padding-bottom': 10}),
        html.Hr(),
        # Lower third: interactive map + status table
        html.Div([
            # Title
            html.Div([
                html.H3(style={'margin-right': 'auto', 'text-align': 'left',
                               'color': '#000099'},
                        children='Funktionieren die Rolltreppen an deiner Haltestelle? - Finde es heraus!'),
            ], style={'width': '60%', 'text-align': 'left', 'display': 'inline-block', 'padding-top': 10,
                      'padding-left': 140, 'padding-bottom': 10}),
            # Left column: city/state inputs, filter radio and the folium map iframe
            html.Div([
                html.Div(['Stadt: '],
                         style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
                dcc.Input(id='escalator_stadt_input', value='Frankfurt', type='text',
                          style={'margin-left': '5', 'margin-right': 'auto', 'display': 'inline-block'}),
                html.Div(['Bundesland: '],
                         style={'margin-left': '15', 'margin-right': 'auto', 'display': 'inline-block'}),
                dcc.Input(id='escalator_bundesland_input', value='Hessen', type='text',
                          style={'margin-left': '5', 'margin-right': 'auto', 'display': 'inline-block'}),
                html.Br(), html.Br(),
                dcc.RadioItems(
                    id='escalator_radio_button',
                    options=[
                        {'label': 'Aktive Rolltreppen', 'value': 'aktiv'},
                        {'label': 'Inaktive Rolltreppen', 'value': 'inaktiv'},
                        {'label': ' Alle Rolltreppen', 'value': 'beide'}
                    ],
                    value='inaktiv', style={'margin-left': 10}
                ),
                # NOTE(review): initial srcDoc reads the *elevator* map file — confirm intended.
                html.Iframe(id='escalator_karte', srcDoc=open('./projekt/Maps/map_inactive_elevators.html', 'r').read(),
                            style={'width': '90%', 'height': '30em'})
            ], style={'width': '49%', 'display': 'inline-block'}),
            # Right column: escalator ID lookup and status history table
            html.Div([
                html.Br(), html.Br(),
                html.Div(['Rolltreppen-ID: '],
                         style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
                dcc.Input(id='rolltreppe_id_input', type='text',
                          style={'margin-left': '5', 'margin-right': 'auto', 'display': 'inline-block'}),
                html.Br(),
                html.Hr(),
                # Table
                html.Div([
                    dt.DataTable(
                        rows=[{}],
                        columns=['Datum_Uhrzeit', 'Status', 'Erklärung des Status'],
                        editable=False,
                        row_selectable=False,
                        filterable=False,
                        sortable=False,
                        id='datatable-status-escalator',
                        selected_row_indices=[],
                        min_height=250
                    ),
                    html.Br(),
                ])
            ], style={'width': '49%', 'display': 'inline-block', 'vertical-align': 'top'})
        ], style={'margin-left': '20'}),
    ], style={'background-color': '#E6E6FA'}),
    # Footer
    html.Div([], style={'height': 70}),
    html.Hr(),
    html.Div([
        dcc.Markdown('''
**THM Friedberg**
'''.replace(' ', ''), className='beschreibung',
                     containerProps={
                         'style': {'maxWidth': '650px', 'color': '#000000', 'margin-left': 'auto',
                                   'margin-right': 'auto', 'text-align': 'center'}}),
        dcc.Markdown('''
**<NAME>, <NAME>, <NAME>, <NAME>**
'''.replace(' ', ''), className='beschreibung',
                     containerProps={
                         'style': {'maxWidth': '650px', 'color': '#000000', 'margin-left': 'auto',
                                   'margin-right': 'auto', 'text-align': 'center'}})
    ], style={'height': 70}),
], style={'marginTop': '2%', 'marginLeft': '5%', 'marginRight': '5%'})
#####################################################################################################################
#####################################################################################################################
#####################################################################################################################
#####################################################################################################################
#####################################################################################################################
#####################################################################################################################
#####################################################################################################################
#####################################################################################################################
# Layout of the elevator page ("Der Aufzugwächter").
page_aufzuege = html.Div(children=[
    # Main heading
    html.Div([
        html.H1(style={'margin-left': 'auto', 'margin-right': 'auto', 'text-align': 'center', 'width': '15em'},
                children='EVA Dashboard'),
    ]),
    # Sub-heading
    html.Div([
        html.Hr(),
        html.H1(style={'margin-left': 'auto', 'margin-right': 'auto', 'text-align': 'center', 'width': '10em',
                       'color': '#000099'}, children='Der Aufzugwächter'),
        dcc.Markdown('''
**Informationen rund um Aufzüge in Bahnhöfen der DB Station & Service AG**
'''.replace(' ', ''), className='beschreibung',
                     containerProps={
                         'style': {'maxWidth': '650px', 'color': '#000099', 'margin-left': 'auto',
                                   'margin-right': 'auto', 'text-align': 'center'}})
    ]),
    html.Div([
        dcc.Link('Go to Page Rolltreppen', href='/page-rolltreppen')
    ], style={'text-align':'right'}),
    # Main section
    html.Div([
        # Charts
        html.Div([], style={'width':'10%', 'display': 'inline-block', 'vertical-align':'top'}),
        html.Div([
            html.Div([ dcc.Graph(
                id='diagramm_status',
                figure={
                    'data': [
                        {'x': ['aktiv', 'inaktiv', 'keine Information'], 'y': [elevatorStateCountACTIVE, elevatorStateCountINACTIVE, elevatorStateCountUNKNOWN], 'type': 'bar', 'name': 'Aufzüge',
                         'marker': dict(color=['green', 'red', 'orange'])
                         },
                    ],
                    'layout': {
                        'title': 'Die Aufzüge im Überblick',
                        'width': '35%',
                        'align': 'left'
                    }
                }
            )], style={'width': '40%', 'display': 'inline-block', 'padding-top': 10, 'padding-bottom': 10}),
            html.Div([ dcc.Graph(
                id='diagramm_inaktive',
                figure={
                    'data': [
                        {'values': elevator_value_array, 'type': 'pie', 'name': 'GründeInaktivität',
                         'marker': dict(colors=['#DCDCDC', '#778899', '#C0C0C0']), 'labels': elevator_key_array
                         },
                    ],
                    'layout': {
                        'title': 'Gründe für Inaktivität',
                        'width': '35%',
                        'align': 'right'
                    }
                }
            )],
                style={'width': '40%', 'display': 'inline-block', 'padding-left': 10, 'padding-bottom': 10}),
        ], style={'width':'90%', 'margin':'auto', 'display': 'inline-block', 'vertical-align':'top'}),
        html.Hr(),
        # Middle third: "Did you know?" facts and aggregated values
        html.Div([]),
        html.Div([
            html.H3(style={'margin-left': 'auto', 'margin-right': 'auto', 'text-align': 'right',
                           'color': '#000099'}, children='Wusstest du schon?'),
            html.Br(),
            html.Div('Der älteste Aufzug ist aus dem Jahr {} steht in: {}'.format(aeltesteAufzug_jahr, aeltesteAufzug_ort)),
            html.Div(id='aeltester_aufzug', style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
            html.Br(),
            html.Div('Die Station mit den meisten Aufzügen ist: {} mit {} Aufzügen'.format(df_anzahlProStation['Ort'].iloc[0], df_anzahlProStation['Anzahl_Aufzüge'].iloc[0])),
            # Count of elevators per station, see df_anzahlProStation above.
            html.Div(id='meisten_aufzüge', style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
            html.Br(),
            html.Div('Der Aufzug mit den meinste Ausfällen ist {} mit {} Ausfällen'.format(aufzug_aggregiert, anzahl_aggregiert)),
            # Count of INACTIVE events per elevator, see df_anzahlAusfälle above.
            html.Div(id='meiste_ausfälle', style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
            html.Br(),
        ], style={'display': 'inline-block', 'text-align': 'right', 'width': '45%', 'margin-right':20, 'vertical-align':'top'}),
        html.Hr(style={'width': 1, 'height': 200, 'display': 'inline-block'}),
        html.Div([
            html.H3(style={'margin-left': 'auto', 'margin-right': 'auto', 'text-align': 'left',
                           'color': '#000099'}, children='Aggregierte Werte'),
            html.Div([
                html.Div('Antriebsart:'),
                html.Br(), html.Br(), html.Br(), html.Br(),
                html.Div('Top Hersteller:'),
                html.Br(),
            ], style={'display':'inline-block', 'width': '20%' }),
            html.Div([
                html.Div('HYDRAULISCH: {} Aufzüge'.format(anzahl_hydraulischAufzüge)),
                html.Div('SEIL: {} Aufzüge'.format(anzahl_seilAufzüge)),
                html.Br(), html.Br(), html.Br(),
                html.Div('{}: {} Aufzüge'.format(df_anzahlAufzüge['Hersteller'].iloc[0], df_anzahlAufzüge['Anzahl_Aufzüge'].iloc[0])),
                html.Div('{}: {} Aufzüge'.format(df_anzahlAufzüge['Hersteller'].iloc[1], df_anzahlAufzüge['Anzahl_Aufzüge'].iloc[1])),
                html.Div('{}: {} Aufzüge'.format(df_anzahlAufzüge['Hersteller'].iloc[2], df_anzahlAufzüge['Anzahl_Aufzüge'].iloc[2]))
            ], style={'display':'inline-block', 'width': '80%', 'vertical-align':'top'})
        ], style={'display': 'inline-block', 'text-align': 'left', 'width': '50%', 'margin-left':20, 'vertical-align':'top'}),
        html.Hr(),
        # Lower third: interactive map + per-elevator details
        html.Div([
            # Title
            html.Div([
                html.H3(style={'margin-right': 'auto', 'text-align': 'left',
                               'color': '#000099'}, children='Funktionieren die Aufzüge an deiner Haltestelle? - Finde es heraus!'),
            ]),
            # Left column: city/state inputs, filter radio and the folium map iframe
            html.Div([
                html.Div(['Stadt: '], style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
                dcc.Input(id='stadt_input', value='Frankfurt', type='text', style={'margin-left': '5', 'margin-right': 'auto', 'display': 'inline-block'}),
                html.Div(['Bundesland: '], style={'margin-left': '15', 'margin-right': 'auto', 'display': 'inline-block'}),
                dcc.Input(id='bundesland_input', value='Hessen', type='text', style={'margin-left': '5', 'margin-right': 'auto', 'display': 'inline-block'}),
                html.Br(), html.Br(),
                dcc.RadioItems(
                    id='radio_button',
                    options=[
                        {'label': 'Aktive Aufzüge', 'value': 'aktiv'},
                        {'label': 'Inaktive Aufzüge', 'value': 'inaktiv'},
                        {'label': ' Alle Aufzüge', 'value': 'beide'}
                    ],
                    value='inaktiv', style={'margin-left':10}
                ),
                html.Iframe(id='karte', srcDoc=open('./projekt/Maps/map_inactive_elevators.html', 'r').read(),
                            style={'width': '90%', 'height': '30em'})
            ], style={'width': '49%', 'display': 'inline-block'}),
            # Right column: elevator ID lookup, master-data fields and history table
            html.Div([
                html.Br(), html.Br(),
                html.Div(['Aufzug-ID: '], style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
                dcc.Input(id='aufzug_id_input', type='text',
                          style={'margin-left': '5', 'margin-right': 'auto', 'display': 'inline-block'}),
                html.Br(),
                html.Hr(),
                html.Div([
                    html.Div(['Stationsname: '], style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
                    html.Br(), html.Br(),
                    html.Div(['Beschreibung: '], style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
                    html.Br(), html.Br(),
                    html.Div(['Hersteller: '], style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
                    html.Br(), html.Br(),
                    html.Div(['Antriebsart: '], style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
                    html.Br(), html.Br(),
                    html.Div(['Baujahr: '], style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
                    html.Br(), html.Br(),
                ], style={'width': '20%', 'display': 'inline-block'}),
                html.Div([
                    html.Div(id='stationsname', style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
                    html.Br(), html.Br(),
                    html.Div(id='beschreibung', style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
                    html.Br(), html.Br(),
                    html.Div(id='hersteller',style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
                    html.Br(), html.Br(),
                    html.Div(id='antrieb', style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
                    html.Br(), html.Br(),
                    html.Div(id='baujahr', style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
                    html.Br(), html.Br(),
                ], style={'width': '80%', 'display': 'inline-block'}),
                # Table
                html.Div([
                    dt.DataTable(
                        rows=[{}],
                        columns=['Datum_Uhrzeit', 'Status' , 'Erklärung des Status'],
                        editable=False,
                        row_selectable=False,
                        filterable=False,
                        sortable=False,
                        id='datatable-status-elevator',
                        selected_row_indices=[],
                        min_height=250
                    ),
                    html.Br(),
                ])
            ], style={'width': '49%','display': 'inline-block', 'vertical-align':'top'})
        ], style={'margin-left':'20'}),
    ], style = {'background-color': '#E6E6FA'}),
    # Footer
    html.Div([ ], style={'height':70}),
    html.Hr(),
    html.Div([
        dcc.Markdown('''
**THM Friedberg**
'''.replace(' ', ''), className='beschreibung',
                     containerProps={
                         'style': {'maxWidth': '650px', 'color': '#000000', 'margin-left': 'auto',
                                   'margin-right': 'auto', 'text-align': 'center'}}),
        dcc.Markdown('''
**<NAME>, <NAME>, <NAME>, <NAME>**
'''.replace(' ', ''), className='beschreibung',
                     containerProps={
                         'style':{'maxWidth': '650px', 'color': '#000000', 'margin-left': 'auto',
                                  'margin-right': 'auto', 'text-align': 'center'}})
    ], style={'height':70}),
], style={'marginTop': '2%', 'marginLeft': '5%', 'marginRight': '5%'})
########################################################################## #############################################################################################################################################
########################################################################## CALLBACKS #############################################################################################################################################
########################################################################## #############################################################################################################################################
# Callback: refresh the elevator map for the chosen city/state/filter.
def _add_elevator_markers(fmap, rows, color):
    """Add one marker per facility row to *fmap*; rows without coordinates are skipped."""
    for _, row in rows.iterrows():
        if str(row['geocoordY']) == 'nan' or str(row['geocoordX']) == 'nan':
            continue
        popup_text = str('ID: ' + str(row['equipmentnumber']) + ' Beschreibung: ' + str(row['description']))
        folium.Marker([row['geocoordY'], row['geocoordX']],
                      popup=popup_text,
                      icon=folium.Icon(color=color, icon='info-sign')).add_to(fmap)

@app.callback(
    Output(component_id='karte', component_property='srcDoc'),
    [Input(component_id='stadt_input', component_property='value'),
     Input(component_id='bundesland_input', component_property='value'),
     Input(component_id='radio_button', component_property='value')]
)
def karte_aktualisieren(input_stadt, input_bland, radio_button):
    """Render a folium map of elevators (active/inactive/both) around the given city.

    Falls back to a pre-rendered Frankfurt map when geocoding or rendering
    fails. Fixes over the original: the three near-identical branches share
    one rendering path, the bare ``except:`` no longer swallows
    SystemExit/KeyboardInterrupt, and the HTML files are closed after reading.
    """
    if radio_button == 'aktiv':
        datasets = [(active, 'green')]
        out_path = './projekt/Maps/map_active_elevators.html'
        fallback = './projekt/Maps/map_active_elevators_FFM.html'
    elif radio_button == 'inaktiv':
        datasets = [(inactive, 'red')]
        out_path = './projekt/Maps/map_inactive_elevators.html'
        fallback = './projekt/Maps/map_inactive_elevators_FFM.html'
    else:
        # 'beide': show active (green) and inactive (red) together.
        datasets = [(active, 'green'), (inactive, 'red')]
        out_path = './projekt/Maps/map_both_elevators.html'
        fallback = './projekt/Maps/map_inactive_elevators_FFM.html'
    try:
        input_user = str(input_stadt + ', ' + input_bland + ', Deutschland')
        location = geolocator.geocode(input_user)
        fmap = folium.Map(location=[location.latitude, location.longitude], zoom_start=10)
        for rows, color in datasets:
            _add_elevator_markers(fmap, rows, color)
        fmap.save(out_path)
        with open(out_path, 'r') as rendered:
            return rendered.read()
    except Exception:
        # Geocoding failed (unknown city, network error, ...) — serve the
        # pre-rendered Frankfurt fallback map instead.
        with open(fallback, 'r') as rendered:
            return rendered.read()
######################################################################################################
# Callback Karte aktualisieren für Rolltreppen
@app.callback(
    Output(component_id='escalator_karte', component_property='srcDoc'),
    [Input(component_id='escalator_stadt_input', component_property='value'),
     Input(component_id='escalator_bundesland_input', component_property='value'),
     Input(component_id='escalator_radio_button', component_property='value')]
)
def karte_aktualisieren(input_stadt, input_bland, radio_button):
    """Render a folium map of escalators for the given city/state.

    Depending on ``radio_button`` ('aktiv', 'inaktiv', anything else = both),
    active and/or inactive escalators are drawn as green/red markers. On any
    failure (e.g. the geocoder cannot resolve the input) a pre-rendered
    fallback map of Frankfurt is returned instead.

    NOTE(review): this redefines the Python name of the elevator callback
    above; Dash registered each function via its decorator, so both callbacks
    keep working — confirm before renaming either.
    """
    def _add_markers(folium_map, rows, color):
        # Rows without usable coordinates are skipped; everything else
        # becomes a marker with ID + description in the popup.
        for _, row in rows.iterrows():
            if str(row['geocoordY']) == 'nan' or str(row['geocoordX']) == 'nan':
                continue
            tmp = str('ID: ' + str(row['equipmentnumber']) + ' Beschreibung: ' + str(row['description']))
            folium.Marker([row['geocoordY'], row['geocoordX']],
                          popup=tmp,
                          icon=folium.Icon(color=color, icon='info-sign')).add_to(folium_map)

    def _render(map_path, fallback_path, datasets):
        # datasets: list of (DataFrame, marker color) pairs to draw.
        try:
            input_user = str(input_stadt + ', ' + input_bland + ', Deutschland')
            location = escalator_geolocator.geocode(input_user)
            m = folium.Map(location=[location.latitude, location.longitude], zoom_start=10)
            for rows, color in datasets:
                _add_markers(m, rows, color)
            m.save(map_path)
            # 'with' closes the handle; the original leaked open file objects.
            with open(map_path, 'r') as f:
                return f.read()
        except Exception:
            # Narrowed from a bare ``except``. Geocoding/IO can legitimately
            # fail for unknown places; fall back to the pre-rendered map
            # instead of crashing the callback.
            with open(fallback_path, 'r') as f:
                return f.read()

    if radio_button == 'aktiv':
        return _render('./projekt/Maps/map_active_escalators.html',
                       './projekt/Maps/map_active_escalators_FFM.html',
                       [(escalator_active, 'green')])
    elif radio_button == 'inaktiv':
        return _render('./projekt/Maps/map_inactive_escalators.html',
                       './projekt/Maps/map_inactive_escalators_FFM.html',
                       [(escalator_inactive, 'red')])
    else:
        return _render('./projekt/Maps/map_both_escalators.html',
                       './projekt/Maps/map_inactive_escalators_FFM.html',
                       [(escalator_active, 'green'), (escalator_inactive, 'red')])
######################################################################################################
# Callback Stationsname aktualisieren
@app.callback(
    Output(component_id='stationsname', component_property='children'),
    [Input(component_id='aufzug_id_input', component_property='value')]
)
def stationsname_aktualisieren(input_value):
    """Look up the station name ('Ort') for the given elevator ID.

    Returns a user-facing error string when the ID is empty, not numeric,
    or unknown.
    """
    try:
        aufzug = aufzüge[aufzüge['Equipment'] == int(input_value)]
        attribute = aufzug['Ort'].values
        return attribute[0]
    # Narrowed from a bare ``except``: int() raises ValueError/TypeError on
    # bad input, attribute[0] raises IndexError for unknown IDs.
    except (ValueError, TypeError, IndexError, KeyError):
        return str('Aufzug existiert nicht!')
# Callback Hersteller aktualisieren
@app.callback(
    Output(component_id='hersteller', component_property='children'),
    [Input(component_id='aufzug_id_input', component_property='value')]
)
def hersteller_aktualisieren(input_value):
    """Look up the manufacturer ('Hersteller') for the given elevator ID.

    Returns an empty string when the ID is empty, not numeric, or unknown.
    """
    try:
        aufzug = aufzüge[aufzüge['Equipment'] == int(input_value)]
        attribute = aufzug['Hersteller'].values
        return attribute[0]
    # Narrowed from a bare ``except``: int() raises ValueError/TypeError on
    # bad input, attribute[0] raises IndexError for unknown IDs.
    except (ValueError, TypeError, IndexError, KeyError):
        return ''
# Callback Beschreibung aktualisieren
@app.callback(
    Output(component_id='beschreibung', component_property='children'),
    [Input(component_id='aufzug_id_input', component_property='value')]
)
def beschreibung_aktualisieren(input_value):
    """Look up the location description ('Standort Equipment') for an elevator ID.

    Returns an empty string when the ID is empty, not numeric, or unknown.
    """
    try:
        tmp3 = aufzüge[aufzüge['Equipment'] == int(input_value)]
        attribute = tmp3['Standort Equipment'].values
        return attribute[0]
    # Narrowed from a bare ``except``: int() raises ValueError/TypeError on
    # bad input, attribute[0] raises IndexError for unknown IDs.
    except (ValueError, TypeError, IndexError, KeyError):
        return ''
# Callback Antriebsart aktualisieren
@app.callback(
    Output(component_id='antrieb', component_property='children'),
    [Input(component_id='aufzug_id_input', component_property='value')]
)
def anstriebsart_aktualisieren(input_value):
    """Look up the drive type ('ANTRIEBSART') for the given elevator ID.

    Returns an empty string when the ID is empty, not numeric, or unknown.
    """
    try:
        aufzug = aufzüge[aufzüge['Equipment'] == int(input_value)]
        attribute = aufzug['ANTRIEBSART'].values
        return attribute[0]
    # Narrowed from a bare ``except``: int() raises ValueError/TypeError on
    # bad input, attribute[0] raises IndexError for unknown IDs.
    except (ValueError, TypeError, IndexError, KeyError):
        return ''
# Callback Baujahr aktualisieren
@app.callback(
    Output(component_id='baujahr', component_property='children'),
    [Input(component_id='aufzug_id_input', component_property='value')]
)
def baujahr_aktualisieren(input_value):
    """Look up the year of construction ('Baujahr') for the given elevator ID.

    Returns an empty string when the ID is empty, not numeric, or unknown.
    """
    try:
        aufzug = aufzüge[aufzüge['Equipment'] == int(input_value)]
        attribute = aufzug['Baujahr'].values
        return attribute[0]
    # Narrowed from a bare ``except``: int() raises ValueError/TypeError on
    # bad input, attribute[0] raises IndexError for unknown IDs.
    except (ValueError, TypeError, IndexError, KeyError):
        return ''
# Callback Tabelle aktualisieren
@app.callback(
    Output(component_id='datatable-status-elevator', component_property='rows'),
    [Input(component_id='aufzug_id_input', component_property='value')]
)
def elevator_tabelle_aktualisieren(input_value):
    """Build the status-history table rows for one elevator.

    Queries MongoDB for all status records of the given equipment number and
    returns them newest-first as a list of dicts for the Dash DataTable; on
    any error (bad ID, no matching data) a single empty row is returned so
    the table renders blank.
    """
    try:
        tabellen_input = facilities.find({"type": "ELEVATOR", "equipmentnumber": int(input_value)})
        tabellen_input = pd.DataFrame(list(tabellen_input))
        tabellen_input = tabellen_input[['datetime', 'state', 'stateExplanation']]
        status_tabelle = tabellen_input[::-1]  # reverse so the newest entry is first
        status_tabelle.columns = ['Datum_Uhrzeit', 'Status', 'Erklärung des Status']
        return status_tabelle.to_dict('records')
    # Narrowed from a bare ``except``; DB, parse and empty-result errors all
    # degrade to an empty table instead of breaking the callback.
    except Exception:
        return [{}]
@app.callback(
    Output(component_id='datatable-status-escalator', component_property='rows'),
    [Input(component_id='rolltreppe_id_input', component_property='value')]
)
def escalator_tabelle_aktualisieren(input_value):
    """Build the status-history table rows for one escalator.

    Mirrors ``elevator_tabelle_aktualisieren`` but filters on type
    'ESCALATOR'. Rows come back newest-first as dicts for the Dash
    DataTable; on any error a single empty row is returned so the table
    renders blank.
    """
    try:
        tabellen_input = facilities.find({"type": "ESCALATOR", "equipmentnumber": int(input_value)})
        tabellen_input = pd.DataFrame(list(tabellen_input))
        tabellen_input = tabellen_input[['datetime', 'state', 'stateExplanation']]
        status_tabelle = tabellen_input[::-1]  # reverse so the newest entry is first
        status_tabelle.columns = ['Datum_Uhrzeit', 'Status', 'Erklärung des Status']
        return status_tabelle.to_dict('records')
    # Narrowed from a bare ``except``; DB, parse and empty-result errors all
    # degrade to an empty table instead of breaking the callback.
    except Exception:
        return [{}]
#Seite updaten für den Wechsel zwischen Aufzügen und Rolltreppen
@app.callback(dash.dependencies.Output('page-content', 'children'),
              [dash.dependencies.Input('url', 'pathname')])
def display_page(pathname):
    """Route the current URL path to the matching sub-page.

    Only '/page-rolltreppen' gets the escalator page; every other path —
    including '/page-aufzuege' and unknown URLs — falls back to the
    elevator page.
    """
    return page_rolltreppen if pathname == '/page-rolltreppen' else page_aufzuege
# Script entry: refuse to run under Python 2, then start the Dash dev server.
# NOTE(review): this check only runs after the whole module has executed, so
# under Python 2 a syntax/import error would most likely have fired earlier;
# consider moving it to the top of the file.
if sys.version_info < (3, 0):
    sys.exit("Dieses Programm erfordert Python 3.0 und höher")
# HOST_ID/PORT are module-level configuration defined earlier in the file.
app.run_server(debug=False, host=HOST_ID, port=PORT)
| [
"psycopg2.connect",
"dash_table_experiments.DataTable",
"flask.Flask",
"dash_core_components.Location",
"dash_html_components.H3",
"dash.dependencies.Input",
"sys.exit",
"pymongo.MongoClient",
"sys.path.append",
"pandas.to_datetime",
"dash_html_components.Div",
"dash.Dash",
"threading.Thread... | [((1396, 1424), 'sys.path.append', 'sys.path.append', (['"""./Clients"""'], {}), "('./Clients')\n", (1411, 1424), False, 'import sys\n'), ((2257, 2284), 'os.environ.get', 'os.environ.get', (['"""MONGO_URI"""'], {}), "('MONGO_URI')\n", (2271, 2284), False, 'import os\n'), ((2301, 2331), 'os.environ.get', 'os.environ.get', (['"""POSTGRES_URL"""'], {}), "('POSTGRES_URL')\n", (2315, 2331), False, 'import os\n'), ((10693, 10712), 'apscheduler.schedulers.blocking.BlockingScheduler', 'BlockingScheduler', ([], {}), '()\n', (10710, 10712), False, 'from apscheduler.schedulers.blocking import BlockingScheduler\n'), ((11470, 11517), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Ort', 'Anzahl_Aufzüge']"}), "(columns=['Ort', 'Anzahl_Aufzüge'])\n", (11482, 11517), True, 'import pandas as pd\n'), ((12100, 12154), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Hersteller', 'Anzahl_Aufzüge']"}), "(columns=['Hersteller', 'Anzahl_Aufzüge'])\n", (12112, 12154), True, 'import pandas as pd\n'), ((12402, 12456), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Aufzug_ID', 'Anzahl_Ausfälle']"}), "(columns=['Aufzug_ID', 'Anzahl_Ausfälle'])\n", (12414, 12456), True, 'import pandas as pd\n'), ((13861, 13889), 'flask.Flask', 'flask.Flask', (['"""EVA Dashboard"""'], {}), "('EVA Dashboard')\n", (13872, 13889), False, 'import flask\n'), ((13897, 13938), 'dash.Dash', 'dash.Dash', (['"""EVA Dashboard"""'], {'server': 'server'}), "('EVA Dashboard', server=server)\n", (13906, 13938), False, 'import dash\n'), ((13974, 14029), 'dash_auth.BasicAuth', 'dash_auth.BasicAuth', (['app', 'VALID_USERNAME_PASSWORD_PAIRS'], {}), '(app, VALID_USERNAME_PASSWORD_PAIRS)\n', (13993, 14029), False, 'import dash_auth\n'), ((2482, 2532), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Datum', 'Anzahl_Ausfälle']"}), "(columns=['Datum', 'Anzahl_Ausfälle'])\n", (2494, 2532), True, 'import pandas as pd\n'), ((2874, 2946), 'pandas.to_datetime', 'pd.to_datetime', 
(["facilities_distinct['Datum']"], {'format': '"""%Y-%m-%d_%H-%M-%S"""'}), "(facilities_distinct['Datum'], format='%Y-%m-%d_%H-%M-%S')\n", (2888, 2946), True, 'import pandas as pd\n'), ((5946, 5992), 'pymongo.MongoClient', 'pymongo.MongoClient', (['MONGO_URL'], {'maxPoolSize': '(50)'}), '(MONGO_URL, maxPoolSize=50)\n', (5965, 5992), False, 'import pymongo\n'), ((6093, 6201), 'psycopg2.connect', 'psycopg2.connect', ([], {'host': '"""station-db"""', 'user': '"""postgres"""', 'password': '"""<PASSWORD>"""', 'dbname': '"""eva_dev"""', 'port': '(5432)'}), "(host='station-db', user='postgres', password='<PASSWORD>',\n dbname='eva_dev', port=5432)\n", (6109, 6201), False, 'import psycopg2\n'), ((6344, 6374), 'pandas.DataFrame', 'pd.DataFrame', (['stammdaten_liste'], {}), '(stammdaten_liste)\n', (6356, 6374), True, 'import pandas as pd\n'), ((7292, 7342), 'pandas.to_numeric', 'pd.to_numeric', (["aufzüge['Baujahr']"], {'errors': '"""coerce"""'}), "(aufzüge['Baujahr'], errors='coerce')\n", (7305, 7342), True, 'import pandas as pd\n'), ((8275, 8312), 'geopy.geocoders.Nominatim', 'Nominatim', ([], {'user_agent': '"""Eva_Dashboard"""'}), "(user_agent='Eva_Dashboard')\n", (8284, 8312), False, 'from geopy.geocoders import Nominatim\n'), ((37496, 37553), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""karte"""', 'component_property': '"""srcDoc"""'}), "(component_id='karte', component_property='srcDoc')\n", (37502, 37553), False, 'from dash.dependencies import Input, Output\n'), ((41647, 41714), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""escalator_karte"""', 'component_property': '"""srcDoc"""'}), "(component_id='escalator_karte', component_property='srcDoc')\n", (41653, 41714), False, 'from dash.dependencies import Input, Output\n'), ((45821, 45887), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""stationsname"""', 'component_property': '"""children"""'}), "(component_id='stationsname', component_property='children')\n", 
(45827, 45887), False, 'from dash.dependencies import Input, Output\n'), ((46269, 46333), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""hersteller"""', 'component_property': '"""children"""'}), "(component_id='hersteller', component_property='children')\n", (46275, 46333), False, 'from dash.dependencies import Input, Output\n'), ((46694, 46760), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""beschreibung"""', 'component_property': '"""children"""'}), "(component_id='beschreibung', component_property='children')\n", (46700, 46760), False, 'from dash.dependencies import Input, Output\n'), ((47126, 47187), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""antrieb"""', 'component_property': '"""children"""'}), "(component_id='antrieb', component_property='children')\n", (47132, 47187), False, 'from dash.dependencies import Input, Output\n'), ((47546, 47607), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""baujahr"""', 'component_property': '"""children"""'}), "(component_id='baujahr', component_property='children')\n", (47552, 47607), False, 'from dash.dependencies import Input, Output\n'), ((47957, 48032), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""datatable-status-elevator"""', 'component_property': '"""rows"""'}), "(component_id='datatable-status-elevator', component_property='rows')\n", (47963, 48032), False, 'from dash.dependencies import Input, Output\n'), ((48644, 48720), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""datatable-status-escalator"""', 'component_property': '"""rows"""'}), "(component_id='datatable-status-escalator', component_property='rows')\n", (48650, 48720), False, 'from dash.dependencies import Input, Output\n'), ((49398, 49450), 'dash.dependencies.Output', 'dash.dependencies.Output', (['"""page-content"""', '"""children"""'], {}), "('page-content', 'children')\n", (49422, 49450), False, 'import dash\n'), ((49754, 49812), 'sys.exit', 
'sys.exit', (['"""Dieses Programm erfordert Python 3.0 und höher"""'], {}), "('Dieses Programm erfordert Python 3.0 und höher')\n", (49762, 49812), False, 'import sys\n'), ((10842, 10873), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (10867, 10873), False, 'import threading\n'), ((15142, 15179), 'dash_core_components.Location', 'dcc.Location', ([], {'id': '"""url"""', 'refresh': '(False)'}), "(id='url', refresh=False)\n", (15154, 15179), True, 'import dash_core_components as dcc\n'), ((15185, 15212), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""page-content"""'}), "(id='page-content')\n", (15193, 15212), True, 'import dash_html_components as html\n'), ((37560, 37621), 'dash.dependencies.Input', 'Input', ([], {'component_id': '"""stadt_input"""', 'component_property': '"""value"""'}), "(component_id='stadt_input', component_property='value')\n", (37565, 37621), False, 'from dash.dependencies import Input, Output\n'), ((37628, 37694), 'dash.dependencies.Input', 'Input', ([], {'component_id': '"""bundesland_input"""', 'component_property': '"""value"""'}), "(component_id='bundesland_input', component_property='value')\n", (37633, 37694), False, 'from dash.dependencies import Input, Output\n'), ((37701, 37763), 'dash.dependencies.Input', 'Input', ([], {'component_id': '"""radio_button"""', 'component_property': '"""value"""'}), "(component_id='radio_button', component_property='value')\n", (37706, 37763), False, 'from dash.dependencies import Input, Output\n'), ((41721, 41792), 'dash.dependencies.Input', 'Input', ([], {'component_id': '"""escalator_stadt_input"""', 'component_property': '"""value"""'}), "(component_id='escalator_stadt_input', component_property='value')\n", (41726, 41792), False, 'from dash.dependencies import Input, Output\n'), ((41799, 41875), 'dash.dependencies.Input', 'Input', ([], {'component_id': '"""escalator_bundesland_input"""', 'component_property': '"""value"""'}), 
"(component_id='escalator_bundesland_input', component_property='value')\n", (41804, 41875), False, 'from dash.dependencies import Input, Output\n'), ((41882, 41954), 'dash.dependencies.Input', 'Input', ([], {'component_id': '"""escalator_radio_button"""', 'component_property': '"""value"""'}), "(component_id='escalator_radio_button', component_property='value')\n", (41887, 41954), False, 'from dash.dependencies import Input, Output\n'), ((45894, 45959), 'dash.dependencies.Input', 'Input', ([], {'component_id': '"""aufzug_id_input"""', 'component_property': '"""value"""'}), "(component_id='aufzug_id_input', component_property='value')\n", (45899, 45959), False, 'from dash.dependencies import Input, Output\n'), ((46340, 46405), 'dash.dependencies.Input', 'Input', ([], {'component_id': '"""aufzug_id_input"""', 'component_property': '"""value"""'}), "(component_id='aufzug_id_input', component_property='value')\n", (46345, 46405), False, 'from dash.dependencies import Input, Output\n'), ((46767, 46832), 'dash.dependencies.Input', 'Input', ([], {'component_id': '"""aufzug_id_input"""', 'component_property': '"""value"""'}), "(component_id='aufzug_id_input', component_property='value')\n", (46772, 46832), False, 'from dash.dependencies import Input, Output\n'), ((47194, 47259), 'dash.dependencies.Input', 'Input', ([], {'component_id': '"""aufzug_id_input"""', 'component_property': '"""value"""'}), "(component_id='aufzug_id_input', component_property='value')\n", (47199, 47259), False, 'from dash.dependencies import Input, Output\n'), ((47614, 47679), 'dash.dependencies.Input', 'Input', ([], {'component_id': '"""aufzug_id_input"""', 'component_property': '"""value"""'}), "(component_id='aufzug_id_input', component_property='value')\n", (47619, 47679), False, 'from dash.dependencies import Input, Output\n'), ((48039, 48104), 'dash.dependencies.Input', 'Input', ([], {'component_id': '"""aufzug_id_input"""', 'component_property': '"""value"""'}), 
"(component_id='aufzug_id_input', component_property='value')\n", (48044, 48104), False, 'from dash.dependencies import Input, Output\n'), ((48727, 48796), 'dash.dependencies.Input', 'Input', ([], {'component_id': '"""rolltreppe_id_input"""', 'component_property': '"""value"""'}), "(component_id='rolltreppe_id_input', component_property='value')\n", (48732, 48796), False, 'from dash.dependencies import Input, Output\n'), ((49467, 49509), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""url"""', '"""pathname"""'], {}), "('url', 'pathname')\n", (49490, 49509), False, 'import dash\n'), ((3158, 3228), 'pandas.DataFrame', 'pd.DataFrame', (["facilities_distinct_inactive['Datum']"], {'columns': "['Datum']"}), "(facilities_distinct_inactive['Datum'], columns=['Datum'])\n", (3170, 3228), True, 'import pandas as pd\n'), ((15227, 15250), 'dash_table_experiments.DataTable', 'dt.DataTable', ([], {'rows': '[{}]'}), '(rows=[{}])\n', (15239, 15250), True, 'import dash_table_experiments as dt\n'), ((23405, 23439), 'dash_html_components.Div', 'html.Div', (['[]'], {'style': "{'height': 70}"}), "([], style={'height': 70})\n", (23413, 23439), True, 'import dash_html_components as html\n'), ((23445, 23454), 'dash_html_components.Hr', 'html.Hr', ([], {}), '()\n', (23452, 23454), True, 'import dash_html_components as html\n'), ((35856, 35890), 'dash_html_components.Div', 'html.Div', (['[]'], {'style': "{'height': 70}"}), "([], style={'height': 70})\n", (35864, 35890), True, 'import dash_html_components as html\n'), ((35896, 35905), 'dash_html_components.Hr', 'html.Hr', ([], {}), '()\n', (35903, 35905), True, 'import dash_html_components as html\n'), ((38031, 38106), 'folium.Map', 'folium.Map', ([], {'location': '[location.latitude, location.longitude]', 'zoom_start': '(10)'}), '(location=[location.latitude, location.longitude], zoom_start=10)\n', (38041, 38106), False, 'import folium\n'), ((42230, 42305), 'folium.Map', 'folium.Map', ([], {'location': '[location.latitude, 
location.longitude]', 'zoom_start': '(10)'}), '(location=[location.latitude, location.longitude], zoom_start=10)\n', (42240, 42305), False, 'import folium\n'), ((39102, 39177), 'folium.Map', 'folium.Map', ([], {'location': '[location.latitude, location.longitude]', 'zoom_start': '(10)'}), '(location=[location.latitude, location.longitude], zoom_start=10)\n', (39112, 39177), False, 'import folium\n'), ((40120, 40195), 'folium.Map', 'folium.Map', ([], {'location': '[location.latitude, location.longitude]', 'zoom_start': '(10)'}), '(location=[location.latitude, location.longitude], zoom_start=10)\n', (40130, 40195), False, 'import folium\n'), ((43280, 43355), 'folium.Map', 'folium.Map', ([], {'location': '[location.latitude, location.longitude]', 'zoom_start': '(10)'}), '(location=[location.latitude, location.longitude], zoom_start=10)\n', (43290, 43355), False, 'import folium\n'), ((44310, 44385), 'folium.Map', 'folium.Map', ([], {'location': '[location.latitude, location.longitude]', 'zoom_start': '(10)'}), '(location=[location.latitude, location.longitude], zoom_start=10)\n', (44320, 44385), False, 'import folium\n'), ((15364, 15497), 'dash_html_components.H1', 'html.H1', ([], {'style': "{'margin-left': 'auto', 'margin-right': 'auto', 'text-align': 'center',\n 'width': '15em'}", 'children': '"""EVA Dashboard"""'}), "(style={'margin-left': 'auto', 'margin-right': 'auto', 'text-align':\n 'center', 'width': '15em'}, children='EVA Dashboard')\n", (15371, 15497), True, 'import dash_html_components as html\n'), ((15566, 15575), 'dash_html_components.Hr', 'html.Hr', ([], {}), '()\n', (15573, 15575), True, 'import dash_html_components as html\n'), ((15586, 15752), 'dash_html_components.H1', 'html.H1', ([], {'style': "{'margin-left': 'auto', 'margin-right': 'auto', 'text-align': 'center',\n 'width': '15em', 'color': '#000099'}", 'children': '"""Der Rolltreppenwärter"""'}), "(style={'margin-left': 'auto', 'margin-right': 'auto', 'text-align':\n 'center', 'width': '15em', 
'color': '#000099'}, children=\n 'Der Rolltreppenwärter')\n", (15593, 15752), True, 'import dash_html_components as html\n'), ((16207, 16260), 'dash_core_components.Link', 'dcc.Link', (['"""Go to Page Aufzüge"""'], {'href': '"""/page_aufzuege"""'}), "('Go to Page Aufzüge', href='/page_aufzuege')\n", (16215, 16260), True, 'import dash_core_components as dcc\n'), ((17895, 17904), 'dash_html_components.Hr', 'html.Hr', ([], {}), '()\n', (17902, 17904), True, 'import dash_html_components as html\n'), ((18974, 18983), 'dash_html_components.Hr', 'html.Hr', ([], {}), '()\n', (18981, 18983), True, 'import dash_html_components as html\n'), ((25311, 25444), 'dash_html_components.H1', 'html.H1', ([], {'style': "{'margin-left': 'auto', 'margin-right': 'auto', 'text-align': 'center',\n 'width': '15em'}", 'children': '"""EVA Dashboard"""'}), "(style={'margin-left': 'auto', 'margin-right': 'auto', 'text-align':\n 'center', 'width': '15em'}, children='EVA Dashboard')\n", (25318, 25444), True, 'import dash_html_components as html\n'), ((25529, 25538), 'dash_html_components.Hr', 'html.Hr', ([], {}), '()\n', (25536, 25538), True, 'import dash_html_components as html\n'), ((25553, 25715), 'dash_html_components.H1', 'html.H1', ([], {'style': "{'margin-left': 'auto', 'margin-right': 'auto', 'text-align': 'center',\n 'width': '10em', 'color': '#000099'}", 'children': '"""Der Aufzugwächter"""'}), "(style={'margin-left': 'auto', 'margin-right': 'auto', 'text-align':\n 'center', 'width': '10em', 'color': '#000099'}, children=\n 'Der Aufzugwächter')\n", (25560, 25715), True, 'import dash_html_components as html\n'), ((26202, 26262), 'dash_core_components.Link', 'dcc.Link', (['"""Go to Page Rolltreppen"""'], {'href': '"""/page-rolltreppen"""'}), "('Go to Page Rolltreppen', href='/page-rolltreppen')\n", (26210, 26262), True, 'import dash_core_components as dcc\n'), ((26381, 26473), 'dash_html_components.Div', 'html.Div', (['[]'], {'style': "{'width': '10%', 'display': 'inline-block', 
'vertical-align': 'top'}"}), "([], style={'width': '10%', 'display': 'inline-block',\n 'vertical-align': 'top'})\n", (26389, 26473), True, 'import dash_html_components as html\n'), ((28128, 28137), 'dash_html_components.Hr', 'html.Hr', ([], {}), '()\n', (28135, 28137), True, 'import dash_html_components as html\n'), ((28221, 28233), 'dash_html_components.Div', 'html.Div', (['[]'], {}), '([])\n', (28229, 28233), True, 'import dash_html_components as html\n'), ((29654, 29723), 'dash_html_components.Hr', 'html.Hr', ([], {'style': "{'width': 1, 'height': 200, 'display': 'inline-block'}"}), "(style={'width': 1, 'height': 200, 'display': 'inline-block'})\n", (29661, 29723), True, 'import dash_html_components as html\n'), ((31053, 31062), 'dash_html_components.Hr', 'html.Hr', ([], {}), '()\n', (31060, 31062), True, 'import dash_html_components as html\n'), ((28266, 28406), 'dash_html_components.H3', 'html.H3', ([], {'style': "{'margin-left': 'auto', 'margin-right': 'auto', 'text-align': 'right',\n 'color': '#000099'}", 'children': '"""Wusstest du schon?"""'}), "(style={'margin-left': 'auto', 'margin-right': 'auto', 'text-align':\n 'right', 'color': '#000099'}, children='Wusstest du schon?')\n", (28273, 28406), True, 'import dash_html_components as html\n'), ((28444, 28453), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (28451, 28453), True, 'import dash_html_components as html\n'), ((28592, 28709), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""aeltester_aufzug"""', 'style': "{'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}"}), "(id='aeltester_aufzug', style={'margin-left': 'auto',\n 'margin-right': 'auto', 'display': 'inline-block'})\n", (28600, 28709), True, 'import dash_html_components as html\n'), ((28719, 28728), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (28726, 28728), True, 'import dash_html_components as html\n'), ((29042, 29158), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""meisten_aufzüge"""', 
'style': "{'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}"}), "(id='meisten_aufzüge', style={'margin-left': 'auto', 'margin-right':\n 'auto', 'display': 'inline-block'})\n", (29050, 29158), True, 'import dash_html_components as html\n'), ((29168, 29177), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (29175, 29177), True, 'import dash_html_components as html\n'), ((29379, 29495), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""meiste_ausfälle"""', 'style': "{'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}"}), "(id='meiste_ausfälle', style={'margin-left': 'auto', 'margin-right':\n 'auto', 'display': 'inline-block'})\n", (29387, 29495), True, 'import dash_html_components as html\n'), ((29505, 29514), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (29512, 29514), True, 'import dash_html_components as html\n'), ((29757, 29895), 'dash_html_components.H3', 'html.H3', ([], {'style': "{'margin-left': 'auto', 'margin-right': 'auto', 'text-align': 'left',\n 'color': '#000099'}", 'children': '"""Aggregierte Werte"""'}), "(style={'margin-left': 'auto', 'margin-right': 'auto', 'text-align':\n 'left', 'color': '#000099'}, children='Aggregierte Werte')\n", (29764, 29895), True, 'import dash_html_components as html\n'), ((19090, 19264), 'dash_html_components.H3', 'html.H3', ([], {'style': "{'margin-right': 'auto', 'text-align': 'left', 'color': '#000099'}", 'children': '"""Funktionieren die Rolltreppen an deiner Haltestelle? - Finde es heraus!"""'}), "(style={'margin-right': 'auto', 'text-align': 'left', 'color':\n '#000099'}, children=\n 'Funktionieren die Rolltreppen an deiner Haltestelle? 
- Finde es heraus!')\n", (19097, 19264), True, 'import dash_html_components as html\n'), ((19650, 19758), 'dash_html_components.Div', 'html.Div', (["['Stadt: ']"], {'style': "{'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}"}), "(['Stadt: '], style={'margin-left': 'auto', 'margin-right': 'auto',\n 'display': 'inline-block'})\n", (19658, 19758), True, 'import dash_html_components as html\n'), ((19797, 19950), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""escalator_stadt_input"""', 'value': '"""Frankfurt"""', 'type': '"""text"""', 'style': "{'margin-left': '5', 'margin-right': 'auto', 'display': 'inline-block'}"}), "(id='escalator_stadt_input', value='Frankfurt', type='text', style\n ={'margin-left': '5', 'margin-right': 'auto', 'display': 'inline-block'})\n", (19806, 19950), True, 'import dash_core_components as dcc\n'), ((19989, 20100), 'dash_html_components.Div', 'html.Div', (["['Bundesland: ']"], {'style': "{'margin-left': '15', 'margin-right': 'auto', 'display': 'inline-block'}"}), "(['Bundesland: '], style={'margin-left': '15', 'margin-right':\n 'auto', 'display': 'inline-block'})\n", (19997, 20100), True, 'import dash_html_components as html\n'), ((20139, 20297), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""escalator_bundesland_input"""', 'value': '"""Hessen"""', 'type': '"""text"""', 'style': "{'margin-left': '5', 'margin-right': 'auto', 'display': 'inline-block'}"}), "(id='escalator_bundesland_input', value='Hessen', type='text',\n style={'margin-left': '5', 'margin-right': 'auto', 'display':\n 'inline-block'})\n", (20148, 20297), True, 'import dash_core_components as dcc\n'), ((20333, 20342), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (20340, 20342), True, 'import dash_html_components as html\n'), ((20344, 20353), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (20351, 20353), True, 'import dash_html_components as html\n'), ((20371, 20641), 'dash_core_components.RadioItems', 
'dcc.RadioItems', ([], {'id': '"""escalator_radio_button"""', 'options': "[{'label': 'Aktive Rolltreppen', 'value': 'aktiv'}, {'label':\n 'Inaktive Rolltreppen', 'value': 'inaktiv'}, {'label':\n ' Alle Rolltreppen', 'value': 'beide'}]", 'value': '"""inaktiv"""', 'style': "{'margin-left': 10}"}), "(id='escalator_radio_button', options=[{'label':\n 'Aktive Rolltreppen', 'value': 'aktiv'}, {'label':\n 'Inaktive Rolltreppen', 'value': 'inaktiv'}, {'label':\n ' Alle Rolltreppen', 'value': 'beide'}], value='inaktiv', style={\n 'margin-left': 10})\n", (20385, 20641), True, 'import dash_core_components as dcc\n'), ((21750, 21759), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (21757, 21759), True, 'import dash_html_components as html\n'), ((21761, 21770), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (21768, 21770), True, 'import dash_html_components as html\n'), ((21788, 21905), 'dash_html_components.Div', 'html.Div', (["['Rolltreppen-ID: ']"], {'style': "{'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}"}), "(['Rolltreppen-ID: '], style={'margin-left': 'auto',\n 'margin-right': 'auto', 'display': 'inline-block'})\n", (21796, 21905), True, 'import dash_html_components as html\n'), ((21944, 22075), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""rolltreppe_id_input"""', 'type': '"""text"""', 'style': "{'margin-left': '5', 'margin-right': 'auto', 'display': 'inline-block'}"}), "(id='rolltreppe_id_input', type='text', style={'margin-left': '5',\n 'margin-right': 'auto', 'display': 'inline-block'})\n", (21953, 22075), True, 'import dash_core_components as dcc\n'), ((22115, 22124), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (22122, 22124), True, 'import dash_html_components as html\n'), ((22142, 22151), 'dash_html_components.Hr', 'html.Hr', ([], {}), '()\n', (22149, 22151), True, 'import dash_html_components as html\n'), ((29963, 29987), 'dash_html_components.Div', 'html.Div', (['"""Antriebsart:"""'], {}), 
"('Antriebsart:')\n", (29971, 29987), True, 'import dash_html_components as html\n'), ((30005, 30014), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (30012, 30014), True, 'import dash_html_components as html\n'), ((30016, 30025), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (30023, 30025), True, 'import dash_html_components as html\n'), ((30027, 30036), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (30034, 30036), True, 'import dash_html_components as html\n'), ((30038, 30047), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (30045, 30047), True, 'import dash_html_components as html\n'), ((30065, 30092), 'dash_html_components.Div', 'html.Div', (['"""Top Hersteller:"""'], {}), "('Top Hersteller:')\n", (30073, 30092), True, 'import dash_html_components as html\n'), ((30110, 30119), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (30117, 30119), True, 'import dash_html_components as html\n'), ((30387, 30396), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (30394, 30396), True, 'import dash_html_components as html\n'), ((30398, 30407), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (30405, 30407), True, 'import dash_html_components as html\n'), ((30409, 30418), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (30416, 30418), True, 'import dash_html_components as html\n'), ((31167, 31337), 'dash_html_components.H3', 'html.H3', ([], {'style': "{'margin-right': 'auto', 'text-align': 'left', 'color': '#000099'}", 'children': '"""Funktionieren die Aufzüge an deiner Haltestelle? - Finde es heraus!"""'}), "(style={'margin-right': 'auto', 'text-align': 'left', 'color':\n '#000099'}, children=\n 'Funktionieren die Aufzüge an deiner Haltestelle? 
- Finde es heraus!')\n", (31174, 31337), True, 'import dash_html_components as html\n'), ((31438, 31546), 'dash_html_components.Div', 'html.Div', (["['Stadt: ']"], {'style': "{'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}"}), "(['Stadt: '], style={'margin-left': 'auto', 'margin-right': 'auto',\n 'display': 'inline-block'})\n", (31446, 31546), True, 'import dash_html_components as html\n'), ((31560, 31703), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""stadt_input"""', 'value': '"""Frankfurt"""', 'type': '"""text"""', 'style': "{'margin-left': '5', 'margin-right': 'auto', 'display': 'inline-block'}"}), "(id='stadt_input', value='Frankfurt', type='text', style={\n 'margin-left': '5', 'margin-right': 'auto', 'display': 'inline-block'})\n", (31569, 31703), True, 'import dash_core_components as dcc\n'), ((31716, 31827), 'dash_html_components.Div', 'html.Div', (["['Bundesland: ']"], {'style': "{'margin-left': '15', 'margin-right': 'auto', 'display': 'inline-block'}"}), "(['Bundesland: '], style={'margin-left': '15', 'margin-right':\n 'auto', 'display': 'inline-block'})\n", (31724, 31827), True, 'import dash_html_components as html\n'), ((31841, 31986), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""bundesland_input"""', 'value': '"""Hessen"""', 'type': '"""text"""', 'style': "{'margin-left': '5', 'margin-right': 'auto', 'display': 'inline-block'}"}), "(id='bundesland_input', value='Hessen', type='text', style={\n 'margin-left': '5', 'margin-right': 'auto', 'display': 'inline-block'})\n", (31850, 31986), True, 'import dash_core_components as dcc\n'), ((31999, 32008), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (32006, 32008), True, 'import dash_html_components as html\n'), ((32010, 32019), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (32017, 32019), True, 'import dash_html_components as html\n'), ((32037, 32282), 'dash_core_components.RadioItems', 'dcc.RadioItems', ([], {'id': 
'"""radio_button"""', 'options': "[{'label': 'Aktive Aufzüge', 'value': 'aktiv'}, {'label':\n 'Inaktive Aufzüge', 'value': 'inaktiv'}, {'label': ' Alle Aufzüge',\n 'value': 'beide'}]", 'value': '"""inaktiv"""', 'style': "{'margin-left': 10}"}), "(id='radio_button', options=[{'label': 'Aktive Aufzüge',\n 'value': 'aktiv'}, {'label': 'Inaktive Aufzüge', 'value': 'inaktiv'}, {\n 'label': ' Alle Aufzüge', 'value': 'beide'}], value='inaktiv', style={\n 'margin-left': 10})\n", (32051, 32282), True, 'import dash_core_components as dcc\n'), ((32755, 32764), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (32762, 32764), True, 'import dash_html_components as html\n'), ((32766, 32775), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (32773, 32775), True, 'import dash_html_components as html\n'), ((32793, 32905), 'dash_html_components.Div', 'html.Div', (["['Aufzug-ID: ']"], {'style': "{'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}"}), "(['Aufzug-ID: '], style={'margin-left': 'auto', 'margin-right':\n 'auto', 'display': 'inline-block'})\n", (32801, 32905), True, 'import dash_html_components as html\n'), ((32919, 33046), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""aufzug_id_input"""', 'type': '"""text"""', 'style': "{'margin-left': '5', 'margin-right': 'auto', 'display': 'inline-block'}"}), "(id='aufzug_id_input', type='text', style={'margin-left': '5',\n 'margin-right': 'auto', 'display': 'inline-block'})\n", (32928, 33046), True, 'import dash_core_components as dcc\n'), ((33086, 33095), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (33093, 33095), True, 'import dash_html_components as html\n'), ((33113, 33122), 'dash_html_components.Hr', 'html.Hr', ([], {}), '()\n', (33120, 33122), True, 'import dash_html_components as html\n'), ((38586, 38630), 'folium.Icon', 'folium.Icon', ([], {'color': '"""green"""', 'icon': '"""info-sign"""'}), "(color='green', icon='info-sign')\n", (38597, 38630), False, 'import 
folium\n'), ((42764, 42808), 'folium.Icon', 'folium.Icon', ([], {'color': '"""green"""', 'icon': '"""info-sign"""'}), "(color='green', icon='info-sign')\n", (42775, 42808), False, 'import folium\n'), ((39626, 39668), 'folium.Icon', 'folium.Icon', ([], {'color': '"""red"""', 'icon': '"""info-sign"""'}), "(color='red', icon='info-sign')\n", (39637, 39668), False, 'import folium\n'), ((40642, 40686), 'folium.Icon', 'folium.Icon', ([], {'color': '"""green"""', 'icon': '"""info-sign"""'}), "(color='green', icon='info-sign')\n", (40653, 40686), False, 'import folium\n'), ((41167, 41209), 'folium.Icon', 'folium.Icon', ([], {'color': '"""red"""', 'icon': '"""info-sign"""'}), "(color='red', icon='info-sign')\n", (41178, 41209), False, 'import folium\n'), ((43816, 43858), 'folium.Icon', 'folium.Icon', ([], {'color': '"""red"""', 'icon': '"""info-sign"""'}), "(color='red', icon='info-sign')\n", (43827, 43858), False, 'import folium\n'), ((44844, 44888), 'folium.Icon', 'folium.Icon', ([], {'color': '"""green"""', 'icon': '"""info-sign"""'}), "(color='green', icon='info-sign')\n", (44855, 44888), False, 'import folium\n'), ((45360, 45402), 'folium.Icon', 'folium.Icon', ([], {'color': '"""red"""', 'icon': '"""info-sign"""'}), "(color='red', icon='info-sign')\n", (45371, 45402), False, 'import folium\n'), ((22227, 22469), 'dash_table_experiments.DataTable', 'dt.DataTable', ([], {'rows': '[{}]', 'columns': "['Datum_Uhrzeit', 'Status', 'Erklärung des Status']", 'editable': '(False)', 'row_selectable': '(False)', 'filterable': '(False)', 'sortable': '(False)', 'id': '"""datatable-status-escalator"""', 'selected_row_indices': '[]', 'min_height': '(250)'}), "(rows=[{}], columns=['Datum_Uhrzeit', 'Status',\n 'Erklärung des Status'], editable=False, row_selectable=False,\n filterable=False, sortable=False, id='datatable-status-escalator',\n selected_row_indices=[], min_height=250)\n", (22239, 22469), True, 'import dash_table_experiments as dt\n'), ((22718, 22727), 
'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (22725, 22727), True, 'import dash_html_components as html\n'), ((33171, 33286), 'dash_html_components.Div', 'html.Div', (["['Stationsname: ']"], {'style': "{'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}"}), "(['Stationsname: '], style={'margin-left': 'auto', 'margin-right':\n 'auto', 'display': 'inline-block'})\n", (33179, 33286), True, 'import dash_html_components as html\n'), ((33304, 33313), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (33311, 33313), True, 'import dash_html_components as html\n'), ((33315, 33324), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (33322, 33324), True, 'import dash_html_components as html\n'), ((33346, 33461), 'dash_html_components.Div', 'html.Div', (["['Beschreibung: ']"], {'style': "{'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}"}), "(['Beschreibung: '], style={'margin-left': 'auto', 'margin-right':\n 'auto', 'display': 'inline-block'})\n", (33354, 33461), True, 'import dash_html_components as html\n'), ((33479, 33488), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (33486, 33488), True, 'import dash_html_components as html\n'), ((33490, 33499), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (33497, 33499), True, 'import dash_html_components as html\n'), ((33521, 33634), 'dash_html_components.Div', 'html.Div', (["['Hersteller: ']"], {'style': "{'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}"}), "(['Hersteller: '], style={'margin-left': 'auto', 'margin-right':\n 'auto', 'display': 'inline-block'})\n", (33529, 33634), True, 'import dash_html_components as html\n'), ((33652, 33661), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (33659, 33661), True, 'import dash_html_components as html\n'), ((33663, 33672), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (33670, 33672), True, 'import dash_html_components as html\n'), ((33694, 33808), 
'dash_html_components.Div', 'html.Div', (["['Antriebsart: ']"], {'style': "{'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}"}), "(['Antriebsart: '], style={'margin-left': 'auto', 'margin-right':\n 'auto', 'display': 'inline-block'})\n", (33702, 33808), True, 'import dash_html_components as html\n'), ((33826, 33835), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (33833, 33835), True, 'import dash_html_components as html\n'), ((33837, 33846), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (33844, 33846), True, 'import dash_html_components as html\n'), ((33868, 33978), 'dash_html_components.Div', 'html.Div', (["['Baujahr: ']"], {'style': "{'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}"}), "(['Baujahr: '], style={'margin-left': 'auto', 'margin-right':\n 'auto', 'display': 'inline-block'})\n", (33876, 33978), True, 'import dash_html_components as html\n'), ((33996, 34005), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (34003, 34005), True, 'import dash_html_components as html\n'), ((34007, 34016), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (34014, 34016), True, 'import dash_html_components as html\n'), ((34136, 34249), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""stationsname"""', 'style': "{'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}"}), "(id='stationsname', style={'margin-left': 'auto', 'margin-right':\n 'auto', 'display': 'inline-block'})\n", (34144, 34249), True, 'import dash_html_components as html\n'), ((34267, 34276), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (34274, 34276), True, 'import dash_html_components as html\n'), ((34278, 34287), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (34285, 34287), True, 'import dash_html_components as html\n'), ((34309, 34422), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""beschreibung"""', 'style': "{'margin-left': 'auto', 'margin-right': 'auto', 'display': 
'inline-block'}"}), "(id='beschreibung', style={'margin-left': 'auto', 'margin-right':\n 'auto', 'display': 'inline-block'})\n", (34317, 34422), True, 'import dash_html_components as html\n'), ((34440, 34449), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (34447, 34449), True, 'import dash_html_components as html\n'), ((34451, 34460), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (34458, 34460), True, 'import dash_html_components as html\n'), ((34482, 34593), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""hersteller"""', 'style': "{'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}"}), "(id='hersteller', style={'margin-left': 'auto', 'margin-right':\n 'auto', 'display': 'inline-block'})\n", (34490, 34593), True, 'import dash_html_components as html\n'), ((34610, 34619), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (34617, 34619), True, 'import dash_html_components as html\n'), ((34621, 34630), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (34628, 34630), True, 'import dash_html_components as html\n'), ((34652, 34760), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""antrieb"""', 'style': "{'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}"}), "(id='antrieb', style={'margin-left': 'auto', 'margin-right': 'auto',\n 'display': 'inline-block'})\n", (34660, 34760), True, 'import dash_html_components as html\n'), ((34778, 34787), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (34785, 34787), True, 'import dash_html_components as html\n'), ((34789, 34798), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (34796, 34798), True, 'import dash_html_components as html\n'), ((34820, 34928), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""baujahr"""', 'style': "{'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}"}), "(id='baujahr', style={'margin-left': 'auto', 'margin-right': 'auto',\n 'display': 'inline-block'})\n", (34828, 
34928), True, 'import dash_html_components as html\n'), ((34946, 34955), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (34953, 34955), True, 'import dash_html_components as html\n'), ((34957, 34966), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (34964, 34966), True, 'import dash_html_components as html\n'), ((35113, 35354), 'dash_table_experiments.DataTable', 'dt.DataTable', ([], {'rows': '[{}]', 'columns': "['Datum_Uhrzeit', 'Status', 'Erklärung des Status']", 'editable': '(False)', 'row_selectable': '(False)', 'filterable': '(False)', 'sortable': '(False)', 'id': '"""datatable-status-elevator"""', 'selected_row_indices': '[]', 'min_height': '(250)'}), "(rows=[{}], columns=['Datum_Uhrzeit', 'Status',\n 'Erklärung des Status'], editable=False, row_selectable=False,\n filterable=False, sortable=False, id='datatable-status-elevator',\n selected_row_indices=[], min_height=250)\n", (35125, 35354), True, 'import dash_table_experiments as dt\n'), ((35604, 35613), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (35611, 35613), True, 'import dash_html_components as html\n'), ((18526, 18547), 'plotly.graph_objs.Legend', 'go.Legend', ([], {'x': '(0)', 'y': '(1.0)'}), '(x=0, y=1.0)\n', (18535, 18547), True, 'import plotly.graph_objs as go\n'), ((18646, 18678), 'plotly.graph_objs.Margin', 'go.Margin', ([], {'l': '(40)', 'r': '(0)', 't': '(40)', 'b': '(30)'}), '(l=40, r=0, t=40, b=30)\n', (18655, 18678), True, 'import plotly.graph_objs as go\n'), ((18216, 18251), 'plotly.graph_objs.Marker', 'go.Marker', ([], {'color': '"""rgb(55, 83, 109)"""'}), "(color='rgb(55, 83, 109)')\n", (18225, 18251), True, 'import plotly.graph_objs as go\n')] |
# Trinket IO demo
# Welcome to CircuitPython 2.0.0 :)
import board
from digitalio import DigitalInOut, Direction, Pull
from analogio import AnalogOut, AnalogIn
import touchio
from adafruit_hid.keyboard import Keyboard
from adafruit_hid.keycode import Keycode
import adafruit_dotstar as dotstar
import time
import neopixel
from busio import I2C
from board import SCL, SDA
import adafruit_sht31d

# SHT31-D temperature/humidity sensor on the shared I2C bus.
i2c = I2C(SCL, SDA)
sensor = adafruit_sht31d.SHT31D(i2c)

# Unused demo wiring kept for reference:
# # Built in red LED
# led = DigitalInOut(board.D13)
# led.direction = Direction.OUTPUT
# # Analog input on D0
# analog1in = AnalogIn(board.D0)
# # Analog output on D1
# aout = AnalogOut(board.D1)
# # Digital input with pullup on D2
# button = DigitalInOut(board.D2)
# button.direction = Direction.INPUT
# button.pull = Pull.UP
# # Used if we do HID output, see below
# kbd = Keyboard()

######################### HELPERS ##############################

# # Helper to convert analog input to voltage
# def getVoltage(pin):
#     return (pin.value * 3.3) / 65536

######################### MAIN LOOP ##############################

# Number of samples accumulated before one averaged report is printed.
averages = 1
# report_time = 0.0
# loop_time = report_time/averages

i = 0
temperature = 0
relative_humidity = 0
while True:
    # Accumulate one fresh reading per loop iteration.
    temperature += sensor.temperature
    relative_humidity += sensor.relative_humidity
    if i == averages - 1:
        temperature /= averages
        relative_humidity /= averages
        # Emit one JSON report per averaging window.
        output = ""
        output += '{'
        output += ' "guid": "btrn-tmp-sensor-0001",'
        # BUG FIX: report the computed averages instead of taking two fresh
        # sensor readings, which silently discarded the averaging above.
        output += ' "temperature": %0.2f,' % temperature
        # BUG FIX: no trailing comma on the last field, so the printed
        # object is valid JSON (RFC 8259 forbids trailing commas).
        output += ' "relative_humidity": %0.2f' % relative_humidity
        output += '}'
        print(output)
        # Reset accumulators for the next averaging window.
        temperature = 0
        relative_humidity = 0
    i = (i + 1) % averages
    # time.sleep(loop_time)
| [
"adafruit_sht31d.SHT31D",
"busio.I2C"
] | [((405, 418), 'busio.I2C', 'I2C', (['SCL', 'SDA'], {}), '(SCL, SDA)\n', (408, 418), False, 'from busio import I2C\n'), ((429, 456), 'adafruit_sht31d.SHT31D', 'adafruit_sht31d.SHT31D', (['i2c'], {}), '(i2c)\n', (451, 456), False, 'import adafruit_sht31d\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the storage media RAW image support helper functions."""
import unittest
from dfvfs.lib import raw
from dfvfs.lib import definitions
from dfvfs.path import fake_path_spec
from dfvfs.path import raw_path_spec
from dfvfs.resolver import context
from dfvfs.vfs import fake_file_system
class GlobRawFileTest(unittest.TestCase):
  """The unit test for the storage media RAW image file glob functionality."""

  def _BuildFileFakeFileSystem(
      self, segment_filenames, segment_file_path_specs):
    """Builds a fake file system containing storage media RAW segment files.

    Args:
      segment_filenames: a list of segment filenames.
      segment_file_path_specs: a list to store the segment file path
                               specifications in.

    Returns:
      The fake file system (instance of dvfvs.FakeFileSystem).
    """
    resolver_context = context.Context()
    file_system = fake_file_system.FakeFileSystem(resolver_context)

    file_system.AddFileEntry(
        u'/', file_entry_type=definitions.FILE_ENTRY_TYPE_DIRECTORY)

    for segment_filename in segment_filenames:
      path = u'/{0:s}'.format(segment_filename)
      file_system.AddFileEntry(path)
      segment_file_path_specs.append(fake_path_spec.FakePathSpec(location=path))

    return file_system

  def _AssertGlobRawPathSpec(
      self, file_system, location, expected_segment_file_path_specs):
    """Globs a RAW path specification and asserts the expected result.

    Extracted helper for the glob-and-compare pattern every test repeats.

    Args:
      file_system: the fake file system (instance of dfvfs.FakeFileSystem).
      location: the location of the first segment file.
      expected_segment_file_path_specs: the expected segment file path
                                        specifications.
    """
    path_spec = fake_path_spec.FakePathSpec(location=location)
    path_spec = raw_path_spec.RawPathSpec(parent=path_spec)
    segment_file_path_specs = raw.RawGlobPathSpec(file_system, path_spec)

    self.assertEqual(
        len(segment_file_path_specs), len(expected_segment_file_path_specs))
    self.assertEqual(
        segment_file_path_specs, expected_segment_file_path_specs)

  def testGlobRawSinglecExtension(self):
    """Test the glob function for a RAW single extension scheme."""
    # NOTE: "Singlec" is a historical typo; the name is kept so the test
    # identifier (and any test selection referring to it) stays stable.
    # Test single segment files: dd, dmg, img and raw.
    for segment_filename in [
        u'ímynd.dd', u'image.dmg', u'image.img', u'image.raw']:
      expected_segment_file_path_specs = []
      file_system = self._BuildFileFakeFileSystem(
          [segment_filename], expected_segment_file_path_specs)

      self._AssertGlobRawPathSpec(
          file_system, u'/{0:s}'.format(segment_filename),
          expected_segment_file_path_specs)

  def testGlobRawAlphabeticalExtension(self):
    """Test the glob function for a RAW alphabetical extension scheme."""
    segment_filenames = [u'image.aaa']
    expected_segment_file_path_specs = []
    file_system = self._BuildFileFakeFileSystem(
        segment_filenames, expected_segment_file_path_specs)

    # Test single segment file: aaa.
    self._AssertGlobRawPathSpec(
        file_system, u'/image.aaa', expected_segment_file_path_specs)

    # Test non existing segment file: aaa.
    self._AssertGlobRawPathSpec(file_system, u'/bogus.aaa', [])

    # Test multiple segment files: aaa-aak.
    segment_filenames = [
        u'image.aaa', u'image.aab', u'image.aac', u'image.aad', u'image.aae',
        u'image.aaf', u'image.aag', u'image.aah', u'image.aai', u'image.aaj',
        u'image.aak']
    expected_segment_file_path_specs = []
    file_system = self._BuildFileFakeFileSystem(
        segment_filenames, expected_segment_file_path_specs)

    self._AssertGlobRawPathSpec(
        file_system, u'/image.aaa', expected_segment_file_path_specs)

    # Test multiple segment files: AAA-AAk.
    segment_filenames = [
        u'image.AAA', u'image.AAB', u'image.AAC', u'image.AAD', u'image.AAE',
        u'image.AAF', u'image.AAG', u'image.AAH', u'image.AAI', u'image.AAJ',
        u'image.AAK']
    expected_segment_file_path_specs = []
    file_system = self._BuildFileFakeFileSystem(
        segment_filenames, expected_segment_file_path_specs)

    self._AssertGlobRawPathSpec(
        file_system, u'/image.AAA', expected_segment_file_path_specs)

  def testGlobRawAlphabeticalSuffix(self):
    """Test the glob function for a RAW alphabetical suffix scheme."""
    segment_filenames = [u'imageaaa']
    expected_segment_file_path_specs = []
    file_system = self._BuildFileFakeFileSystem(
        segment_filenames, expected_segment_file_path_specs)

    # Test single segment file: aaa.
    self._AssertGlobRawPathSpec(
        file_system, u'/imageaaa', expected_segment_file_path_specs)

    # Test non existing segment file: aaa.
    self._AssertGlobRawPathSpec(file_system, u'/bogusaaa', [])

    # Test multiple segment files: aaa-aak.
    segment_filenames = [
        u'imageaaa', u'imageaab', u'imageaac', u'imageaad', u'imageaae',
        u'imageaaf', u'imageaag', u'imageaah', u'imageaai', u'imageaaj',
        u'imageaak']
    expected_segment_file_path_specs = []
    file_system = self._BuildFileFakeFileSystem(
        segment_filenames, expected_segment_file_path_specs)

    self._AssertGlobRawPathSpec(
        file_system, u'/imageaaa', expected_segment_file_path_specs)

    # Test multiple segment files: AAA-AAk.
    segment_filenames = [
        u'imageAAA', u'imageAAB', u'imageAAC', u'imageAAD', u'imageAAE',
        u'imageAAF', u'imageAAG', u'imageAAH', u'imageAAI', u'imageAAJ',
        u'imageAAK']
    expected_segment_file_path_specs = []
    file_system = self._BuildFileFakeFileSystem(
        segment_filenames, expected_segment_file_path_specs)

    self._AssertGlobRawPathSpec(
        file_system, u'/imageAAA', expected_segment_file_path_specs)

  def testGlobRawNumericExtension(self):
    """Test the glob function for a RAW numeric extension scheme."""
    segment_filenames = [u'image.000']
    expected_segment_file_path_specs = []
    file_system = self._BuildFileFakeFileSystem(
        segment_filenames, expected_segment_file_path_specs)

    # Test single segment file: 000.
    self._AssertGlobRawPathSpec(
        file_system, u'/image.000', expected_segment_file_path_specs)

    # Test non existing segment file: 000.
    self._AssertGlobRawPathSpec(file_system, u'/bogus.000', [])

    # Test multiple segment files: 000-010.
    segment_filenames = [
        u'image.000', u'image.001', u'image.002', u'image.003', u'image.004',
        u'image.005', u'image.006', u'image.007', u'image.008', u'image.009',
        u'image.010']
    expected_segment_file_path_specs = []
    file_system = self._BuildFileFakeFileSystem(
        segment_filenames, expected_segment_file_path_specs)

    self._AssertGlobRawPathSpec(
        file_system, u'/image.000', expected_segment_file_path_specs)

    # Test multiple segment files: 001-010.
    segment_filenames = [
        u'image.001', u'image.002', u'image.003', u'image.004', u'image.005',
        u'image.006', u'image.007', u'image.008', u'image.009', u'image.010']
    expected_segment_file_path_specs = []
    file_system = self._BuildFileFakeFileSystem(
        segment_filenames, expected_segment_file_path_specs)

    self._AssertGlobRawPathSpec(
        file_system, u'/image.001', expected_segment_file_path_specs)

    # Test multiple segment files: 1-10.
    segment_filenames = [
        u'image.1', u'image.2', u'image.3', u'image.4', u'image.5',
        u'image.6', u'image.7', u'image.8', u'image.9', u'image.10']
    expected_segment_file_path_specs = []
    file_system = self._BuildFileFakeFileSystem(
        segment_filenames, expected_segment_file_path_specs)

    self._AssertGlobRawPathSpec(
        file_system, u'/image.1', expected_segment_file_path_specs)

  def testGlobRawNumericSuffix(self):
    """Test the glob function for a RAW numeric suffix scheme."""
    segment_filenames = [u'image1']
    expected_segment_file_path_specs = []
    file_system = self._BuildFileFakeFileSystem(
        segment_filenames, expected_segment_file_path_specs)

    # Test single segment file: 1.
    self._AssertGlobRawPathSpec(
        file_system, u'/image1', expected_segment_file_path_specs)

    # Test non existing segment file: 1.
    self._AssertGlobRawPathSpec(file_system, u'/bogus1', [])

    # Test multiple segment files: 0-10.
    segment_filenames = [
        u'image0', u'image1', u'image2', u'image3', u'image4', u'image5',
        u'image6', u'image7', u'image8', u'image9', u'image10']
    expected_segment_file_path_specs = []
    file_system = self._BuildFileFakeFileSystem(
        segment_filenames, expected_segment_file_path_specs)

    self._AssertGlobRawPathSpec(
        file_system, u'/image0', expected_segment_file_path_specs)

    # Test multiple segment files: 1-10.
    segment_filenames = [
        u'image1', u'image2', u'image3', u'image4', u'image5',
        u'image6', u'image7', u'image8', u'image9', u'image10']
    expected_segment_file_path_specs = []
    file_system = self._BuildFileFakeFileSystem(
        segment_filenames, expected_segment_file_path_specs)

    self._AssertGlobRawPathSpec(
        file_system, u'/image1', expected_segment_file_path_specs)

    # Test multiple segment files: 001-010.
    segment_filenames = [
        u'image001', u'image002', u'image003', u'image004', u'image005',
        u'image006', u'image007', u'image008', u'image009', u'image010']
    expected_segment_file_path_specs = []
    file_system = self._BuildFileFakeFileSystem(
        segment_filenames, expected_segment_file_path_specs)

    self._AssertGlobRawPathSpec(
        file_system, u'/image001', expected_segment_file_path_specs)

  def testGlobRawAsbExtension(self):
    """Test the glob function for a RAW ASB extension scheme."""
    segment_filenames = [u'image001.asb']
    expected_segment_file_path_specs = []
    file_system = self._BuildFileFakeFileSystem(
        segment_filenames, expected_segment_file_path_specs)

    # Test single segment file: 001.
    self._AssertGlobRawPathSpec(
        file_system, u'/image001.asb', expected_segment_file_path_specs)

    # Test non existing segment file: 001.
    self._AssertGlobRawPathSpec(file_system, u'/bogus000.asb', [])

    # Test multiple segment files: 001-010.
    segment_filenames = [
        u'image001.asb', u'image002.asb', u'image003.asb', u'image004.asb',
        u'image005.asb', u'image006.asb', u'image007.asb', u'image008.asb',
        u'image009.asb', u'image010.asb']
    expected_segment_file_path_specs = []
    file_system = self._BuildFileFakeFileSystem(
        segment_filenames, expected_segment_file_path_specs)

    self._AssertGlobRawPathSpec(
        file_system, u'/image001.asb', expected_segment_file_path_specs)

  def testGlobRawVmdkExtension(self):
    """Test the glob function for a RAW VMDK extension scheme."""
    segment_filenames = [u'image-f001.vmdk']
    expected_segment_file_path_specs = []
    file_system = self._BuildFileFakeFileSystem(
        segment_filenames, expected_segment_file_path_specs)

    # Test single segment file: 001.
    self._AssertGlobRawPathSpec(
        file_system, u'/image-f001.vmdk', expected_segment_file_path_specs)

    # Test non existing segment file: 001.
    self._AssertGlobRawPathSpec(file_system, u'/bogus-f000.vmdk', [])

    # Test multiple segment files: 001-010.
    segment_filenames = [
        u'image-f001.vmdk', u'image-f002.vmdk', u'image-f003.vmdk',
        u'image-f004.vmdk', u'image-f005.vmdk', u'image-f006.vmdk',
        u'image-f007.vmdk', u'image-f008.vmdk', u'image-f009.vmdk',
        u'image-f010.vmdk']
    expected_segment_file_path_specs = []
    file_system = self._BuildFileFakeFileSystem(
        segment_filenames, expected_segment_file_path_specs)

    self._AssertGlobRawPathSpec(
        file_system, u'/image-f001.vmdk', expected_segment_file_path_specs)
# Run this module's tests when the file is executed directly.
if __name__ == '__main__':
  unittest.main()
| [
"dfvfs.lib.raw.RawGlobPathSpec",
"dfvfs.path.raw_path_spec.RawPathSpec",
"dfvfs.vfs.fake_file_system.FakeFileSystem",
"dfvfs.path.fake_path_spec.FakePathSpec",
"unittest.main",
"dfvfs.resolver.context.Context"
] | [((20665, 20680), 'unittest.main', 'unittest.main', ([], {}), '()\n', (20678, 20680), False, 'import unittest\n'), ((992, 1009), 'dfvfs.resolver.context.Context', 'context.Context', ([], {}), '()\n', (1007, 1009), False, 'from dfvfs.resolver import context\n'), ((1028, 1077), 'dfvfs.vfs.fake_file_system.FakeFileSystem', 'fake_file_system.FakeFileSystem', (['resolver_context'], {}), '(resolver_context)\n', (1059, 1077), False, 'from dfvfs.vfs import fake_file_system\n'), ((1770, 1820), 'dfvfs.path.fake_path_spec.FakePathSpec', 'fake_path_spec.FakePathSpec', ([], {'location': 'u"""/ímynd.dd"""'}), "(location=u'/ímynd.dd')\n", (1797, 1820), False, 'from dfvfs.path import fake_path_spec\n'), ((1837, 1880), 'dfvfs.path.raw_path_spec.RawPathSpec', 'raw_path_spec.RawPathSpec', ([], {'parent': 'path_spec'}), '(parent=path_spec)\n', (1862, 1880), False, 'from dfvfs.path import raw_path_spec\n'), ((1912, 1955), 'dfvfs.lib.raw.RawGlobPathSpec', 'raw.RawGlobPathSpec', (['file_system', 'path_spec'], {}), '(file_system, path_spec)\n', (1931, 1955), False, 'from dfvfs.lib import raw\n'), ((2390, 2441), 'dfvfs.path.fake_path_spec.FakePathSpec', 'fake_path_spec.FakePathSpec', ([], {'location': 'u"""/image.dmg"""'}), "(location=u'/image.dmg')\n", (2417, 2441), False, 'from dfvfs.path import fake_path_spec\n'), ((2458, 2501), 'dfvfs.path.raw_path_spec.RawPathSpec', 'raw_path_spec.RawPathSpec', ([], {'parent': 'path_spec'}), '(parent=path_spec)\n', (2483, 2501), False, 'from dfvfs.path import raw_path_spec\n'), ((2533, 2576), 'dfvfs.lib.raw.RawGlobPathSpec', 'raw.RawGlobPathSpec', (['file_system', 'path_spec'], {}), '(file_system, path_spec)\n', (2552, 2576), False, 'from dfvfs.lib import raw\n'), ((3011, 3062), 'dfvfs.path.fake_path_spec.FakePathSpec', 'fake_path_spec.FakePathSpec', ([], {'location': 'u"""/image.img"""'}), "(location=u'/image.img')\n", (3038, 3062), False, 'from dfvfs.path import fake_path_spec\n'), ((3079, 3122), 'dfvfs.path.raw_path_spec.RawPathSpec', 
'raw_path_spec.RawPathSpec', ([], {'parent': 'path_spec'}), '(parent=path_spec)\n', (3104, 3122), False, 'from dfvfs.path import raw_path_spec\n'), ((3154, 3197), 'dfvfs.lib.raw.RawGlobPathSpec', 'raw.RawGlobPathSpec', (['file_system', 'path_spec'], {}), '(file_system, path_spec)\n', (3173, 3197), False, 'from dfvfs.lib import raw\n'), ((3632, 3683), 'dfvfs.path.fake_path_spec.FakePathSpec', 'fake_path_spec.FakePathSpec', ([], {'location': 'u"""/image.raw"""'}), "(location=u'/image.raw')\n", (3659, 3683), False, 'from dfvfs.path import fake_path_spec\n'), ((3700, 3743), 'dfvfs.path.raw_path_spec.RawPathSpec', 'raw_path_spec.RawPathSpec', ([], {'parent': 'path_spec'}), '(parent=path_spec)\n', (3725, 3743), False, 'from dfvfs.path import raw_path_spec\n'), ((3775, 3818), 'dfvfs.lib.raw.RawGlobPathSpec', 'raw.RawGlobPathSpec', (['file_system', 'path_spec'], {}), '(file_system, path_spec)\n', (3794, 3818), False, 'from dfvfs.lib import raw\n'), ((4373, 4424), 'dfvfs.path.fake_path_spec.FakePathSpec', 'fake_path_spec.FakePathSpec', ([], {'location': 'u"""/image.aaa"""'}), "(location=u'/image.aaa')\n", (4400, 4424), False, 'from dfvfs.path import fake_path_spec\n'), ((4441, 4484), 'dfvfs.path.raw_path_spec.RawPathSpec', 'raw_path_spec.RawPathSpec', ([], {'parent': 'path_spec'}), '(parent=path_spec)\n', (4466, 4484), False, 'from dfvfs.path import raw_path_spec\n'), ((4516, 4559), 'dfvfs.lib.raw.RawGlobPathSpec', 'raw.RawGlobPathSpec', (['file_system', 'path_spec'], {}), '(file_system, path_spec)\n', (4535, 4559), False, 'from dfvfs.lib import raw\n'), ((4850, 4901), 'dfvfs.path.fake_path_spec.FakePathSpec', 'fake_path_spec.FakePathSpec', ([], {'location': 'u"""/bogus.aaa"""'}), "(location=u'/bogus.aaa')\n", (4877, 4901), False, 'from dfvfs.path import fake_path_spec\n'), ((4918, 4961), 'dfvfs.path.raw_path_spec.RawPathSpec', 'raw_path_spec.RawPathSpec', ([], {'parent': 'path_spec'}), '(parent=path_spec)\n', (4943, 4961), False, 'from dfvfs.path import raw_path_spec\n'), 
((4993, 5036), 'dfvfs.lib.raw.RawGlobPathSpec', 'raw.RawGlobPathSpec', (['file_system', 'path_spec'], {}), '(file_system, path_spec)\n', (5012, 5036), False, 'from dfvfs.lib import raw\n'), ((5643, 5694), 'dfvfs.path.fake_path_spec.FakePathSpec', 'fake_path_spec.FakePathSpec', ([], {'location': 'u"""/image.aaa"""'}), "(location=u'/image.aaa')\n", (5670, 5694), False, 'from dfvfs.path import fake_path_spec\n'), ((5711, 5754), 'dfvfs.path.raw_path_spec.RawPathSpec', 'raw_path_spec.RawPathSpec', ([], {'parent': 'path_spec'}), '(parent=path_spec)\n', (5736, 5754), False, 'from dfvfs.path import raw_path_spec\n'), ((5786, 5829), 'dfvfs.lib.raw.RawGlobPathSpec', 'raw.RawGlobPathSpec', (['file_system', 'path_spec'], {}), '(file_system, path_spec)\n', (5805, 5829), False, 'from dfvfs.lib import raw\n'), ((6436, 6487), 'dfvfs.path.fake_path_spec.FakePathSpec', 'fake_path_spec.FakePathSpec', ([], {'location': 'u"""/image.AAA"""'}), "(location=u'/image.AAA')\n", (6463, 6487), False, 'from dfvfs.path import fake_path_spec\n'), ((6504, 6547), 'dfvfs.path.raw_path_spec.RawPathSpec', 'raw_path_spec.RawPathSpec', ([], {'parent': 'path_spec'}), '(parent=path_spec)\n', (6529, 6547), False, 'from dfvfs.path import raw_path_spec\n'), ((6579, 6622), 'dfvfs.lib.raw.RawGlobPathSpec', 'raw.RawGlobPathSpec', (['file_system', 'path_spec'], {}), '(file_system, path_spec)\n', (6598, 6622), False, 'from dfvfs.lib import raw\n'), ((7170, 7220), 'dfvfs.path.fake_path_spec.FakePathSpec', 'fake_path_spec.FakePathSpec', ([], {'location': 'u"""/imageaaa"""'}), "(location=u'/imageaaa')\n", (7197, 7220), False, 'from dfvfs.path import fake_path_spec\n'), ((7237, 7280), 'dfvfs.path.raw_path_spec.RawPathSpec', 'raw_path_spec.RawPathSpec', ([], {'parent': 'path_spec'}), '(parent=path_spec)\n', (7262, 7280), False, 'from dfvfs.path import raw_path_spec\n'), ((7312, 7355), 'dfvfs.lib.raw.RawGlobPathSpec', 'raw.RawGlobPathSpec', (['file_system', 'path_spec'], {}), '(file_system, path_spec)\n', (7331, 7355), 
False, 'from dfvfs.lib import raw\n'), ((7646, 7696), 'dfvfs.path.fake_path_spec.FakePathSpec', 'fake_path_spec.FakePathSpec', ([], {'location': 'u"""/bogusaaa"""'}), "(location=u'/bogusaaa')\n", (7673, 7696), False, 'from dfvfs.path import fake_path_spec\n'), ((7713, 7756), 'dfvfs.path.raw_path_spec.RawPathSpec', 'raw_path_spec.RawPathSpec', ([], {'parent': 'path_spec'}), '(parent=path_spec)\n', (7738, 7756), False, 'from dfvfs.path import raw_path_spec\n'), ((7788, 7831), 'dfvfs.lib.raw.RawGlobPathSpec', 'raw.RawGlobPathSpec', (['file_system', 'path_spec'], {}), '(file_system, path_spec)\n', (7807, 7831), False, 'from dfvfs.lib import raw\n'), ((8427, 8477), 'dfvfs.path.fake_path_spec.FakePathSpec', 'fake_path_spec.FakePathSpec', ([], {'location': 'u"""/imageaaa"""'}), "(location=u'/imageaaa')\n", (8454, 8477), False, 'from dfvfs.path import fake_path_spec\n'), ((8494, 8537), 'dfvfs.path.raw_path_spec.RawPathSpec', 'raw_path_spec.RawPathSpec', ([], {'parent': 'path_spec'}), '(parent=path_spec)\n', (8519, 8537), False, 'from dfvfs.path import raw_path_spec\n'), ((8569, 8612), 'dfvfs.lib.raw.RawGlobPathSpec', 'raw.RawGlobPathSpec', (['file_system', 'path_spec'], {}), '(file_system, path_spec)\n', (8588, 8612), False, 'from dfvfs.lib import raw\n'), ((9208, 9258), 'dfvfs.path.fake_path_spec.FakePathSpec', 'fake_path_spec.FakePathSpec', ([], {'location': 'u"""/imageAAA"""'}), "(location=u'/imageAAA')\n", (9235, 9258), False, 'from dfvfs.path import fake_path_spec\n'), ((9275, 9318), 'dfvfs.path.raw_path_spec.RawPathSpec', 'raw_path_spec.RawPathSpec', ([], {'parent': 'path_spec'}), '(parent=path_spec)\n', (9300, 9318), False, 'from dfvfs.path import raw_path_spec\n'), ((9350, 9393), 'dfvfs.lib.raw.RawGlobPathSpec', 'raw.RawGlobPathSpec', (['file_system', 'path_spec'], {}), '(file_system, path_spec)\n', (9369, 9393), False, 'from dfvfs.lib import raw\n'), ((9938, 9989), 'dfvfs.path.fake_path_spec.FakePathSpec', 'fake_path_spec.FakePathSpec', ([], {'location': 
'u"""/image.000"""'}), "(location=u'/image.000')\n", (9965, 9989), False, 'from dfvfs.path import fake_path_spec\n'), ((10006, 10049), 'dfvfs.path.raw_path_spec.RawPathSpec', 'raw_path_spec.RawPathSpec', ([], {'parent': 'path_spec'}), '(parent=path_spec)\n', (10031, 10049), False, 'from dfvfs.path import raw_path_spec\n'), ((10081, 10124), 'dfvfs.lib.raw.RawGlobPathSpec', 'raw.RawGlobPathSpec', (['file_system', 'path_spec'], {}), '(file_system, path_spec)\n', (10100, 10124), False, 'from dfvfs.lib import raw\n'), ((10415, 10466), 'dfvfs.path.fake_path_spec.FakePathSpec', 'fake_path_spec.FakePathSpec', ([], {'location': 'u"""/bogus.000"""'}), "(location=u'/bogus.000')\n", (10442, 10466), False, 'from dfvfs.path import fake_path_spec\n'), ((10483, 10526), 'dfvfs.path.raw_path_spec.RawPathSpec', 'raw_path_spec.RawPathSpec', ([], {'parent': 'path_spec'}), '(parent=path_spec)\n', (10508, 10526), False, 'from dfvfs.path import raw_path_spec\n'), ((10558, 10601), 'dfvfs.lib.raw.RawGlobPathSpec', 'raw.RawGlobPathSpec', (['file_system', 'path_spec'], {}), '(file_system, path_spec)\n', (10577, 10601), False, 'from dfvfs.lib import raw\n'), ((11208, 11259), 'dfvfs.path.fake_path_spec.FakePathSpec', 'fake_path_spec.FakePathSpec', ([], {'location': 'u"""/image.000"""'}), "(location=u'/image.000')\n", (11235, 11259), False, 'from dfvfs.path import fake_path_spec\n'), ((11276, 11319), 'dfvfs.path.raw_path_spec.RawPathSpec', 'raw_path_spec.RawPathSpec', ([], {'parent': 'path_spec'}), '(parent=path_spec)\n', (11301, 11319), False, 'from dfvfs.path import raw_path_spec\n'), ((11351, 11394), 'dfvfs.lib.raw.RawGlobPathSpec', 'raw.RawGlobPathSpec', (['file_system', 'path_spec'], {}), '(file_system, path_spec)\n', (11370, 11394), False, 'from dfvfs.lib import raw\n'), ((11979, 12030), 'dfvfs.path.fake_path_spec.FakePathSpec', 'fake_path_spec.FakePathSpec', ([], {'location': 'u"""/image.001"""'}), "(location=u'/image.001')\n", (12006, 12030), False, 'from dfvfs.path import 
fake_path_spec\n'), ((12047, 12090), 'dfvfs.path.raw_path_spec.RawPathSpec', 'raw_path_spec.RawPathSpec', ([], {'parent': 'path_spec'}), '(parent=path_spec)\n', (12072, 12090), False, 'from dfvfs.path import raw_path_spec\n'), ((12122, 12165), 'dfvfs.lib.raw.RawGlobPathSpec', 'raw.RawGlobPathSpec', (['file_system', 'path_spec'], {}), '(file_system, path_spec)\n', (12141, 12165), False, 'from dfvfs.lib import raw\n'), ((12728, 12777), 'dfvfs.path.fake_path_spec.FakePathSpec', 'fake_path_spec.FakePathSpec', ([], {'location': 'u"""/image.1"""'}), "(location=u'/image.1')\n", (12755, 12777), False, 'from dfvfs.path import fake_path_spec\n'), ((12794, 12837), 'dfvfs.path.raw_path_spec.RawPathSpec', 'raw_path_spec.RawPathSpec', ([], {'parent': 'path_spec'}), '(parent=path_spec)\n', (12819, 12837), False, 'from dfvfs.path import raw_path_spec\n'), ((12869, 12912), 'dfvfs.lib.raw.RawGlobPathSpec', 'raw.RawGlobPathSpec', (['file_system', 'path_spec'], {}), '(file_system, path_spec)\n', (12888, 12912), False, 'from dfvfs.lib import raw\n'), ((13448, 13496), 'dfvfs.path.fake_path_spec.FakePathSpec', 'fake_path_spec.FakePathSpec', ([], {'location': 'u"""/image1"""'}), "(location=u'/image1')\n", (13475, 13496), False, 'from dfvfs.path import fake_path_spec\n'), ((13513, 13556), 'dfvfs.path.raw_path_spec.RawPathSpec', 'raw_path_spec.RawPathSpec', ([], {'parent': 'path_spec'}), '(parent=path_spec)\n', (13538, 13556), False, 'from dfvfs.path import raw_path_spec\n'), ((13588, 13631), 'dfvfs.lib.raw.RawGlobPathSpec', 'raw.RawGlobPathSpec', (['file_system', 'path_spec'], {}), '(file_system, path_spec)\n', (13607, 13631), False, 'from dfvfs.lib import raw\n'), ((13922, 13970), 'dfvfs.path.fake_path_spec.FakePathSpec', 'fake_path_spec.FakePathSpec', ([], {'location': 'u"""/bogus1"""'}), "(location=u'/bogus1')\n", (13949, 13970), False, 'from dfvfs.path import fake_path_spec\n'), ((13987, 14030), 'dfvfs.path.raw_path_spec.RawPathSpec', 'raw_path_spec.RawPathSpec', ([], {'parent': 
'path_spec'}), '(parent=path_spec)\n', (14012, 14030), False, 'from dfvfs.path import raw_path_spec\n'), ((14062, 14105), 'dfvfs.lib.raw.RawGlobPathSpec', 'raw.RawGlobPathSpec', (['file_system', 'path_spec'], {}), '(file_system, path_spec)\n', (14081, 14105), False, 'from dfvfs.lib import raw\n'), ((14672, 14720), 'dfvfs.path.fake_path_spec.FakePathSpec', 'fake_path_spec.FakePathSpec', ([], {'location': 'u"""/image0"""'}), "(location=u'/image0')\n", (14699, 14720), False, 'from dfvfs.path import fake_path_spec\n'), ((14737, 14780), 'dfvfs.path.raw_path_spec.RawPathSpec', 'raw_path_spec.RawPathSpec', ([], {'parent': 'path_spec'}), '(parent=path_spec)\n', (14762, 14780), False, 'from dfvfs.path import raw_path_spec\n'), ((14812, 14855), 'dfvfs.lib.raw.RawGlobPathSpec', 'raw.RawGlobPathSpec', (['file_system', 'path_spec'], {}), '(file_system, path_spec)\n', (14831, 14855), False, 'from dfvfs.lib import raw\n'), ((15408, 15456), 'dfvfs.path.fake_path_spec.FakePathSpec', 'fake_path_spec.FakePathSpec', ([], {'location': 'u"""/image1"""'}), "(location=u'/image1')\n", (15435, 15456), False, 'from dfvfs.path import fake_path_spec\n'), ((15473, 15516), 'dfvfs.path.raw_path_spec.RawPathSpec', 'raw_path_spec.RawPathSpec', ([], {'parent': 'path_spec'}), '(parent=path_spec)\n', (15498, 15516), False, 'from dfvfs.path import raw_path_spec\n'), ((15548, 15591), 'dfvfs.lib.raw.RawGlobPathSpec', 'raw.RawGlobPathSpec', (['file_system', 'path_spec'], {}), '(file_system, path_spec)\n', (15567, 15591), False, 'from dfvfs.lib import raw\n'), ((16166, 16216), 'dfvfs.path.fake_path_spec.FakePathSpec', 'fake_path_spec.FakePathSpec', ([], {'location': 'u"""/image001"""'}), "(location=u'/image001')\n", (16193, 16216), False, 'from dfvfs.path import fake_path_spec\n'), ((16233, 16276), 'dfvfs.path.raw_path_spec.RawPathSpec', 'raw_path_spec.RawPathSpec', ([], {'parent': 'path_spec'}), '(parent=path_spec)\n', (16258, 16276), False, 'from dfvfs.path import raw_path_spec\n'), ((16308, 16351), 
'dfvfs.lib.raw.RawGlobPathSpec', 'raw.RawGlobPathSpec', (['file_system', 'path_spec'], {}), '(file_system, path_spec)\n', (16327, 16351), False, 'from dfvfs.lib import raw\n'), ((16891, 16945), 'dfvfs.path.fake_path_spec.FakePathSpec', 'fake_path_spec.FakePathSpec', ([], {'location': 'u"""/image001.asb"""'}), "(location=u'/image001.asb')\n", (16918, 16945), False, 'from dfvfs.path import fake_path_spec\n'), ((16962, 17005), 'dfvfs.path.raw_path_spec.RawPathSpec', 'raw_path_spec.RawPathSpec', ([], {'parent': 'path_spec'}), '(parent=path_spec)\n', (16987, 17005), False, 'from dfvfs.path import raw_path_spec\n'), ((17037, 17080), 'dfvfs.lib.raw.RawGlobPathSpec', 'raw.RawGlobPathSpec', (['file_system', 'path_spec'], {}), '(file_system, path_spec)\n', (17056, 17080), False, 'from dfvfs.lib import raw\n'), ((17371, 17425), 'dfvfs.path.fake_path_spec.FakePathSpec', 'fake_path_spec.FakePathSpec', ([], {'location': 'u"""/bogus000.asb"""'}), "(location=u'/bogus000.asb')\n", (17398, 17425), False, 'from dfvfs.path import fake_path_spec\n'), ((17442, 17485), 'dfvfs.path.raw_path_spec.RawPathSpec', 'raw_path_spec.RawPathSpec', ([], {'parent': 'path_spec'}), '(parent=path_spec)\n', (17467, 17485), False, 'from dfvfs.path import raw_path_spec\n'), ((17517, 17560), 'dfvfs.lib.raw.RawGlobPathSpec', 'raw.RawGlobPathSpec', (['file_system', 'path_spec'], {}), '(file_system, path_spec)\n', (17536, 17560), False, 'from dfvfs.lib import raw\n'), ((18183, 18237), 'dfvfs.path.fake_path_spec.FakePathSpec', 'fake_path_spec.FakePathSpec', ([], {'location': 'u"""/image001.asb"""'}), "(location=u'/image001.asb')\n", (18210, 18237), False, 'from dfvfs.path import fake_path_spec\n'), ((18254, 18297), 'dfvfs.path.raw_path_spec.RawPathSpec', 'raw_path_spec.RawPathSpec', ([], {'parent': 'path_spec'}), '(parent=path_spec)\n', (18279, 18297), False, 'from dfvfs.path import raw_path_spec\n'), ((18329, 18372), 'dfvfs.lib.raw.RawGlobPathSpec', 'raw.RawGlobPathSpec', (['file_system', 'path_spec'], {}), 
'(file_system, path_spec)\n', (18348, 18372), False, 'from dfvfs.lib import raw\n'), ((18917, 18974), 'dfvfs.path.fake_path_spec.FakePathSpec', 'fake_path_spec.FakePathSpec', ([], {'location': 'u"""/image-f001.vmdk"""'}), "(location=u'/image-f001.vmdk')\n", (18944, 18974), False, 'from dfvfs.path import fake_path_spec\n'), ((18991, 19034), 'dfvfs.path.raw_path_spec.RawPathSpec', 'raw_path_spec.RawPathSpec', ([], {'parent': 'path_spec'}), '(parent=path_spec)\n', (19016, 19034), False, 'from dfvfs.path import raw_path_spec\n'), ((19066, 19109), 'dfvfs.lib.raw.RawGlobPathSpec', 'raw.RawGlobPathSpec', (['file_system', 'path_spec'], {}), '(file_system, path_spec)\n', (19085, 19109), False, 'from dfvfs.lib import raw\n'), ((19400, 19457), 'dfvfs.path.fake_path_spec.FakePathSpec', 'fake_path_spec.FakePathSpec', ([], {'location': 'u"""/bogus-f000.vmdk"""'}), "(location=u'/bogus-f000.vmdk')\n", (19427, 19457), False, 'from dfvfs.path import fake_path_spec\n'), ((19474, 19517), 'dfvfs.path.raw_path_spec.RawPathSpec', 'raw_path_spec.RawPathSpec', ([], {'parent': 'path_spec'}), '(parent=path_spec)\n', (19499, 19517), False, 'from dfvfs.path import raw_path_spec\n'), ((19549, 19592), 'dfvfs.lib.raw.RawGlobPathSpec', 'raw.RawGlobPathSpec', (['file_system', 'path_spec'], {}), '(file_system, path_spec)\n', (19568, 19592), False, 'from dfvfs.lib import raw\n'), ((20253, 20310), 'dfvfs.path.fake_path_spec.FakePathSpec', 'fake_path_spec.FakePathSpec', ([], {'location': 'u"""/image-f001.vmdk"""'}), "(location=u'/image-f001.vmdk')\n", (20280, 20310), False, 'from dfvfs.path import fake_path_spec\n'), ((20327, 20370), 'dfvfs.path.raw_path_spec.RawPathSpec', 'raw_path_spec.RawPathSpec', ([], {'parent': 'path_spec'}), '(parent=path_spec)\n', (20352, 20370), False, 'from dfvfs.path import raw_path_spec\n'), ((20402, 20445), 'dfvfs.lib.raw.RawGlobPathSpec', 'raw.RawGlobPathSpec', (['file_system', 'path_spec'], {}), '(file_system, path_spec)\n', (20421, 20445), False, 'from dfvfs.lib import 
raw\n'), ((1349, 1391), 'dfvfs.path.fake_path_spec.FakePathSpec', 'fake_path_spec.FakePathSpec', ([], {'location': 'path'}), '(location=path)\n', (1376, 1391), False, 'from dfvfs.path import fake_path_spec\n')] |
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import numpy as np
import sys
import time
def get_train_loss(line):
    """Parse one training-log line; return (epoch, train loss).

    Assumes a space-separated line where field 2 is the epoch and
    field 4 is the training loss — TODO confirm against the log format.
    """
    fields = line.split(" ")
    return float(fields[2]), float(fields[4])
def get_val_loss(line):
    """Parse one log line; return the validation loss (field 19) or None.

    Lines with 19 or fewer space-separated fields carry no validation
    loss and yield None.
    """
    fields = line.split(" ")
    return float(fields[19]) if len(fields) > 19 else None
def read(logfile):
    """Tail *logfile* forever, yielding growing train/val loss curves.

    Yields (train_epochs, train_losses, val_epochs, val_losses) after
    every new line; the same list objects grow across yields. Polls
    every 0.1 s when no new data is available.
    """
    train_epochs, train_losses = [], []
    val_epochs, val_losses = [], []
    with open(logfile) as log:
        while True:
            line = log.readline()
            if not line:
                # Reached EOF; wait for the trainer to append more lines.
                time.sleep(0.1)
                continue
            epoch, train_loss = get_train_loss(line)
            val_loss = get_val_loss(line)
            train_losses.append(train_loss)
            train_epochs.append(epoch)
            if val_loss is not None:
                val_losses.append(val_loss)
                val_epochs.append(epoch)
            yield train_epochs, train_losses, val_epochs, val_losses
def main():
    """Live-plot train/val loss curves by tailing the log file given on argv."""
    if len(sys.argv)<2:
        print("Usage: python %s [training log file]" % sys.argv[0])
        return
    log_file = sys.argv[1]
    fig, ax = plt.subplots()
    ax.set_xlabel('epochs')
    ax.set_ylabel('loss')
    ax.grid()
    # Two initially-empty lines that the animation callback updates in place.
    train_line, = ax.plot([], [])
    val_line, = ax.plot([], [])
    train_line.set_label("Train")
    val_line.set_label("Val")
    ax.legend()
    def animate(values):
        # values is one tuple yielded by read():
        # (train_epochs, train_losses, val_epochs, val_losses).
        train_x, train_y, val_x, val_y = values
        print(train_x[-1], train_y[-1])
        train_line.set_data(train_x, train_y)
        val_line.set_data(val_x, val_y)
        # Rescale the axes to the data observed so far.
        ax.set_xlim([train_x[0]-1, train_x[-1]])
        max_y = max(train_y)
        min_y = min(train_y)
        if val_y:
            max_y = max(max_y, max(val_y))
            min_y = min(min_y, min(val_y))
        # Cap the upper limit so an early loss spike does not flatten the plot.
        max_y = min(max_y, 10)
        ax.set_ylim([min_y, max_y])
    # frames=read(...) feeds each yielded tuple to animate; interval is in ms.
    ani = FuncAnimation(fig, animate, frames=read(log_file), interval=1)
    plt.show()
# Start the live plot when the script is run directly.
if __name__ == '__main__':
    main()
| [
"matplotlib.pyplot.subplots",
"time.sleep",
"matplotlib.pyplot.show"
] | [((1224, 1238), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1236, 1238), True, 'import matplotlib.pyplot as plt\n'), ((2010, 2020), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2018, 2020), True, 'import matplotlib.pyplot as plt\n'), ((1045, 1060), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1055, 1060), False, 'import time\n')] |
from __future__ import annotations
import logging
import pathlib
from logging.handlers import TimedRotatingFileHandler
from os import getenv
from typing import Union, List, Mapping
from bundle.utils.recorder import Recorder
from bundle.utils.cache_file_helpers import CacheFolder, USER_DOCS_PATH
from bundle.seeker import tracer
_CACHE_FOLDER_AUTO_DELETE_ENV_NAME = 'CONTROLLER_CACHE_AUTO_DELETE'
# Whether cache folders should be auto-deleted on exit. os.getenv expects a
# string default; the original passed the bool False, which crashed with
# ValueError (`int('')`) whenever the variable was set but empty. Default to
# '0' and coerce an empty value back to 0 before the int() conversion.
is_auto_delete = bool(int(getenv(_CACHE_FOLDER_AUTO_DELETE_ENV_NAME, '0') or 0))

_CACHE_PATH_ENV_NAME = 'CONTROLLER_CACHE_PATH'
# Root directory for the controller's cache; falls back to the user docs path.
controller_cache_path = pathlib.Path(getenv(_CACHE_PATH_ENV_NAME, USER_DOCS_PATH))
class _Controller:
    """Facade wiring together the tracer, the recorder, cache folders and logging."""
    # File name of the rotating execution log inside the log folder.
    _LOG_FILE_NAME = f'graphery_controller_execution.log'
    def __init__(self, cache_path=controller_cache_path, auto_delete: bool = is_auto_delete):
        """Create the cache/log folders, the recorder and the logger.

        NOTE(review): main_cache_folder.__enter__() is called here and the
        matching __exit__ only happens in __del__ — confirm this is intended,
        since __del__ is not guaranteed to run.
        """
        self.main_cache_folder = CacheFolder(cache_path, auto_delete=auto_delete)
        self.log_folder = CacheFolder(cache_path / 'log', auto_delete=auto_delete)
        # TODO think about this, and the log file location in the sight class
        self.log_folder.mkdir(parents=True, exist_ok=True)
        self.tracer_cls = tracer
        self.recorder = Recorder()
        self.controller_logger = self._init_logger()
        self.main_cache_folder.__enter__()
        self.tracer_cls.set_new_recorder(self.recorder)
    def _init_logger(self) -> logging.Logger:
        """Build a midnight-rotating file logger (30 backups) under the log folder."""
        log_file_path = self.log_folder.cache_folder_path / self._LOG_FILE_NAME
        logger = logging.getLogger('controller.tracer')
        logger.setLevel(logging.DEBUG)
        # Rotate at midnight; the file handler only records INFO and above.
        log_file_handler = TimedRotatingFileHandler(log_file_path, when='midnight', backupCount=30)
        log_file_handler.setLevel(logging.INFO)
        formatter = logging.Formatter(
            '%(asctime)-15s::%(levelname)s::%(message)s'
        )
        log_file_handler.setFormatter(formatter)
        logger.addHandler(log_file_handler)
        return logger
    def get_recorded_content(self) -> List[Mapping]:
        """Return the raw change list captured by the recorder."""
        return self.recorder.get_change_list()
    def get_processed_result(self) -> List[Mapping]:
        """Return the post-processed change list."""
        return self.recorder.get_processed_change_list()
    def get_processed_result_json(self) -> str:
        """Return the change list serialized as a JSON string."""
        return self.recorder.get_change_list_json()
    def purge_records(self) -> None:
        """Discard everything the recorder has captured so far."""
        self.recorder.purge()
    def __call__(self, dir_name: Union[str, pathlib.Path] = None,
                 mode: int = 0o777,
                 auto_delete: bool = False,
                 *args, **kwargs) -> CacheFolder:
        """Return a named sub cache folder, or the main folder when dir_name is falsy."""
        if dir_name:
            return self.main_cache_folder.add_cache_folder(dir_name, mode, auto_delete)
        else:
            return self.main_cache_folder
    def __enter__(self) -> _Controller:
        """Attach the controller logger to the tracer for the duration of the context."""
        self.tracer_cls.set_logger(self.controller_logger)
        # TODO give a prompt that the current session is under this time stamp
        return self
    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        """Detach the logger from the tracer on context exit."""
        self.tracer_cls.set_logger(None)
    def __del__(self) -> None:
        """Best-effort cleanup: exit (and possibly auto-delete) the main cache folder."""
        self.main_cache_folder.__exit__(None, None, None)
# Module-level singleton instance used by the rest of the package.
controller = _Controller()
| [
"logging.getLogger",
"os.getenv",
"logging.Formatter",
"bundle.utils.recorder.Recorder",
"bundle.utils.cache_file_helpers.CacheFolder",
"logging.handlers.TimedRotatingFileHandler"
] | [((563, 607), 'os.getenv', 'getenv', (['_CACHE_PATH_ENV_NAME', 'USER_DOCS_PATH'], {}), '(_CACHE_PATH_ENV_NAME, USER_DOCS_PATH)\n', (569, 607), False, 'from os import getenv\n'), ((426, 475), 'os.getenv', 'getenv', (['_CACHE_FOLDER_AUTO_DELETE_ENV_NAME', '(False)'], {}), '(_CACHE_FOLDER_AUTO_DELETE_ENV_NAME, False)\n', (432, 475), False, 'from os import getenv\n'), ((816, 864), 'bundle.utils.cache_file_helpers.CacheFolder', 'CacheFolder', (['cache_path'], {'auto_delete': 'auto_delete'}), '(cache_path, auto_delete=auto_delete)\n', (827, 864), False, 'from bundle.utils.cache_file_helpers import CacheFolder, USER_DOCS_PATH\n'), ((891, 947), 'bundle.utils.cache_file_helpers.CacheFolder', 'CacheFolder', (["(cache_path / 'log')"], {'auto_delete': 'auto_delete'}), "(cache_path / 'log', auto_delete=auto_delete)\n", (902, 947), False, 'from bundle.utils.cache_file_helpers import CacheFolder, USER_DOCS_PATH\n'), ((1142, 1152), 'bundle.utils.recorder.Recorder', 'Recorder', ([], {}), '()\n', (1150, 1152), False, 'from bundle.utils.recorder import Recorder\n'), ((1451, 1489), 'logging.getLogger', 'logging.getLogger', (['"""controller.tracer"""'], {}), "('controller.tracer')\n", (1468, 1489), False, 'import logging\n'), ((1556, 1628), 'logging.handlers.TimedRotatingFileHandler', 'TimedRotatingFileHandler', (['log_file_path'], {'when': '"""midnight"""', 'backupCount': '(30)'}), "(log_file_path, when='midnight', backupCount=30)\n", (1580, 1628), False, 'from logging.handlers import TimedRotatingFileHandler\n'), ((1697, 1760), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)-15s::%(levelname)s::%(message)s"""'], {}), "('%(asctime)-15s::%(levelname)s::%(message)s')\n", (1714, 1760), False, 'import logging\n')] |
# Copyright 2018 <NAME>, <NAME>.
# (Strongly inspired by original Google BERT code and Hugging Face's code)
""" Fine-tuning on A Classification Task with pretrained Transformer """
import itertools
import csv
import fire
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import tokenization
import models
import optim
import train
import pdb
import numpy as np
import pandas as pd
from utils import set_seeds, get_device, truncate_tokens_pair
import os
def read_explanations(path):
    """Read a worldtree explanation table (TSV) and return (uid, text) pairs.

    Columns whose name starts with '[SKIP]' are excluded from the row text;
    the first such column whose name contains 'UID' supplies the identifier.
    NaN cells are dropped from the joined text. Returns [] (with a warning
    printed) when no UID column is found or the table has no rows.
    """
    text_columns = []
    uid_column = None
    df = pd.read_csv(path, sep='\t', dtype=str)
    for column_name in df.columns:
        if column_name.startswith('[SKIP]'):
            # Only the first [SKIP] column mentioning UID is kept as the id.
            if 'UID' in column_name and not uid_column:
                uid_column = column_name
        else:
            text_columns.append(column_name)
    if not uid_column or len(df) == 0:
        print('Possibly misformatted file: ' + path)
        return []

    def _row_to_pair(row):
        joined = ' '.join(
            str(cell) for cell in list(row[text_columns]) if not pd.isna(cell))
        return (row[uid_column], joined)

    return df.apply(_row_to_pair, 1).tolist()
# NOTE(review): machine-specific absolute paths to the worldtree explanation
# tables and the ARC question TSV — presumably these should be configurable;
# confirm before reusing on another machine.
tables = '/data/jacob/code/nlp/tfidf/data/annotation/expl-tablestore-export-2017-08-25-230344/tables'
questions = '/data/jacob/code/nlp/tfidf/data/questions/ARC-Elementary+EXPL-Dev.tsv'
def parse_e(e):
    """Split an explanation annotation into UIDs, dropping the '|role' suffixes.

    Input looks like 'uid1|ROLE uid2|ROLE ...'; the part before each '|' is kept.
    """
    return [entry.split('|')[0] for entry in e.split(' ')]
class CsvDataset(Dataset):
    """Dataset pairing ARC questions with positive/negative explanation sentences.

    Each item yields 1 positive pair (question, gold explanation sentence)
    followed by ``num_neg`` negative pairs (question, random explanation
    sentence), each run through the preprocessing pipeline and stacked into
    long tensors.
    """
    labels = None

    def __init__(self, pipeline=None):  # pipeline: sequence of Pipeline steps
        """Load explanation tables and questions from the module-level paths.

        Args:
            pipeline: optional sequence of callables applied in order to each
                (label, text_a, text_b) instance. Defaults to no processing.
                (Original used a mutable default ``[]``; a None sentinel is
                safer and backward-compatible.)
        """
        Dataset.__init__(self)
        if pipeline is None:
            pipeline = []
        # Collect (uid, text) rows from every table file under `tables`.
        explanations = []
        for path, _, files in os.walk(tables):
            for file in files:
                explanations += read_explanations(os.path.join(path, file))
        if not explanations:
            # Local import: `warnings` is not imported at module level, so the
            # original raised NameError on this (previously unexercised) path.
            import warnings
            warnings.warn('Empty explanations')
        df_q = pd.read_csv(questions, sep='\t', dtype=str)
        df_e = pd.DataFrame(explanations, columns=('uid', 'text'))
        # Map explanation uid -> explanation sentence text.
        dict_e = dict(zip(df_e['uid'], df_e['text']))
        # Keep only questions that have an annotated explanation.
        q_list = []
        e_list = []
        for question, explanation in zip(df_q['Question'], df_q['explanation']):
            if explanation is not np.nan:
                q_list.append(question)
                e_list.append(parse_e(explanation))
        self.q_list = q_list
        self.e_list = e_list
        self.dict_e = dict_e
        self.pipeline = pipeline
        self.es = list(dict_e.keys())
        # Number of negative samples drawn per question.
        self.num_neg = 75

    def _process(self, instance):
        """Run one (label, text_a, text_b) instance through the pipeline."""
        for proc in self.pipeline:
            instance = proc(instance)
        return instance

    def __len__(self):
        return len(self.q_list)

    def __getitem__(self, index):
        """Return stacked long tensors for 1 positive + num_neg negative pairs."""
        question = self.q_list[index]
        gold_uids = self.e_list[index]
        # Positive: a randomly chosen gold explanation sentence for this question.
        positive = self.dict_e[np.random.choice(gold_uids)]
        samples = [self._process(('1', question, positive))]
        # Negatives: uniformly sampled sentences (may occasionally collide with
        # a gold sentence; kept as in the original implementation).
        for _ in range(self.num_neg):
            negative = self.dict_e[np.random.choice(self.es)]
            samples.append(self._process(('0', question, negative)))
        # Transpose per-sample field tuples into per-field tensors.
        return [torch.tensor(field, dtype=torch.long) for field in zip(*samples)]
class Pipeline():
    """Abstract base for preprocessing steps; subclasses implement __call__."""

    def __init__(self):
        super().__init__()

    def __call__(self, instance):
        # Subclasses must transform the instance and return the result.
        raise NotImplementedError
class Tokenizing(Pipeline):
    """Pipeline step: normalize and tokenize a (label, text_a, text_b) triple."""

    def __init__(self, preprocessor, tokenize):
        """preprocessor: text normalization; tokenize: string -> token list."""
        super().__init__()
        self.preprocessor = preprocessor
        self.tokenize = tokenize

    def __call__(self, instance):
        label, text_a, text_b = instance
        # The label string is normalized too, matching downstream label maps.
        normalized_label = self.preprocessor(label)
        tokens_a = self.tokenize(self.preprocessor(text_a))
        # A falsy text_b (single-sentence input) yields an empty token list.
        tokens_b = self.tokenize(self.preprocessor(text_b)) if text_b else []
        return (normalized_label, tokens_a, tokens_b)
class AddSpecialTokensWithTruncation(Pipeline):
    """Pipeline step: truncate the token pair and add [CLS]/[SEP] markers."""

    def __init__(self, max_len=512):
        super().__init__()
        self.max_len = max_len

    def __call__(self, instance):
        label, tokens_a, tokens_b = instance
        # Reserve room for the special tokens:
        # [CLS] a [SEP] b [SEP] needs 3, [CLS] a [SEP] needs 2.
        budget = self.max_len - (3 if tokens_b else 2)
        truncate_tokens_pair(tokens_a, tokens_b, budget)
        tokens_a = ['[CLS]'] + tokens_a + ['[SEP]']
        tokens_b = tokens_b + ['[SEP]'] if tokens_b else []
        return (label, tokens_a, tokens_b)
class TokenIndexing(Pipeline):
    """Pipeline step: map tokens to ids, build segment/mask vectors, zero-pad."""

    def __init__(self, indexer, labels, max_len=512):
        """indexer: tokens -> token ids; labels: ordered label names."""
        super().__init__()
        self.indexer = indexer
        # Label name -> integer class index, in the order given.
        self.label_map = {name: i for i, name in enumerate(labels)}
        self.max_len = max_len

    def __call__(self, instance):
        label, tokens_a, tokens_b = instance
        num_a, num_b = len(tokens_a), len(tokens_b)
        input_ids = self.indexer(tokens_a + tokens_b)
        segment_ids = [0] * num_a + [1] * num_b  # token type ids
        input_mask = [1] * (num_a + num_b)
        label_id = self.label_map[label]
        # Zero-pad every sequence out to max_len.
        padding = [0] * (self.max_len - len(input_ids))
        input_ids = input_ids + padding
        segment_ids = segment_ids + padding
        input_mask = input_mask + padding
        return (input_ids, segment_ids, input_mask, label_id)
class Classifier(nn.Module):
    """Sentence(-pair) classifier: Transformer encoder + pooled first-token head."""
    def __init__(self, cfg, n_labels):
        super().__init__()
        self.transformer = models.Transformer(cfg)
        self.fc = nn.Linear(cfg.dim, cfg.dim)
        self.activ = nn.Tanh()
        self.drop = nn.Dropout(cfg.p_drop_hidden)
        self.classifier = nn.Linear(cfg.dim, n_labels)
    def forward(self, input_ids, segment_ids, input_mask):
        hidden = self.transformer(input_ids, segment_ids, input_mask)
        # Pool only the first position of the sequence (the [CLS] slot).
        pooled = self.activ(self.fc(hidden[:, 0]))
        raw_logits = self.classifier(self.drop(pooled))
        # Exponentiate and clamp to [0, 100] so downstream score ratios stay finite.
        return torch.exp(raw_logits).clamp(0, 100)
#pretrain_file='../uncased_L-12_H-768_A-12/bert_model.ckpt',
#pretrain_file='../exp/bert/pretrain_100k/model_epoch_3_steps_9732.pt',
def neg_logloss(logits):
    """Negative log of the normalized first logit (score of entry 0).

    The 1e-4 offset guards against log(0) when the first logit is ~0.
    """
    normalized = logits[0] / logits.sum()
    return -torch.log(normalized + 1e-4)
def main(task='mrpc',
         train_cfg='config/train_mrpc.json',
         model_cfg='config/bert_base.json',
         data_file='../glue/MRPC/train.tsv',
         model_file=None,
         pretrain_file='../uncased_L-12_H-768_A-12/bert_model.ckpt',
         data_parallel=True,
         vocab='../uncased_L-12_H-768_A-12/vocab.txt',
         save_dir='../exp/bert/mrpc',
         max_len=128,
         mode='train'):
    """Train or evaluate the BERT classifier on a GLUE-style TSV file.

    Args:
        task: task name (presumably selects the dataset schema — TODO confirm).
        train_cfg / model_cfg: JSON config paths for training and model.
        data_file: tab-separated input data.
        model_file: checkpoint to resume from (None to start fresh).
        pretrain_file: pretrained BERT weights (TF ckpt or .pt).
        data_parallel: spread the model across available GPUs.
        vocab: WordPiece vocabulary file.
        save_dir: checkpoint output directory.
        max_len: maximum token length after adding special tokens.
        mode: 'train' or 'eval'.
    """
    cfg = train.Config.from_json(train_cfg)
    model_cfg = models.Config.from_json(model_cfg)
    set_seeds(cfg.seed)
    tokenizer = tokenization.FullTokenizer(vocab_file=vocab, do_lower_case=True)
    # Preprocessing pipeline: tokenize -> add [CLS]/[SEP] -> index + pad.
    pipeline = [Tokenizing(tokenizer.convert_to_unicode, tokenizer.tokenize),
                AddSpecialTokensWithTruncation(max_len),
                TokenIndexing(tokenizer.convert_tokens_to_ids,
                              ('0', '1'), max_len)]
    dataset = CsvDataset(pipeline)
    # print(dataset[0])
    # pdb.set_trace()
    data_iter = DataLoader(dataset, batch_size=1, shuffle=True)
    model = Classifier(model_cfg, 1)
    criterion = nn.CrossEntropyLoss()
    trainer = train.Trainer(cfg,
                            model,
                            data_iter,
                            optim.optim4GPU(cfg, model),
                            save_dir, get_device())
    if mode == 'train':
        def get_loss(model, batch, global_step):  # make sure loss is a scalar tensor
            # pdb.set_trace()
            # Batch size is 1, so unwrap the leading dimension of each field.
            input_ids, segment_ids, input_mask, label_id = [b[0] for b in batch]
            # pdb.set_trace()
            logits = model(input_ids, segment_ids, input_mask)
            # pdb.set_trace()
            loss = neg_logloss(logits)
            # loss = criterion(logits, label_id)
            return loss
        trainer.train(get_loss, model_file, pretrain_file, data_parallel)
    elif mode == 'eval':
        def evaluate(model, batch):
            input_ids, segment_ids, input_mask, label_id = batch
            logits = model(input_ids, segment_ids, input_mask)
            _, label_pred = logits.max(1)
            result = (label_pred == label_id).float() #.cpu().numpy()
            accuracy = result.mean()
            return accuracy, result
        results = trainer.eval(evaluate, model_file, data_parallel)
        total_accuracy = torch.cat(results).mean().item()
        print('Accuracy:', total_accuracy)
if __name__ == '__main__':
    fire.Fire(main)
| [
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"pandas.read_csv",
"fire.Fire",
"torch.nn.Tanh",
"torch.exp",
"os.walk",
"utils.truncate_tokens_pair",
"pandas.DataFrame",
"tokenization.FullTokenizer",
"torch.utils.data.Dataset.__init__",
"models.Config.from_json",
"utils.set_seeds",
"nump... | [((563, 601), 'pandas.read_csv', 'pd.read_csv', (['path'], {'sep': '"""\t"""', 'dtype': 'str'}), "(path, sep='\\t', dtype=str)\n", (574, 601), True, 'import pandas as pd\n'), ((7755, 7788), 'train.Config.from_json', 'train.Config.from_json', (['train_cfg'], {}), '(train_cfg)\n', (7777, 7788), False, 'import train\n'), ((7805, 7839), 'models.Config.from_json', 'models.Config.from_json', (['model_cfg'], {}), '(model_cfg)\n', (7828, 7839), False, 'import models\n'), ((7845, 7864), 'utils.set_seeds', 'set_seeds', (['cfg.seed'], {}), '(cfg.seed)\n', (7854, 7864), False, 'from utils import set_seeds, get_device, truncate_tokens_pair\n'), ((7882, 7946), 'tokenization.FullTokenizer', 'tokenization.FullTokenizer', ([], {'vocab_file': 'vocab', 'do_lower_case': '(True)'}), '(vocab_file=vocab, do_lower_case=True)\n', (7908, 7946), False, 'import tokenization\n'), ((8295, 8342), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': '(1)', 'shuffle': '(True)'}), '(dataset, batch_size=1, shuffle=True)\n', (8305, 8342), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((8397, 8418), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (8416, 8418), True, 'import torch.nn as nn\n'), ((9745, 9760), 'fire.Fire', 'fire.Fire', (['main'], {}), '(main)\n', (9754, 9760), False, 'import fire\n'), ((1439, 1461), 'torch.utils.data.Dataset.__init__', 'Dataset.__init__', (['self'], {}), '(self)\n', (1455, 1461), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((1520, 1535), 'os.walk', 'os.walk', (['tables'], {}), '(tables)\n', (1527, 1535), False, 'import os\n'), ((1738, 1781), 'pandas.read_csv', 'pd.read_csv', (['questions'], {'sep': '"""\t"""', 'dtype': 'str'}), "(questions, sep='\\t', dtype=str)\n", (1749, 1781), True, 'import pandas as pd\n'), ((1797, 1848), 'pandas.DataFrame', 'pd.DataFrame', (['explanations'], {'columns': "('uid', 'text')"}), "(explanations, columns=('uid', 'text'))\n", (1809, 1848), True, 'import 
pandas as pd\n'), ((5181, 5231), 'utils.truncate_tokens_pair', 'truncate_tokens_pair', (['tokens_a', 'tokens_b', '_max_len'], {}), '(tokens_a, tokens_b, _max_len)\n', (5201, 5231), False, 'from utils import set_seeds, get_device, truncate_tokens_pair\n'), ((6527, 6550), 'models.Transformer', 'models.Transformer', (['cfg'], {}), '(cfg)\n', (6545, 6550), False, 'import models\n'), ((6569, 6596), 'torch.nn.Linear', 'nn.Linear', (['cfg.dim', 'cfg.dim'], {}), '(cfg.dim, cfg.dim)\n', (6578, 6596), True, 'import torch.nn as nn\n'), ((6618, 6627), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (6625, 6627), True, 'import torch.nn as nn\n'), ((6648, 6677), 'torch.nn.Dropout', 'nn.Dropout', (['cfg.p_drop_hidden'], {}), '(cfg.p_drop_hidden)\n', (6658, 6677), True, 'import torch.nn as nn\n'), ((6704, 6732), 'torch.nn.Linear', 'nn.Linear', (['cfg.dim', 'n_labels'], {}), '(cfg.dim, n_labels)\n', (6713, 6732), True, 'import torch.nn as nn\n'), ((7286, 7311), 'torch.log', 'torch.log', (['(score + 0.0001)'], {}), '(score + 0.0001)\n', (7295, 7311), False, 'import torch\n'), ((8555, 8582), 'optim.optim4GPU', 'optim.optim4GPU', (['cfg', 'model'], {}), '(cfg, model)\n', (8570, 8582), False, 'import optim\n'), ((8622, 8634), 'utils.get_device', 'get_device', ([], {}), '()\n', (8632, 8634), False, 'from utils import set_seeds, get_device, truncate_tokens_pair\n'), ((3284, 3303), 'numpy.random.choice', 'np.random.choice', (['e'], {}), '(e)\n', (3300, 3303), True, 'import numpy as np\n'), ((3809, 3842), 'torch.tensor', 'torch.tensor', (['x'], {'dtype': 'torch.long'}), '(x, dtype=torch.long)\n', (3821, 3842), False, 'import torch\n'), ((3584, 3609), 'numpy.random.choice', 'np.random.choice', (['self.es'], {}), '(self.es)\n', (3600, 3609), True, 'import numpy as np\n'), ((7024, 7041), 'torch.exp', 'torch.exp', (['logits'], {}), '(logits)\n', (7033, 7041), False, 'import torch\n'), ((1618, 1642), 'os.path.join', 'os.path.join', (['path', 'file'], {}), '(path, file)\n', (1630, 1642), False, 
'import os\n'), ((9636, 9654), 'torch.cat', 'torch.cat', (['results'], {}), '(results)\n', (9645, 9654), False, 'import torch\n'), ((976, 986), 'pandas.isna', 'pd.isna', (['s'], {}), '(s)\n', (983, 986), True, 'import pandas as pd\n')] |
import pytest
from teos.extended_appointment import ExtendedAppointment
@pytest.fixture
def ext_appointment_data(generate_dummy_appointment):
    # Plain-dict form of a freshly generated dummy appointment.
    return generate_dummy_appointment().to_dict()
# Parent methods are not tested.
def test_init_ext_appointment(ext_appointment_data):
    # The appointment performs no validation of its own: the inspector is the
    # only component that checks the data and creates appointments.
    ext_appointment = ExtendedAppointment(
        ext_appointment_data["locator"],
        ext_appointment_data["encrypted_blob"],
        ext_appointment_data["to_self_delay"],
        ext_appointment_data["user_id"],
        ext_appointment_data["user_signature"],
        ext_appointment_data["start_block"],
    )
    # Every constructor argument must land on the matching attribute.
    for field in ("locator", "to_self_delay", "encrypted_blob", "user_id",
                  "user_signature", "start_block"):
        assert getattr(ext_appointment, field) == ext_appointment_data[field]
def test_get_summary(ext_appointment_data):
    # The summary exposes exactly the locator and the user id.
    summary = ExtendedAppointment.from_dict(ext_appointment_data).get_summary()
    expected = {key: ext_appointment_data[key] for key in ("locator", "user_id")}
    assert summary == expected
def test_from_dict(ext_appointment_data):
    # Construction succeeds while every field is present.
    assert isinstance(
        ExtendedAppointment.from_dict(ext_appointment_data), ExtendedAppointment
    )
    # Nulling any single field must make construction fail.
    for key in ext_appointment_data.keys():
        original_value = ext_appointment_data[key]
        ext_appointment_data[key] = None
        with pytest.raises(ValueError, match="Wrong appointment data"):
            ExtendedAppointment.from_dict(ext_appointment_data)
        ext_appointment_data[key] = original_value
| [
"teos.extended_appointment.ExtendedAppointment.from_dict",
"pytest.raises",
"teos.extended_appointment.ExtendedAppointment"
] | [((453, 707), 'teos.extended_appointment.ExtendedAppointment', 'ExtendedAppointment', (["ext_appointment_data['locator']", "ext_appointment_data['encrypted_blob']", "ext_appointment_data['to_self_delay']", "ext_appointment_data['user_id']", "ext_appointment_data['user_signature']", "ext_appointment_data['start_block']"], {}), "(ext_appointment_data['locator'], ext_appointment_data[\n 'encrypted_blob'], ext_appointment_data['to_self_delay'],\n ext_appointment_data['user_id'], ext_appointment_data['user_signature'],\n ext_appointment_data['start_block'])\n", (472, 707), False, 'from teos.extended_appointment import ExtendedAppointment\n'), ((1609, 1660), 'teos.extended_appointment.ExtendedAppointment.from_dict', 'ExtendedAppointment.from_dict', (['ext_appointment_data'], {}), '(ext_appointment_data)\n', (1638, 1660), False, 'from teos.extended_appointment import ExtendedAppointment\n'), ((1897, 1954), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Wrong appointment data"""'}), "(ValueError, match='Wrong appointment data')\n", (1910, 1954), False, 'import pytest\n'), ((1968, 2019), 'teos.extended_appointment.ExtendedAppointment.from_dict', 'ExtendedAppointment.from_dict', (['ext_appointment_data'], {}), '(ext_appointment_data)\n', (1997, 2019), False, 'from teos.extended_appointment import ExtendedAppointment\n'), ((1297, 1348), 'teos.extended_appointment.ExtendedAppointment.from_dict', 'ExtendedAppointment.from_dict', (['ext_appointment_data'], {}), '(ext_appointment_data)\n', (1326, 1348), False, 'from teos.extended_appointment import ExtendedAppointment\n')] |
# Copyright 2022 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from typing import Tuple
from absl import flags
from absl.testing import absltest
from framework import xds_url_map_testcase
from framework.test_app import client_app
# Type aliases (shorten the long framework module prefixes).
HostRule = xds_url_map_testcase.HostRule
PathMatcher = xds_url_map_testcase.PathMatcher
GcpResourceManager = xds_url_map_testcase.GcpResourceManager
DumpedXdsConfig = xds_url_map_testcase.DumpedXdsConfig
RpcTypeUnaryCall = xds_url_map_testcase.RpcTypeUnaryCall
RpcTypeEmptyCall = xds_url_map_testcase.RpcTypeEmptyCall
XdsTestClient = client_app.XdsTestClient
logger = logging.getLogger(__name__)
flags.adopt_module_key_flags(xds_url_map_testcase)
# Number of RPCs each test sends before checking the distribution.
_NUM_RPCS = 150
# Metadata header used by the header-match route rules below.
_TEST_METADATA_KEY = 'xds_md'
_TEST_METADATA_VALUE_EMPTY = 'empty_ytpme'
_TEST_METADATA = ((RpcTypeEmptyCall, _TEST_METADATA_KEY,
                   _TEST_METADATA_VALUE_EMPTY),)
# Label set expected to match the Traffic Director network metadata.
match_labels = [{
    'name': 'TRAFFICDIRECTOR_NETWORK_NAME',
    'value': 'default-vpc'
}]
# Label set that is guaranteed not to match.
not_match_labels = [{'name': 'fake', 'value': 'fail'}]
class TestMetadataFilterMatchAll(xds_url_map_testcase.XdsUrlMapTestCase):
    """The test url-map has two routeRules: the higher priority routes to
    the default backends, but is supposed to be filtered out by TD because
    of non-matching metadata filters. The lower priority routes to alternative
    backends and metadata filter matches. Thus, it verifies that TD evaluates
    metadata filters correctly."""
    @staticmethod
    def url_map_change(
            host_rule: HostRule,
            path_matcher: PathMatcher) -> Tuple[HostRule, PathMatcher]:
        # Rule 0: MATCH_ALL with non-matching labels -> should be dropped by TD.
        # Rule 1: MATCH_ALL with matching labels -> should survive.
        path_matcher["routeRules"] = [{
            'priority': 0,
            'matchRules': [{
                'prefixMatch':
                    '/',
                'metadataFilters': [{
                    'filterMatchCriteria': 'MATCH_ALL',
                    'filterLabels': not_match_labels
                }]
            }],
            'service': GcpResourceManager().default_backend_service()
        }, {
            'priority': 1,
            'matchRules': [{
                'prefixMatch':
                    '/grpc.testing.TestService/Empty',
                'headerMatches': [{
                    'headerName': _TEST_METADATA_KEY,
                    'exactMatch': _TEST_METADATA_VALUE_EMPTY
                }],
                'metadataFilters': [{
                    'filterMatchCriteria': 'MATCH_ALL',
                    'filterLabels': match_labels
                }]
            }],
            'service': GcpResourceManager().alternative_backend_service()
        }]
        return host_rule, path_matcher
    def xds_config_validate(self, xds_config: DumpedXdsConfig):
        # Only the surviving (priority 1) rule plus the default route remain.
        self.assertNumEndpoints(xds_config, 2)
        self.assertEqual(len(xds_config.rds['virtualHosts'][0]['routes']), 2)
        self.assertEqual(
            xds_config.rds['virtualHosts'][0]['routes'][0]['match']['prefix'],
            "/grpc.testing.TestService/Empty")
        self.assertEqual(
            xds_config.rds['virtualHosts'][0]['routes'][0]['match']['headers']
            [0]['name'], _TEST_METADATA_KEY)
        self.assertEqual(
            xds_config.rds['virtualHosts'][0]['routes'][0]['match']['headers']
            [0]['exactMatch'], _TEST_METADATA_VALUE_EMPTY)
        self.assertEqual(
            xds_config.rds['virtualHosts'][0]['routes'][1]['match']['prefix'],
            "")
    def rpc_distribution_validate(self, test_client: XdsTestClient):
        # All matching RPCs must land on the alternative backend service.
        rpc_distribution = self.configure_and_send(test_client,
                                                   rpc_types=[RpcTypeEmptyCall],
                                                   metadata=_TEST_METADATA,
                                                   num_rpcs=_NUM_RPCS)
        self.assertEqual(
            _NUM_RPCS,
            rpc_distribution.empty_call_alternative_service_rpc_count)
class TestMetadataFilterMatchAny(xds_url_map_testcase.XdsUrlMapTestCase):
    """Verifies MATCH_ANY semantics: a rule whose label set contains at least
    one matching label survives, while a rule with only non-matching labels
    is filtered out by Traffic Director."""
    @staticmethod
    def url_map_change(
            host_rule: HostRule,
            path_matcher: PathMatcher) -> Tuple[HostRule, PathMatcher]:
        # Rule 0: MATCH_ANY, only non-matching labels -> dropped.
        # Rule 1: MATCH_ANY, mixed labels (one matches) -> survives.
        path_matcher["routeRules"] = [{
            'priority': 0,
            'matchRules': [{
                'prefixMatch':
                    '/',
                'metadataFilters': [{
                    'filterMatchCriteria': 'MATCH_ANY',
                    'filterLabels': not_match_labels
                }]
            }],
            'service': GcpResourceManager().default_backend_service()
        }, {
            'priority': 1,
            'matchRules': [{
                'prefixMatch':
                    '/grpc.testing.TestService/Unary',
                'metadataFilters': [{
                    'filterMatchCriteria': 'MATCH_ANY',
                    'filterLabels': not_match_labels + match_labels
                }]
            }],
            'service': GcpResourceManager().alternative_backend_service()
        }]
        return host_rule, path_matcher
    def xds_config_validate(self, xds_config: DumpedXdsConfig):
        self.assertNumEndpoints(xds_config, 2)
        self.assertEqual(len(xds_config.rds['virtualHosts'][0]['routes']), 2)
        self.assertEqual(
            xds_config.rds['virtualHosts'][0]['routes'][0]['match']['prefix'],
            "/grpc.testing.TestService/Unary")
        self.assertEqual(
            xds_config.rds['virtualHosts'][0]['routes'][1]['match']['prefix'],
            "")
    def rpc_distribution_validate(self, test_client: XdsTestClient):
        # All unary RPCs must land on the alternative backend service.
        rpc_distribution = self.configure_and_send(test_client,
                                                   rpc_types=[RpcTypeUnaryCall],
                                                   num_rpcs=_NUM_RPCS)
        self.assertEqual(
            _NUM_RPCS,
            rpc_distribution.unary_call_alternative_service_rpc_count)
class TestMetadataFilterMatchAnyAndAll(xds_url_map_testcase.XdsUrlMapTestCase):
    """Mixes the two criteria on the same label set: MATCH_ALL over a set with
    a non-matching label fails (rule dropped), while MATCH_ANY over the same
    set succeeds (rule kept)."""
    @staticmethod
    def url_map_change(
            host_rule: HostRule,
            path_matcher: PathMatcher) -> Tuple[HostRule, PathMatcher]:
        # Rule 0: MATCH_ALL over mixed labels -> one label fails, rule dropped.
        # Rule 1: MATCH_ANY over the same labels -> one label matches, kept.
        path_matcher["routeRules"] = [{
            'priority': 0,
            'matchRules': [{
                'prefixMatch':
                    '/',
                'metadataFilters': [{
                    'filterMatchCriteria': 'MATCH_ALL',
                    'filterLabels': not_match_labels + match_labels
                }]
            }],
            'service': GcpResourceManager().default_backend_service()
        }, {
            'priority': 1,
            'matchRules': [{
                'prefixMatch':
                    '/grpc.testing.TestService/Unary',
                'metadataFilters': [{
                    'filterMatchCriteria': 'MATCH_ANY',
                    'filterLabels': not_match_labels + match_labels
                }]
            }],
            'service': GcpResourceManager().alternative_backend_service()
        }]
        return host_rule, path_matcher
    def xds_config_validate(self, xds_config: DumpedXdsConfig):
        self.assertNumEndpoints(xds_config, 2)
        self.assertEqual(len(xds_config.rds['virtualHosts'][0]['routes']), 2)
        self.assertEqual(
            xds_config.rds['virtualHosts'][0]['routes'][0]['match']['prefix'],
            "/grpc.testing.TestService/Unary")
        self.assertEqual(
            xds_config.rds['virtualHosts'][0]['routes'][1]['match']['prefix'],
            "")
    def rpc_distribution_validate(self, test_client: XdsTestClient):
        # All unary RPCs must land on the alternative backend service.
        rpc_distribution = self.configure_and_send(test_client,
                                                   rpc_types=[RpcTypeUnaryCall],
                                                   num_rpcs=_NUM_RPCS)
        self.assertEqual(
            _NUM_RPCS,
            rpc_distribution.unary_call_alternative_service_rpc_count)
class TestMetadataFilterMatchMultipleRules(
        xds_url_map_testcase.XdsUrlMapTestCase):
    """Both routeRules carry matching metadata filters, so both survive;
    verifies they coexist in the generated route config alongside the
    default route."""
    @staticmethod
    def url_map_change(
            host_rule: HostRule,
            path_matcher: PathMatcher) -> Tuple[HostRule, PathMatcher]:
        # Rule 0: MATCH_ANY + header match -> alternative service.
        # Rule 1: MATCH_ALL -> default service. Both filters match.
        path_matcher["routeRules"] = [{
            'priority': 0,
            'matchRules': [{
                'prefixMatch':
                    '/',
                'headerMatches': [{
                    'headerName': _TEST_METADATA_KEY,
                    'exactMatch': _TEST_METADATA_VALUE_EMPTY
                }],
                'metadataFilters': [{
                    'filterMatchCriteria': 'MATCH_ANY',
                    'filterLabels': match_labels
                }]
            }],
            'service': GcpResourceManager().alternative_backend_service()
        }, {
            'priority': 1,
            'matchRules': [{
                'prefixMatch':
                    '/grpc.testing.TestService/Unary',
                'metadataFilters': [{
                    'filterMatchCriteria': 'MATCH_ALL',
                    'filterLabels': match_labels
                }]
            }],
            'service': GcpResourceManager().default_backend_service()
        }]
        return host_rule, path_matcher
    def xds_config_validate(self, xds_config: DumpedXdsConfig):
        # Both rules survive, plus the default route: three routes total.
        self.assertNumEndpoints(xds_config, 2)
        self.assertEqual(len(xds_config.rds['virtualHosts'][0]['routes']), 3)
        self.assertEqual(
            xds_config.rds['virtualHosts'][0]['routes'][0]['match']['headers']
            [0]['name'], _TEST_METADATA_KEY)
        self.assertEqual(
            xds_config.rds['virtualHosts'][0]['routes'][0]['match']['headers']
            [0]['exactMatch'], _TEST_METADATA_VALUE_EMPTY)
        self.assertEqual(
            xds_config.rds['virtualHosts'][0]['routes'][1]['match']['prefix'],
            "/grpc.testing.TestService/Unary")
        self.assertEqual(
            xds_config.rds['virtualHosts'][0]['routes'][2]['match']['prefix'],
            "")
    def rpc_distribution_validate(self, test_client: XdsTestClient):
        # RPCs carrying the test metadata hit the higher-priority rule.
        rpc_distribution = self.configure_and_send(test_client,
                                                   rpc_types=[RpcTypeEmptyCall],
                                                   metadata=_TEST_METADATA,
                                                   num_rpcs=_NUM_RPCS)
        self.assertEqual(
            _NUM_RPCS,
            rpc_distribution.empty_call_alternative_service_rpc_count)
if __name__ == '__main__':
    absltest.main()
| [
"logging.getLogger",
"absl.testing.absltest.main",
"absl.flags.adopt_module_key_flags"
] | [((1161, 1188), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1178, 1188), False, 'import logging\n'), ((1189, 1239), 'absl.flags.adopt_module_key_flags', 'flags.adopt_module_key_flags', (['xds_url_map_testcase'], {}), '(xds_url_map_testcase)\n', (1217, 1239), False, 'from absl import flags\n'), ((10971, 10986), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (10984, 10986), False, 'from absl.testing import absltest\n')] |
# Training to a set of multiple objects (e.g. ShapeNet or DTU)
# tensorboard logs available in logs/<expname>
import sys
import os
sys.path.insert(
0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "src"))
)
import warnings
import trainlib
from model import make_model, loss
from render import NeRFRenderer
from data import get_split_dataset
import util
import numpy as np
import torch.nn.functional as F
import torch
from model import NeuralRenderer
import torchvision.transforms as transforms
from dotmap import DotMap
from PIL import Image
import pdb
from torchvision.utils import save_image, make_grid
warnings.filterwarnings(action='ignore')
def extra_args(parser):
    """Register training CLI flags on *parser* and return it.

    Flags cover batching, source-view count, encoder freezing, loss weights
    (reconstruction / swap / camera), discriminator settings, the bbox
    sampling cutoff step, and test-view selection.
    """
    parser.add_argument(
        "--batch_size", "-B", type=int, default=32, help="Object batch size ('SB')"
    )
    parser.add_argument(
        "--nviews",
        "-V",
        type=str,
        default="1",
        help="Number of source views (multiview); put multiple (space delim) to pick randomly per batch ('NV')",
    )
    parser.add_argument(
        "--freeze_enc",
        action="store_true",
        default=None,
        help="Freeze encoder weights and only train MLP",
    )
    parser.add_argument(
        "--recon",
        type=float,
        default=1.,
        help="Weight of the reconstruction loss",
    )
    parser.add_argument(
        "--swap",
        type=float,
        default=1.,
        help="Weight of the swap (adversarial) loss",
    )
    parser.add_argument(
        "--epoch-period",
        type=float,
        default=1.,
        help="Period (in epochs) of applying the discriminator loss",
    )
    parser.add_argument(
        "--disc_lr",
        type=float,
        default=1.,
        help="Discriminator learning rate ratio",
    )
    parser.add_argument(
        "--cam",
        type=float,
        default=1.,
        help="Weight of the camera prediction loss",
    )
    parser.add_argument(
        "--no_bbox_step",
        type=int,
        default=100000,
        help="Step to stop using bbox sampling",
    )
    parser.add_argument(
        "--fixed_test",
        action="store_true",
        default=None,
        # Fixed: help text was copy-pasted from --freeze_enc.
        help="Use a fixed test-view selection instead of a random one",
    )
    return parser
# Parse CLI flags (including the extras above) plus the config file.
args, conf = util.args.parse_args(extra_args, training=True, default_ray_batch_size=128)
device = util.get_cuda(args.gpu_id[0])
train_vis_path = os.path.join(args.visual_path, args.name, 'train')
dset, val_dset, _ = get_split_dataset(args.dataset_format, args.datadir)
print(
    "dset z_near {}, z_far {}, lindisp {}".format(dset.z_near, dset.z_far, dset.lindisp)
)
# make_model: builds the network from the "model" config section.
net = make_model(conf["model"]).to(device=device)  # PixelNeRFNet
# Example shape of the conf['renderer'] section:
# renderer {
#     n_coarse = 64
#     n_fine = 32
#     # Try using expected depth sample
#     n_fine_depth = 16
#     # Noise to add to depth sample
#     depth_std = 0.01
#     # Decay schedule, not used
#     sched = []
#     # White background color (false : black)
#     white_bkgd = True
# }
# NOTE(review): planned to be replaced with our own renderer.
# from_conf applies the renderer settings above.
renderer = NeRFRenderer.from_conf(conf["renderer"], lindisp=dset.lindisp,).to(
    device=device
)
# Parallelize: bind_parallel wraps (net, renderer) in a _RenderWrapper whose
# forward runs the NeRFRenderer across the given GPUs.
render_par = renderer.bind_parallel(net, args.gpu_id).eval()
nviews = list(map(int, args.nviews.split()))  # e.g. "1" -> [1]
class PixelNeRFTrainer(trainlib.Trainer):
    def __init__(self):
        """Set up trainer state: loss weights, renderer restore, intrinsics."""
        super().__init__(net, dset, val_dset, args, conf["train"], device=device)  # base-class init
        self.renderer_state_path = "%s/%s/_renderer" % (
            self.args.checkpoints_path,
            self.args.name,
        )
        self.lambda_coarse = conf.get_float("loss.lambda_coarse")
        self.lambda_fine = conf.get_float("loss.lambda_fine", 1.0)
        print(
            "lambda coarse {} and fine {}".format(self.lambda_coarse, self.lambda_fine)
        )
        fine_loss_conf = conf["loss.rgb"]
        if "rgb_fine" in conf["loss"]:
            print("using fine loss")
            fine_loss_conf = conf["loss.rgb_fine"]
        self.rgb_fine_crit = loss.get_rgb_loss(fine_loss_conf, False)
        if args.resume:
            # Restore the renderer state saved by extra_save_state, if any.
            if os.path.exists(self.renderer_state_path):
                renderer.load_state_dict(
                    torch.load(self.renderer_state_path, map_location=device), strict=False
                )
        self.z_near = dset.z_near  # kept from the dataset as-is for now
        self.z_far = dset.z_far
        # Hard-coded intrinsics: focal length and principal point.
        # NOTE(review): assumes one fixed camera for the whole dataset — confirm.
        self.focal = torch.tensor([2.187719,]) * 10
        self.c = torch.tensor([8.000000, 8.000000])
        self.use_bbox = args.no_bbox_step > 0
        self.recon_loss = torch.nn.MSELoss()
        self.cam_loss = torch.nn.MSELoss()
        # self.optim.add_param_group({'params': self.neural_renderer.parameters()})
def compute_bce(self, d_out, target):
targets = d_out.new_full(size=d_out.size(), fill_value=target)
loss = F.binary_cross_entropy_with_logits(d_out, targets)
return loss
    def post_batch(self, epoch, batch):
        # Advance the renderer's sampling schedule once per processed batch.
        renderer.sched_step(args.batch_size)
    def extra_save_state(self):
        # Persist renderer weights alongside the main model checkpoints.
        torch.save(renderer.state_dict(), self.renderer_state_path)
    def calc_losses_eval(self, data, epoch=None, batch=None, global_step=0):
        """Evaluation loss: encode one random source view per object, render
        `val_num` random target views, and return a dict of scalar losses.

        `data` carries "images" (SB, NV, 3, H, W), "poses" (SB, NV, 4, 4),
        "focal", and optionally "c".
        """
        # ---- ray-gathering section ----
        # SB: number of objects in the batch, NV: number of views per object.
        if "images" not in data:
            return {}
        all_images = data["images"].to(device=device)  # (SB, NV, 3, H, W)
        all_poses = data["poses"].to(device=device)
        SB, NV, _, H, W = all_images.shape
        all_focals = data["focal"]  # (SB); one focal length per batch sample
        all_c = data.get("c")  # (SB)
        if self.use_bbox and global_step >= args.no_bbox_step:
            self.use_bbox = False
            print(">>> Stopped using bbox sampling @ iter", global_step)
        all_rgb_gt = []
        all_rays = []
        curr_nviews = nviews[torch.randint(0, len(nviews), ()).item()]
        if curr_nviews == 1:  # one random source-view index per object
            image_ord = torch.randint(0, NV, (SB, 1))  # ours: nviews stays 1
        else:  # multi-view source selection, filled per object below
            image_ord = torch.empty((SB, curr_nviews), dtype=torch.long)
        val_num = 4
        # Per-object processing. RGB pixel sampling is dropped here: we only
        # build camera rays from the extrinsics, so no pix_inds are needed.
        for obj_idx in range(SB):
            # Pick val_num random target views of this object.
            indices = torch.randint(0, NV, (val_num,))
            images = all_images[obj_idx][indices]  # (val_num, 3, H, W)
            poses = all_poses[obj_idx][indices]  # (val_num, 4, 4)
            focal = self.focal
            c = self.c
            if curr_nviews > 1:
                # Somewhat inefficient, don't know better way
                image_ord[obj_idx] = torch.from_numpy(
                    np.random.choice(indices, curr_nviews, replace=False)
                )
            images_0to1 = images * 0.5 + 0.5  # undo the [-1, 1] normalization
            feat_H, feat_W = 16, 16  # target feature-map resolution
            cam_rays = util.gen_rays(  # W/H here must match the feature resolution
                poses, feat_W, feat_H, focal, self.z_near, self.z_far, c=c
            )  # (NV, H, W, 8)
            rgb_gt_all = images_0to1
            rgb_gt_all = (
                rgb_gt_all.permute(0, 2, 3, 1).contiguous().reshape(-1, 3)
            )  # (NV * H * W, 3)
            rgb_gt = rgb_gt_all  # (ray_batch_size, 3)
            rays = cam_rays.view(-1, cam_rays.shape[-1]).to(
                device=device
            )  # (ray_batch_size, 8)
            all_rgb_gt.append(rgb_gt)
            all_rays.append(rays)
        all_rgb_gt = torch.stack(all_rgb_gt)  # (SB, val_num*H*W, 3)
        all_rays = torch.stack(all_rays)  # (SB, val_num*H*W, 8)
        image_ord = image_ord.to(device)  # all zeros in the single-view case
        src_images = util.batched_index_select_nd(  # NS: number of source views
            all_images, image_ord
        )  # (SB, NS, 3, H, W)
        src_poses = util.batched_index_select_nd(all_poses, image_ord)  # (SB, NS, 4, 4)
        all_poses = all_images = None  # release references before encoding
        # ---- NeRF encoding of the selected source views ----
        net.encode(
            src_images,  # (SB, NS, 3, H, W)
            src_poses,
            self.focal.to(device=device),
            c=self.c.to(device=device) if all_c is not None else None,
        )
        # Render feature maps for all rays of all objects (see models.py forward).
        feat_out = render_par(all_rays, val_num, want_weights=True, training=False)
        test_out = net.neural_renderer(feat_out)  # decode features to RGB
        loss_dict = {}
        test_out_pred = test_out.reshape(SB, -1, 3)
        rgb_loss = self.recon_loss(test_out_pred, all_rgb_gt)
        loss_dict["rc"] = rgb_loss.item() * args.recon
        loss = rgb_loss
        loss_dict["t"] = loss.item()
        return loss_dict
    def calc_losses_train_generator(self, data, epoch=None, batch=None, global_step=0):
        """Generator-side losses for one training batch.

        Encodes all views, then renders (a) the original poses for the
        reconstruction term and (b) batch-flipped ("swapped") poses for the
        adversarial term. Returns (total_gen_loss, rgb_loss, gen_swap_loss).
        """
        if "images" not in data:
            return {}
        all_images = data["images"].to(device=device)  # (SB, NV, 3, H, W)
        SB, _, H, W = all_images.shape
        all_poses = data["poses"].to(device=device)  # (SB, NV, 4, 4)
        all_focals = data["focal"]  # (SB); one focal length per batch sample
        all_c = data.get("c")  # (SB)
        images_0to1 = all_images * 0.5 + 0.5  # undo the [-1, 1] normalization
        rgb_gt_all = (
            images_0to1.permute(0, 2, 3, 1).contiguous().reshape(-1, 3)
        )  # (B, H, W, 3)
        # Feature-map resolution rendered by the volume renderer.
        feat_H = 16
        feat_W = 16
        net.encode(  # same encode path as eval
            all_images,
            all_poses,
            self.focal.to(device=device),
            c=self.c.to(device=device)
        )
        # ---- generated (reconstruction) views ----
        cam_rays = util.gen_rays(  # W/H must match the feature resolution
            all_poses, feat_W, feat_H, self.focal, self.z_near, self.z_far, self.c
        )  # (NV, H, W, 8)
        rays = cam_rays.view(SB, -1, cam_rays.shape[-1]).to(device=device)  # (SB, n_rays, 8)
        val_num = 1
        featmap = render_par(rays, val_num, want_weights=True, training=True,)
        rgb_fake = net.neural_renderer(featmap)
        # ---- swapped views: poses flipped across the batch dimension ----
        swap_rot = all_poses.flip(0)
        swap_cam_rays = util.gen_rays(
            swap_rot, feat_W, feat_H, self.focal, self.z_near, self.z_far, self.c
        )  # (NV, H, W, 8)
        swap_rays = swap_cam_rays.view(SB, -1, swap_cam_rays.shape[-1]).to(device=device)
        val_num = 1
        swap_featmap = render_par(swap_rays, val_num, want_weights=True, training=True,)
        rgb_swap = net.neural_renderer(swap_featmap)
        if global_step % self.vis_interval == 0:
            # Grid rows: ground truth, reconstructions, swapped renders.
            image_grid = make_grid(torch.cat((all_images, rgb_fake, rgb_swap), dim=0), nrow=len(all_images))
            save_image(image_grid, f'{train_vis_path}/{epoch}_{batch}_out.jpg')
        # Only the swapped renders go through the discriminator.
        d_fake = self.discriminator(rgb_swap)
        rgb_loss = self.recon_loss(rgb_fake, all_images)
        gen_swap_loss = self.compute_bce(d_fake, 1)
        loss_gen = rgb_loss * args.recon + gen_swap_loss * args.swap
        return loss_gen, rgb_loss, gen_swap_loss
def calc_losses_train_discriminator(self, data, epoch=None, batch=None, global_step=0):
#######################################################################################
################### 여기서부터 잘 집중해서 읽어보기! ray 가져오는 부분!!! ########################
#######################################################################################
if "images" not in data:
return {}
all_images = data["images"].to(device=device) # (SB, NV, 3, H, W)
SB, _, H, W = all_images.shape # SB: number of obj, NV: number of view -> 4, 50, 3, 128, 128
all_poses = data["poses"].to(device=device) # (SB, NV, 4, 4)
all_focals = data["focal"] # (SB) # 각 batch sample마다의 focal length가 존재함
all_c = data.get("c") # (SB)
# 원래는 object for문에 껴있었는데 그냥 바로 배치 단위로
images_0to1 = all_images * 0.5 + 0.5
rgb_gt_all = (
images_0to1.permute(0, 2, 3, 1).contiguous().reshape(-1, 3)
) # (B, H, W, 3)
# feat-W, feat-H 받아야 함!
feat_H = 16 # <- args로 조정 가능하도록!
feat_W = 16 # <- args로 조정 가능하도록! # 아 오키 이거 volume renderer 세팅 따라가고, 다른 부분 있으면 giraffe 모듈 가져오기
net.encode( # <- encode부분은 동일하게 가져오고, forward하는 부분 좀더 신경써서 가져오기!
all_images,
all_poses,
self.focal.to(device=device),
c=self.c.to(device=device)
) # encoder 결과로 self.rotmat, self.shape, self.appearance 예측됨
# ################################################
# ########################### for generated views
# cam_rays = util.gen_rays( # 여기서의 W, H 사이즈는 output target feature image의 resolution이어야 함!
# all_poses, feat_W, feat_H, self.focal, self.z_near, self.z_far, self.c # poses에 해당하는 부분이 extrinsic으로 잘 반영되고 있음..!
# ) # (NV, H, W, 8)
# rays = cam_rays.view(SB, -1, cam_rays.shape[-1]).to(device=device) # (batch * num_ray * num_points, 8)
# val_num = 1
# featmap = render_par(rays, val_num, want_weights=True, training=True,) # <-outputs.toDict()의 결과
# rgb_fake = net.neural_renderer(featmap)
################################################
########################### for swapped views
swap_rot = all_poses.flip(0)
swap_cam_rays = util.gen_rays( # 여기서의 W, H 사이즈는 output target feature image의 resolution이어야 함!
swap_rot, feat_W, feat_H, self.focal, self.z_near, self.z_far, self.c # poses에 해당하는 부분이 extrinsic으로 잘 반영되고 있음..!
) # (NV, H, W, 8)
swap_rays = swap_cam_rays.view(SB, -1, swap_cam_rays.shape[-1]).to(device=device) # (batch * num_ray * num_points, 8)
val_num = 1
swap_featmap = render_par(swap_rays, val_num, want_weights=True, training=True,) # <-outputs.toDict()의 결과
rgb_swap = net.neural_renderer(swap_featmap)
# neural renderer를 저 render par 프로세스 안에 넣기!
# discriminator가 swap을 지날 예정!
d_real = self.discriminator(all_images)
d_fake = self.discriminator(rgb_swap.detach())
disc_swap_loss = self.compute_bce(d_fake, 0)
disc_real_loss = self.compute_bce(d_real, 1)
loss_disc = disc_swap_loss * args.swap + disc_real_loss * args.swap
return loss_disc, disc_swap_loss, disc_real_loss
def train_step(self, data, epoch, batch, global_step):
# discriminator가 먼저 update
dict_ = {}
# generator
# dict(net.named_parameters())["neural_renderer.conv_rgb.3.weight"][0,0,0]
# name neural_renderer.conv_rgb.3.weight | param torch.Size([3, 32, 3, 3]) -> [-0.0322, -0.0191, 0.0099]
# discriminator
# name conv_out.weight | param torch.Size([1, 512, 4, 4]) [0, 0, 0] -> [0.0052, 0.0011, 0.0091, 0.0003]
# ([0.0052, 0.0011, 0.0091, 0.0003], device='cuda:0', <- 얘는 왜 안변해..?
if epoch % args.epoch_period == 0:
disc_loss, disc_swap, disc_real = self.calc_losses_train_discriminator(data, epoch=epoch, batch=batch, global_step=global_step)
self.optim_d.zero_grad()
disc_loss.backward()
self.optim_d.step()
dict_['disc_loss'] = round(disc_loss.item(), 3)
dict_['disc_swap'] = round(disc_swap.item(), 3)
dict_['disc_real'] = round(disc_real.item(), 3)
# name neural_renderer.conv_rgb.3.weight : tensor([-0.0322, -0.0191, 0.0099], device='cuda:0', grad_fn=<SelectBackward0>) <- 안바뀜
# generator 그다음에 update
gen_loss, gen_rgb, gen_swap = self.calc_losses_train_generator(data, epoch=epoch, batch=batch, global_step=global_step)
self.optim.zero_grad()
gen_loss.backward()
self.optim.step()
# tensor([-0.0321, -0.0190, 0.0100], device='cuda:0', grad_fn=<SelectBackward0>) <- 바뀜
# tensor([0.0052, 0.0011, 0.0091, 0.0003], device='cuda:0') <- 안바뀜 <- discriminator가 학습이 안되고 있음
dict_['gen_loss'] = round(gen_loss.item(), 3)
dict_['gen_rgb'] = round(gen_rgb.item(), 3)
dict_['gen_swap'] = round(gen_swap.item(), 3)
return dict_
def eval_step(self, data, global_step):
renderer.eval()
losses = self.calc_losses_eval(data, global_step=global_step)
renderer.train()
return losses
# 얘네는 기존의 data loader 그대로 활용하도록 고고
def vis_step(self, data, global_step, epoch, batch, idx=None):
if "images" not in data:
return {}
if idx is None:
batch_indices = np.random.randint(0, data["images"].shape[0], 4) # 16 = batch -> (16, 251, 3, 128, 128)
else:
print(idx)
batch_indices = idx
total_psnr = 0
cat_list = []
for batch_idx in batch_indices:
# 16개 batch objects 중에 하나의 batch index를
images = data["images"][batch_idx].to(device=device) # (NV, 3, H, W)
poses = data["poses"][batch_idx].to(device=device) # (NV, 4, 4)
focal = self.focal # (1)
c = self.c
feat_H, feat_W = 16, 16
NV, _, H, W = images.shape
cam_rays = util.gen_rays( # (251개의 poses에 대해서 만듦..)
poses, feat_W, feat_H, focal, self.z_near, self.z_far, c=c # (251, 16, 16, 8)
) # (NV, H, W, 8)
images_0to1 = images * 0.5 + 0.5 # (NV, 3, H, W) # (251, 3, 128, 128)
val_num = 3
# curr_nviews를 4개로 잡아볼까
curr_nviews = nviews[torch.randint(0, len(nviews), (1,)).item()] # curr_nviews = 1
views_src = np.sort(np.random.choice(NV, curr_nviews, replace=False)) # NV: 251 -> ex.views_src: 여러 이미지들 나오는디요 시발
view_dests = np.random.randint(0, NV - curr_nviews, val_num) # ex. 63
for vs in range(curr_nviews):
view_dests += view_dests >= views_src[vs]
views_src = torch.from_numpy(views_src)
# set renderer net to eval mode
renderer.eval() # <- encoder는 왜 eval() 아니지 # renderer의 parameter 찾고 여기에 2DCNN 포함되는지 확인!
source_views = (
images_0to1[views_src].repeat(val_num, 1, 1, 1)
.permute(0, 2, 3, 1)
.cpu()
.numpy()
.reshape(-1, H, W, 3) # (3, 128, 128, 3)
)
gt = images_0to1[view_dests].permute(0, 2, 3, 1).cpu().numpy().reshape(val_num, H, W, 3) # (128, 128, 3)
with torch.no_grad(): # cam_rays: (NV, 16, 16, 8)
test_rays_dest = cam_rays[view_dests] # (3, H, W, 8) # -> (val_num, 16, 16, 8)
test_rays_src = cam_rays[views_src].repeat(val_num, 1, 1, 1) # (H, W, 8) # -> (16, 16, 8)
test_images_src = images[views_src].repeat(val_num, 1, 1, 1) # (NS, 3, H, W) # -> (3, 128, 128)
test_images_dest = images[view_dests] # -> # -> (val_num, 3, 128, 128)
net.encode(
test_images_src, # (val_num, 3, 128, 128)
poses[views_src].repeat(val_num, 1, 1), # (val_num, 4, 4)
self.focal.to(device=device),
c=self.c.to(device=device),
)
test_rays_dest = test_rays_dest.reshape(val_num, feat_H * feat_W, -1) # -> (1, 16*16, 8)
test_rays_src = test_rays_src.reshape(val_num, feat_H * feat_W, -1) # -> (1, 16*16, 8)
# test_rays: 1, 16x16, 8
feat_test_dest = render_par(test_rays_dest, val_num = 1, want_weights=True) # -> (1, 16*16, 8)
out_dest = net.neural_renderer(feat_test_dest)
feat_test_src = render_par(test_rays_src, val_num = 1, want_weights=True) # -> (1, 16*16, 8)
out_src = net.neural_renderer(feat_test_src)
rgb_psnr = out_dest.cpu().numpy().reshape(val_num, H, W, 3)
# for vals calculation
psnr = util.psnr(rgb_psnr, gt)
total_psnr += psnr
# source views, gt, test_out
cat = torch.cat((test_images_src[[0]], test_images_dest.reshape(-1, 3, H, W), out_src[[0]].clamp_(0., 1.), out_dest.reshape(-1, 3, H, W).clamp_(0., 1.)), dim=0)
cat_list.append(cat)
# new_cat = torch.stack(cat_list, dim=0).reshape(-1, 3, 128, 128)
new_cat = torch.cat(cat_list, dim=0)
image_grid = make_grid(new_cat, nrow=len(cat)) # row에 들어갈 image 갯수
save_image(image_grid, f'visuals/{args.name}/{epoch}_{batch}_out.jpg')
vals = {"psnr": total_psnr / len(batch_indices)}
print("psnr", total_psnr / len(batch_indices))
# set the renderer network back to train mode
renderer.train()
return None, vals
trainer = PixelNeRFTrainer()
trainer.start()
| [
"util.get_cuda",
"torch.from_numpy",
"torch.nn.MSELoss",
"data.get_split_dataset",
"torchvision.utils.save_image",
"render.NeRFRenderer.from_conf",
"os.path.exists",
"util.gen_rays",
"torch.randint",
"model.loss.get_rgb_loss",
"util.batched_index_select_nd",
"numpy.random.choice",
"util.psnr... | [((630, 670), 'warnings.filterwarnings', 'warnings.filterwarnings', ([], {'action': '"""ignore"""'}), "(action='ignore')\n", (653, 670), False, 'import warnings\n'), ((2238, 2313), 'util.args.parse_args', 'util.args.parse_args', (['extra_args'], {'training': '(True)', 'default_ray_batch_size': '(128)'}), '(extra_args, training=True, default_ray_batch_size=128)\n', (2258, 2313), False, 'import util\n'), ((2323, 2352), 'util.get_cuda', 'util.get_cuda', (['args.gpu_id[0]'], {}), '(args.gpu_id[0])\n', (2336, 2352), False, 'import util\n'), ((2371, 2421), 'os.path.join', 'os.path.join', (['args.visual_path', 'args.name', '"""train"""'], {}), "(args.visual_path, args.name, 'train')\n", (2383, 2421), False, 'import os\n'), ((2443, 2495), 'data.get_split_dataset', 'get_split_dataset', (['args.dataset_format', 'args.datadir'], {}), '(args.dataset_format, args.datadir)\n', (2460, 2495), False, 'from data import get_split_dataset\n'), ((2634, 2659), 'model.make_model', 'make_model', (["conf['model']"], {}), "(conf['model'])\n", (2644, 2659), False, 'from model import make_model, loss\n'), ((3077, 3139), 'render.NeRFRenderer.from_conf', 'NeRFRenderer.from_conf', (["conf['renderer']"], {'lindisp': 'dset.lindisp'}), "(conf['renderer'], lindisp=dset.lindisp)\n", (3099, 3139), False, 'from render import NeRFRenderer\n'), ((4305, 4345), 'model.loss.get_rgb_loss', 'loss.get_rgb_loss', (['fine_loss_conf', '(False)'], {}), '(fine_loss_conf, False)\n', (4322, 4345), False, 'from model import make_model, loss\n'), ((4735, 4759), 'torch.tensor', 'torch.tensor', (['[8.0, 8.0]'], {}), '([8.0, 8.0])\n', (4747, 4759), False, 'import torch\n'), ((4842, 4860), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (4858, 4860), False, 'import torch\n'), ((4885, 4903), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (4901, 4903), False, 'import torch\n'), ((5117, 5167), 'torch.nn.functional.binary_cross_entropy_with_logits', 'F.binary_cross_entropy_with_logits', 
(['d_out', 'targets'], {}), '(d_out, targets)\n', (5151, 5167), True, 'import torch.nn.functional as F\n'), ((9023, 9046), 'torch.stack', 'torch.stack', (['all_rgb_gt'], {}), '(all_rgb_gt)\n', (9034, 9046), False, 'import torch\n'), ((9109, 9130), 'torch.stack', 'torch.stack', (['all_rays'], {}), '(all_rays)\n', (9120, 9130), False, 'import torch\n'), ((9273, 9324), 'util.batched_index_select_nd', 'util.batched_index_select_nd', (['all_images', 'image_ord'], {}), '(all_images, image_ord)\n', (9301, 9324), False, 'import util\n'), ((9546, 9596), 'util.batched_index_select_nd', 'util.batched_index_select_nd', (['all_poses', 'image_ord'], {}), '(all_poses, image_ord)\n', (9574, 9596), False, 'import util\n'), ((11058, 11069), 'model.loss.item', 'loss.item', ([], {}), '()\n', (11067, 11069), False, 'from model import make_model, loss\n'), ((12708, 12798), 'util.gen_rays', 'util.gen_rays', (['all_poses', 'feat_W', 'feat_H', 'self.focal', 'self.z_near', 'self.z_far', 'self.c'], {}), '(all_poses, feat_W, feat_H, self.focal, self.z_near, self.\n z_far, self.c)\n', (12721, 12798), False, 'import util\n'), ((13415, 13503), 'util.gen_rays', 'util.gen_rays', (['swap_rot', 'feat_W', 'feat_H', 'self.focal', 'self.z_near', 'self.z_far', 'self.c'], {}), '(swap_rot, feat_W, feat_H, self.focal, self.z_near, self.z_far,\n self.c)\n', (13428, 13503), False, 'import util\n'), ((17042, 17130), 'util.gen_rays', 'util.gen_rays', (['swap_rot', 'feat_W', 'feat_H', 'self.focal', 'self.z_near', 'self.z_far', 'self.c'], {}), '(swap_rot, feat_W, feat_H, self.focal, self.z_near, self.z_far,\n self.c)\n', (17055, 17130), False, 'import util\n'), ((24141, 24167), 'torch.cat', 'torch.cat', (['cat_list'], {'dim': '(0)'}), '(cat_list, dim=0)\n', (24150, 24167), False, 'import torch\n'), ((24252, 24322), 'torchvision.utils.save_image', 'save_image', (['image_grid', 'f"""visuals/{args.name}/{epoch}_{batch}_out.jpg"""'], {}), "(image_grid, f'visuals/{args.name}/{epoch}_{batch}_out.jpg')\n", (24262, 
24322), False, 'from torchvision.utils import save_image, make_grid\n'), ((186, 211), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (201, 211), False, 'import os\n'), ((4386, 4426), 'os.path.exists', 'os.path.exists', (['self.renderer_state_path'], {}), '(self.renderer_state_path)\n', (4400, 4426), False, 'import os\n'), ((4687, 4711), 'torch.tensor', 'torch.tensor', (['[2.187719]'], {}), '([2.187719])\n', (4699, 4711), False, 'import torch\n'), ((6573, 6602), 'torch.randint', 'torch.randint', (['(0)', 'NV', '(SB, 1)'], {}), '(0, NV, (SB, 1))\n', (6586, 6602), False, 'import torch\n'), ((6678, 6726), 'torch.empty', 'torch.empty', (['(SB, curr_nviews)'], {'dtype': 'torch.long'}), '((SB, curr_nviews), dtype=torch.long)\n', (6689, 6726), False, 'import torch\n'), ((7166, 7198), 'torch.randint', 'torch.randint', (['(0)', 'NV', '(val_num,)'], {}), '(0, NV, (val_num,))\n', (7179, 7198), False, 'import torch\n'), ((8111, 8184), 'util.gen_rays', 'util.gen_rays', (['poses', 'feat_W', 'feat_H', 'focal', 'self.z_near', 'self.z_far'], {'c': 'c'}), '(poses, feat_W, feat_H, focal, self.z_near, self.z_far, c=c)\n', (8124, 8184), False, 'import util\n'), ((14169, 14236), 'torchvision.utils.save_image', 'save_image', (['image_grid', 'f"""{train_vis_path}/{epoch}_{batch}_out.jpg"""'], {}), "(image_grid, f'{train_vis_path}/{epoch}_{batch}_out.jpg')\n", (14179, 14236), False, 'from torchvision.utils import save_image, make_grid\n'), ((20241, 20289), 'numpy.random.randint', 'np.random.randint', (['(0)', "data['images'].shape[0]", '(4)'], {}), "(0, data['images'].shape[0], 4)\n", (20258, 20289), True, 'import numpy as np\n'), ((20867, 20940), 'util.gen_rays', 'util.gen_rays', (['poses', 'feat_W', 'feat_H', 'focal', 'self.z_near', 'self.z_far'], {'c': 'c'}), '(poses, feat_W, feat_H, focal, self.z_near, self.z_far, c=c)\n', (20880, 20940), False, 'import util\n'), ((21446, 21493), 'numpy.random.randint', 'np.random.randint', (['(0)', '(NV - curr_nviews)', 
'val_num'], {}), '(0, NV - curr_nviews, val_num)\n', (21463, 21493), True, 'import numpy as np\n'), ((21628, 21655), 'torch.from_numpy', 'torch.from_numpy', (['views_src'], {}), '(views_src)\n', (21644, 21655), False, 'import torch\n'), ((14062, 14112), 'torch.cat', 'torch.cat', (['(all_images, rgb_fake, rgb_swap)'], {'dim': '(0)'}), '((all_images, rgb_fake, rgb_swap), dim=0)\n', (14071, 14112), False, 'import torch\n'), ((21325, 21373), 'numpy.random.choice', 'np.random.choice', (['NV', 'curr_nviews'], {'replace': '(False)'}), '(NV, curr_nviews, replace=False)\n', (21341, 21373), True, 'import numpy as np\n'), ((22207, 22222), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (22220, 22222), False, 'import torch\n'), ((23728, 23751), 'util.psnr', 'util.psnr', (['rgb_psnr', 'gt'], {}), '(rgb_psnr, gt)\n', (23737, 23751), False, 'import util\n'), ((4490, 4547), 'torch.load', 'torch.load', (['self.renderer_state_path'], {'map_location': 'device'}), '(self.renderer_state_path, map_location=device)\n', (4500, 4547), False, 'import torch\n'), ((7750, 7803), 'numpy.random.choice', 'np.random.choice', (['indices', 'curr_nviews'], {'replace': '(False)'}), '(indices, curr_nviews, replace=False)\n', (7766, 7803), True, 'import numpy as np\n')] |
"""
basic.py : Some basic classes encapsulating filter chains
* Copyright 2017-2020 Valkka Security Ltd. and <NAME>
*
* Authors: <NAME> <<EMAIL>>
*
* This file is part of the Valkka library.
*
* Valkka is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>
*
*/
@file basic.py
@author <NAME>
@date 2017
@version 1.2.2
@brief Some basic classes encapsulating filter chains
"""
import sys
import time
import random
# so, everything that has .core, refers to the api1 level (i.e. swig
# wrapped cpp code)
from valkka import core
# api2 versions of the thread classes
from valkka.api2.threads import LiveThread, OpenGLThread
from valkka.api2.tools import parameterInitCheck, typeCheck
pre_mod = "valkka.api2.chains.basic : "
class BasicFilterchain:
"""This class implements the following filterchain:
::
(LiveThread:livethread) -->> (AVThread:avthread) -->> (OpenGLThread:glthread)
i.e. the stream is decoded by an AVThread and sent to the OpenGLThread for presentation
"""
parameter_defs = {
"livethread": LiveThread,
"openglthread": OpenGLThread,
"address": str,
"slot": int,
# these are for the AVThread instance:
"n_basic": (int, 20), # number of payload frames in the stack
"n_setup": (int, 20), # number of setup frames in the stack
"n_signal": (int, 20), # number of signal frames in the stack
"flush_when_full": (bool, False), # clear fifo at overflow
"affinity": (int, -1),
"verbose": (bool, False),
"msreconnect": (int, 0),
# Timestamp correction type: TimeCorrectionType_none,
# TimeCorrectionType_dummy, or TimeCorrectionType_smart (default)
"time_correction": None,
# Operating system socket ringbuffer size in bytes # 0 means default
"recv_buffer_size": (int, 0),
# Reordering buffer time for Live555 packets in MILLIseconds # 0 means
# default
"reordering_mstime": (int, 0),
"n_threads": (int, 1)
}
def __init__(self, **kwargs):
# auxiliary string for debugging output
self.pre = self.__class__.__name__ + " : "
# check for input parameters, attach them to this instance as
# attributes
parameterInitCheck(self.parameter_defs, kwargs, self)
self.init()
def init(self):
self.idst = str(id(self))
self.makeChain()
self.createContext()
self.startThreads()
self.active = True
def __del__(self):
self.close()
def close(self):
if (self.active):
if (self.verbose):
print(self.pre, "Closing threads and contexes")
self.decodingOff()
self.closeContext()
self.stopThreads()
self.active = False
def makeChain(self):
"""Create the filter chain
"""
self.gl_in_filter = self.openglthread.getInput(
) # get input FrameFilter from OpenGLThread
self.framefifo_ctx = core.FrameFifoContext()
self.framefifo_ctx.n_basic = self.n_basic
self.framefifo_ctx.n_setup = self.n_setup
self.framefifo_ctx.n_signal = self.n_signal
self.framefifo_ctx.flush_when_full = self.flush_when_full
self.avthread = core.AVThread(
"avthread_" + self.idst,
self.gl_in_filter,
self.framefifo_ctx)
if self.affinity > -1 and self.n_threads > 1:
print("WARNING: can't use affinity with multiple threads")
self.avthread.setAffinity(self.affinity)
if self.affinity > -1:
self.avthread.setNumberOfThreads(self.n_threads)
# get input FrameFilter from AVThread
self.av_in_filter = self.avthread.getFrameFilter()
def createContext(self):
"""Creates a LiveConnectionContext and registers it to LiveThread
"""
# define stream source, how the stream is passed on, etc.
self.ctx = core.LiveConnectionContext()
# slot number identifies the stream source
self.ctx.slot = self.slot
if (self.address.find("rtsp://") == 0):
self.ctx.connection_type = core.LiveConnectionType_rtsp
else:
self.ctx.connection_type = core.LiveConnectionType_sdp # this is an rtsp connection
self.ctx.address = self.address
# stream address, i.e. "rtsp://.."
self.ctx.framefilter = self.av_in_filter
self.ctx.msreconnect = self.msreconnect
# some extra parameters:
"""
// ctx.time_correction =TimeCorrectionType::none;
// ctx.time_correction =TimeCorrectionType::dummy;
// default time correction is smart
// ctx.recv_buffer_size=1024*1024*2; // Operating system ringbuffer size for incoming socket
// ctx.reordering_time =100000; // Live555 packet reordering treshold time (microsecs)
"""
if (self.time_correction is not None):
self.ctx.time_correction = self.time_correction
# self.time_correction=core.TimeCorrectionType_smart # default ..
self.ctx.recv_buffer_size = self.recv_buffer_size
self.ctx.reordering_time = self.reordering_mstime * \
1000 # from millisecs to microsecs
# send the information about the stream to LiveThread
self.livethread.registerStream(self.ctx)
self.livethread.playStream(self.ctx)
def closeContext(self):
self.livethread.stopStream(self.ctx)
self.livethread.deregisterStream(self.ctx)
def startThreads(self):
"""Starts thread required by the filter chain
"""
self.avthread.startCall()
def stopThreads(self):
"""Stops threads in the filter chain
"""
if (self.verbose):
print(self.pre, "stopping avthread")
self.avthread.stopCall()
if (self.verbose):
print(self.pre, "avthread stopped")
def decodingOff(self):
self.avthread.decodingOffCall()
def decodingOn(self):
self.avthread.decodingOnCall()
class ShmemFilterchain(BasicFilterchain):
"""A filter chain with a shared mem hook
::
(LiveThread:livethread) -->> (AVThread:avthread) --+
| main branch
{ForkFrameFilter: fork_filter} <-------------------+
|
branch 1 +-->> (OpenGLThread:glthread)
|
branch 2 +--> {IntervalFrameFilter: interval_filter} --> {SwScaleFrameFilter: sws_filter} --> {RGBShmemFrameFilter: shmem_filter}
* Frames are decoded in the main branch from H264 => YUV
* The stream of YUV frames is forked into two branches
* branch 1 goes to OpenGLThread that interpolates YUV to RGB on the GPU
* branch 2 goes to interval_filter that passes a YUV frame only once every second. From there, frames are interpolated on the CPU from YUV to RGB and finally passed through shared memory to another process.
"""
parameter_defs = { # additional parameters to the mother class
# images passed over shmem are full-hd/4 reso
"shmem_image_dimensions": (tuple, (1920 // 4, 1080 // 4)),
# .. passed every 1000 milliseconds
"shmem_image_interval": (int, 1000),
# size of the ringbuffer
"shmem_ringbuffer_size": (int, 10),
"shmem_name": None,
"event_fd": None
}
parameter_defs.update(BasicFilterchain.parameter_defs) # don't forget!
def __init__(self, **kwargs):
# auxiliary string for debugging output
self.pre = self.__class__.__name__ + " : "
# check for input parameters, attach them to this instance as
# attributes
parameterInitCheck(self.parameter_defs, kwargs, self)
typeCheck(self.shmem_image_dimensions[0], int)
typeCheck(self.shmem_image_dimensions[1], int)
self.init()
def makeChain(self):
"""Create the filter chain
"""
if (self.shmem_name is None):
self.shmem_name = "shmemff" + self.idst
# print(self.pre,self.shmem_name)
# self.n_bytes =self.shmem_image_dimensions[0]*self.shmem_image_dimensions[1]*3
n_buf = self.shmem_ringbuffer_size
# branch 1
# get input FrameFilter from OpenGLThread
self.gl_in_filter = self.openglthread.getInput()
# branch 2
# print(self.pre,"using shmem name",self.shmem_name)
# print(self.shmem_name)
self.shmem_filter = core.RGBShmemFrameFilter(
self.shmem_name,
n_buf,
self.shmem_image_dimensions[0],
self.shmem_image_dimensions[1]) # shmem id, cells, width, height
# self.shmem_filter =core.InfoFrameFilter ("info"+self.idst)
if self.event_fd is not None:
self.shmem_filter.useFd(self.event_fd)
self.sws_filter = core.SwScaleFrameFilter(
"sws_filter" + self.idst,
self.shmem_image_dimensions[0],
self.shmem_image_dimensions[1],
self.shmem_filter)
self.interval_filter = core.TimeIntervalFrameFilter(
"interval_filter" + self.idst, self.shmem_image_interval, self.sws_filter)
# fork: writes to branches 1 and 2
# self.fork_filter =core.ForkFrameFilter
# ("fork_filter"+self.idst,self.gl_in_filter,self.sws_filter) # FIX
self.fork_filter = core.ForkFrameFilter(
"fork_filter" + self.idst,
self.gl_in_filter,
self.interval_filter)
# self.fork_filter =core.ForkFrameFilter ("fork_filter"+self.idst,self.gl_in_filter,None)
# self.fork_filter=self.gl_in_filter # debugging
# main branch
self.framefifo_ctx = core.FrameFifoContext()
self.framefifo_ctx.n_basic = self.n_basic
self.framefifo_ctx.n_setup = self.n_setup
self.framefifo_ctx.n_signal = self.n_signal
self.framefifo_ctx.flush_when_full = self.flush_when_full
self.avthread = core.AVThread(
"avthread_" + self.idst,
self.fork_filter,
self.framefifo_ctx) # AVThread writes to self.fork_filter
self.avthread.setAffinity(self.affinity)
# get input FrameFilter from AVThread
self.av_in_filter = self.avthread.getFrameFilter()
# self.av_in_filter is used by BasicFilterchain.createContext that passes self.av_in_filter to LiveThread
# # self.live_out_filter =core.InfoFrameFilter ("live_out_filter"+self.idst,self.av_in_filter)
def getShmemPars(self):
"""Returns shared mem name that should be used in the client process and the ringbuffer size
"""
# SharedMemRingBuffer(const char* name, int n_cells, std::size_t n_bytes, int mstimeout=0, bool is_server=false); // <pyapi>
# return self.shmem_name, self.shmem_ringbuffer_size, self.n_bytes
return self.shmem_name, self.shmem_ringbuffer_size, self.shmem_image_dimensions
def test1():
st = """ Test single stream
"""
pre = pre_mod + "test1 :"
print(pre, st)
livethread = LiveThread(
name="live_thread",
verbose=True
)
openglthread = OpenGLThread(
name="mythread",
n_1440p=5,
verbose=True
)
# now livethread and openglthread are running ..
chain = BasicFilterchain(
livethread=livethread,
openglthread=openglthread,
address="rtsp://admin:admin@192.168.1.10",
slot=1
)
print("sleeping for some secs")
time.sleep(3)
print("bye!")
def test2():
st = """ Test ShmemFilterchain
"""
pre = pre_mod + "test2 :"
print(pre, st)
livethread = LiveThread(
name="live_thread",
verbose=True
)
openglthread = OpenGLThread(
name="mythread",
n_1440p=5,
verbose=True
)
# now livethread and openglthread are running ..
chain = ShmemFilterchain(
livethread=livethread,
openglthread=openglthread,
address="rtsp://admin:admin@192.168.1.10",
slot=1,
# images passed over shmem are full-hd/4 reso
shmem_image_dimensions=(1920 // 4, 1080 // 4),
shmem_image_interval=1000, # .. passed every 1000 milliseconds
shmem_ringbuffer_size=10 # size of the ringbuffer
)
print("sleeping for some secs")
time.sleep(3)
print("bye!")
def main():
pre = pre_mod + "main :"
print(pre, "main: arguments: ", sys.argv)
if (len(sys.argv) < 2):
print(pre, "main: needs test number")
else:
st = "test" + str(sys.argv[1]) + "()"
exec(st)
if (__name__ == "__main__"):
main()
| [
"valkka.core.RGBShmemFrameFilter",
"valkka.core.AVThread",
"valkka.core.FrameFifoContext",
"time.sleep",
"valkka.api2.threads.LiveThread",
"valkka.core.SwScaleFrameFilter",
"valkka.api2.threads.OpenGLThread",
"valkka.core.TimeIntervalFrameFilter",
"valkka.core.ForkFrameFilter",
"valkka.api2.tools.... | [((11802, 11846), 'valkka.api2.threads.LiveThread', 'LiveThread', ([], {'name': '"""live_thread"""', 'verbose': '(True)'}), "(name='live_thread', verbose=True)\n", (11812, 11846), False, 'from valkka.api2.threads import LiveThread, OpenGLThread\n'), ((11889, 11943), 'valkka.api2.threads.OpenGLThread', 'OpenGLThread', ([], {'name': '"""mythread"""', 'n_1440p': '(5)', 'verbose': '(True)'}), "(name='mythread', n_1440p=5, verbose=True)\n", (11901, 11943), False, 'from valkka.api2.threads import LiveThread, OpenGLThread\n'), ((12238, 12251), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (12248, 12251), False, 'import time\n'), ((12395, 12439), 'valkka.api2.threads.LiveThread', 'LiveThread', ([], {'name': '"""live_thread"""', 'verbose': '(True)'}), "(name='live_thread', verbose=True)\n", (12405, 12439), False, 'from valkka.api2.threads import LiveThread, OpenGLThread\n'), ((12482, 12536), 'valkka.api2.threads.OpenGLThread', 'OpenGLThread', ([], {'name': '"""mythread"""', 'n_1440p': '(5)', 'verbose': '(True)'}), "(name='mythread', n_1440p=5, verbose=True)\n", (12494, 12536), False, 'from valkka.api2.threads import LiveThread, OpenGLThread\n'), ((13099, 13112), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (13109, 13112), False, 'import time\n'), ((2873, 2926), 'valkka.api2.tools.parameterInitCheck', 'parameterInitCheck', (['self.parameter_defs', 'kwargs', 'self'], {}), '(self.parameter_defs, kwargs, self)\n', (2891, 2926), False, 'from valkka.api2.tools import parameterInitCheck, typeCheck\n'), ((3637, 3660), 'valkka.core.FrameFifoContext', 'core.FrameFifoContext', ([], {}), '()\n', (3658, 3660), False, 'from valkka import core\n'), ((3904, 3981), 'valkka.core.AVThread', 'core.AVThread', (["('avthread_' + self.idst)", 'self.gl_in_filter', 'self.framefifo_ctx'], {}), "('avthread_' + self.idst, self.gl_in_filter, self.framefifo_ctx)\n", (3917, 3981), False, 'from valkka import core\n'), ((4619, 4647), 'valkka.core.LiveConnectionContext', 
'core.LiveConnectionContext', ([], {}), '()\n', (4645, 4647), False, 'from valkka import core\n'), ((8392, 8445), 'valkka.api2.tools.parameterInitCheck', 'parameterInitCheck', (['self.parameter_defs', 'kwargs', 'self'], {}), '(self.parameter_defs, kwargs, self)\n', (8410, 8445), False, 'from valkka.api2.tools import parameterInitCheck, typeCheck\n'), ((8454, 8500), 'valkka.api2.tools.typeCheck', 'typeCheck', (['self.shmem_image_dimensions[0]', 'int'], {}), '(self.shmem_image_dimensions[0], int)\n', (8463, 8500), False, 'from valkka.api2.tools import parameterInitCheck, typeCheck\n'), ((8509, 8555), 'valkka.api2.tools.typeCheck', 'typeCheck', (['self.shmem_image_dimensions[1]', 'int'], {}), '(self.shmem_image_dimensions[1], int)\n', (8518, 8555), False, 'from valkka.api2.tools import parameterInitCheck, typeCheck\n'), ((9182, 9299), 'valkka.core.RGBShmemFrameFilter', 'core.RGBShmemFrameFilter', (['self.shmem_name', 'n_buf', 'self.shmem_image_dimensions[0]', 'self.shmem_image_dimensions[1]'], {}), '(self.shmem_name, n_buf, self.\n shmem_image_dimensions[0], self.shmem_image_dimensions[1])\n', (9206, 9299), False, 'from valkka import core\n'), ((9573, 9715), 'valkka.core.SwScaleFrameFilter', 'core.SwScaleFrameFilter', (["('sws_filter' + self.idst)", 'self.shmem_image_dimensions[0]', 'self.shmem_image_dimensions[1]', 'self.shmem_filter'], {}), "('sws_filter' + self.idst, self.\n shmem_image_dimensions[0], self.shmem_image_dimensions[1], self.\n shmem_filter)\n", (9596, 9715), False, 'from valkka import core\n'), ((9786, 9894), 'valkka.core.TimeIntervalFrameFilter', 'core.TimeIntervalFrameFilter', (["('interval_filter' + self.idst)", 'self.shmem_image_interval', 'self.sws_filter'], {}), "('interval_filter' + self.idst, self.\n shmem_image_interval, self.sws_filter)\n", (9814, 9894), False, 'from valkka import core\n'), ((10103, 10196), 'valkka.core.ForkFrameFilter', 'core.ForkFrameFilter', (["('fork_filter' + self.idst)", 'self.gl_in_filter', 'self.interval_filter'], 
{}), "('fork_filter' + self.idst, self.gl_in_filter, self.\n interval_filter)\n", (10123, 10196), False, 'from valkka import core\n'), ((10448, 10471), 'valkka.core.FrameFifoContext', 'core.FrameFifoContext', ([], {}), '()\n', (10469, 10471), False, 'from valkka import core\n'), ((10715, 10791), 'valkka.core.AVThread', 'core.AVThread', (["('avthread_' + self.idst)", 'self.fork_filter', 'self.framefifo_ctx'], {}), "('avthread_' + self.idst, self.fork_filter, self.framefifo_ctx)\n", (10728, 10791), False, 'from valkka import core\n')] |
import os.path
from uuid import uuid4
def save_image(image, save_to='.'):
"""
Save image to local dick
"""
suffix = '.jpg'
if image.mode == 'P':
image = image.convert('RGBA')
if image.mode == 'RGBA':
suffix = '.png'
filename = uuid4().hex + suffix
if not os.path.isdir(save_to):
os.makedirs(save_to)
filename = os.path.join(save_to, filename)
image.save(filename)
return filename
| [
"uuid.uuid4"
] | [((276, 283), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (281, 283), False, 'from uuid import uuid4\n')] |
#
# File:
# color4.py
#
# Synopsis:
# Draws sixteen sample color boxs with RGB labels.
#
# Category:
# Colors
#
# Author:
# <NAME>
#
# Date of initial publication:
# January, 2006
#
# Description:
# This example draws sixteen color boxes using the RGB
# values for named colors. The boxes are labeled with
# the color name and the associated RGB values.
#
# Effects illustrated:
# o Drawing lines and polygons in NDC space.
# o RGB equivalents for some named colors.
# o Converting integer RGB color specifications to floating point.
#
# Output:
# o One plot is produced with sixteen sample color boxes.
#
from __future__ import print_function
import Ngl
import numpy
#
# Define the colors and labels to be used.
#
colors_and_labels = \
[ \
[233, 150, 122], "DarkSalmon", \
[164, 42, 42], "Brown", \
[255, 127, 0], "DarkOrange1", \
[255, 0, 0], "Red", \
[255, 255, 0], "Yellow", \
[ 0, 255, 0], "Green", \
[ 34, 139, 34], "ForestGreen", \
[ 0, 255, 255], "Cyan", \
[ 79, 148, 205], "SteelBlue3", \
[ 0, 0, 255], "Blue", \
[148, 0, 211], "DarkViolet", \
[255, 0, 255], "Magneta", \
[255, 255, 255], "White", \
[153, 153, 153], "Gray60", \
[102, 102, 102], "Gray40", \
[ 0, 0, 0], "Black" \
]
#
# Open a workstation with a default color table having
# background color "black" and foreground color "white".
#
rlist = Ngl.Resources()
rlist.wkColorMap = "default"
rlist.wkForegroundColor = "White"
rlist.wkBackgroundColor = "Black"
wks_type = "png"
wks = Ngl.open_wks(wks_type,"color4",rlist)
#
# Extract the colors and labels.
#
colors = colors_and_labels[0:len(colors_and_labels):2]
labels = colors_and_labels[1:len(colors_and_labels):2]
#
# Set up arrays and resource lists for drawing the boxes.
# Select "Helvetica-Bold" for all text.
#
x = numpy.zeros(5,'f')
y = numpy.zeros(5,'f')
poly_res = Ngl.Resources()
text_res = Ngl.Resources()
text_res.txFont = "Helvetica-Bold"
#
# Draw the color boxes and titles.
#
for i in range(0,len(colors)):
#
# delx_0 - horizontal spacing between boxes.
# delx_1 - width of a box.
# dely_0 - vertical spacing between boxes.
# dely_1 - height of a box.
#
delx_0, delx_1, dely_0, dely_1 = 0.245, 0.235, 0.22, 0.15
x[0], y[0] = 0.015 + delx_0*(i%4), 0.90 - (i//4)*dely_0
x[1], y[1] = x[0] + delx_1 , y[0]
x[2], y[2] = x[1] , y[1] - dely_1
x[3], y[3] = x[0] , y[2]
x[4], y[4] = x[0] , y[0]
#
# Convert the integer color values obtained from the
# named color chart (as entered above) to floating
# point numbers in the range 0. to 1.
#
r, g, b = colors[i][0]/255., colors[i][1]/255., colors[i][2]/255.
poly_res.gsFillColor = [r,g,b] # Ngl.new_color(wks, r, g, b)
#
# Draw a white outline if the color is black, otherwise draw a colored box.
#
if (labels[i] == "Black"):
Ngl.polyline_ndc(wks, x, y, poly_res)
else:
Ngl.polygon_ndc(wks, x, y, poly_res)
#
# Label the boxes.
#
text_res.txFontHeightF = 0.017
Ngl.text_ndc(wks, labels[i], 0.5*(x[0]+x[1]), y[0] + 0.0125, text_res)
rgb_label = "R={:4.2f} G={:4.2f} B={:4.2f}".format(r, g, b)
text_res.txFontHeightF = 0.015
Ngl.text_ndc(wks, rgb_label, 0.5*(x[0]+x[1]), y[3] - 0.0125, text_res)
#
# Plot top and bottom labels.
#
text_res.txFontHeightF = 0.025
Ngl.text_ndc(wks, "Sixteen Sample Colors", 0.5, 0.96, text_res)
text_res.txFontHeightF = 0.018
Ngl.text_ndc(wks, "The titles below each box indicate Red, Green, and Blue intensity values.", 0.5, 0.035, text_res)
Ngl.frame(wks)
Ngl.end()
| [
"Ngl.polyline_ndc",
"Ngl.polygon_ndc",
"Ngl.Resources",
"Ngl.end",
"Ngl.open_wks",
"Ngl.text_ndc",
"numpy.zeros",
"Ngl.frame"
] | [((1636, 1651), 'Ngl.Resources', 'Ngl.Resources', ([], {}), '()\n', (1649, 1651), False, 'import Ngl\n'), ((1772, 1811), 'Ngl.open_wks', 'Ngl.open_wks', (['wks_type', '"""color4"""', 'rlist'], {}), "(wks_type, 'color4', rlist)\n", (1784, 1811), False, 'import Ngl\n'), ((2069, 2088), 'numpy.zeros', 'numpy.zeros', (['(5)', '"""f"""'], {}), "(5, 'f')\n", (2080, 2088), False, 'import numpy\n'), ((2092, 2111), 'numpy.zeros', 'numpy.zeros', (['(5)', '"""f"""'], {}), "(5, 'f')\n", (2103, 2111), False, 'import numpy\n'), ((2122, 2137), 'Ngl.Resources', 'Ngl.Resources', ([], {}), '()\n', (2135, 2137), False, 'import Ngl\n'), ((2149, 2164), 'Ngl.Resources', 'Ngl.Resources', ([], {}), '()\n', (2162, 2164), False, 'import Ngl\n'), ((3569, 3632), 'Ngl.text_ndc', 'Ngl.text_ndc', (['wks', '"""Sixteen Sample Colors"""', '(0.5)', '(0.96)', 'text_res'], {}), "(wks, 'Sixteen Sample Colors', 0.5, 0.96, text_res)\n", (3581, 3632), False, 'import Ngl\n'), ((3664, 3789), 'Ngl.text_ndc', 'Ngl.text_ndc', (['wks', '"""The titles below each box indicate Red, Green, and Blue intensity values."""', '(0.5)', '(0.035)', 'text_res'], {}), "(wks,\n 'The titles below each box indicate Red, Green, and Blue intensity values.'\n , 0.5, 0.035, text_res)\n", (3676, 3789), False, 'import Ngl\n'), ((3788, 3802), 'Ngl.frame', 'Ngl.frame', (['wks'], {}), '(wks)\n', (3797, 3802), False, 'import Ngl\n'), ((3803, 3812), 'Ngl.end', 'Ngl.end', ([], {}), '()\n', (3810, 3812), False, 'import Ngl\n'), ((3263, 3337), 'Ngl.text_ndc', 'Ngl.text_ndc', (['wks', 'labels[i]', '(0.5 * (x[0] + x[1]))', '(y[0] + 0.0125)', 'text_res'], {}), '(wks, labels[i], 0.5 * (x[0] + x[1]), y[0] + 0.0125, text_res)\n', (3275, 3337), False, 'import Ngl\n'), ((3431, 3505), 'Ngl.text_ndc', 'Ngl.text_ndc', (['wks', 'rgb_label', '(0.5 * (x[0] + x[1]))', '(y[3] - 0.0125)', 'text_res'], {}), '(wks, rgb_label, 0.5 * (x[0] + x[1]), y[3] - 0.0125, text_res)\n', (3443, 3505), False, 'import Ngl\n'), ((3117, 3154), 'Ngl.polyline_ndc', 
'Ngl.polyline_ndc', (['wks', 'x', 'y', 'poly_res'], {}), '(wks, x, y, poly_res)\n', (3133, 3154), False, 'import Ngl\n'), ((3167, 3203), 'Ngl.polygon_ndc', 'Ngl.polygon_ndc', (['wks', 'x', 'y', 'poly_res'], {}), '(wks, x, y, poly_res)\n', (3182, 3203), False, 'import Ngl\n')] |
import random
from evaluator import ChessEval
class ChessAI(object):
    """Chess engine opponent that picks moves via minimax with alpha-beta pruning."""

    # Sentinel score larger than any reachable evaluation.
    INF = 8000

    def __init__(self, game, color):
        """
        :param game: game controller providing move generation/undo and
                     check/checkmate queries.
        :param color: side this AI plays, 'White' or 'Black'.
        """
        self.game = game
        self.evaluator = ChessEval(game)
        self.color = color
        self.drunkMode = False
        # Material values used by the simple scoring helpers below.
        self.points = {'Pawn': 10, 'Knight': 30, 'Bishop': 30, 'Rook': 50, 'Queen': 90, 'King': 200}
        self.depth = 3

    def changeLevel(self, level):
        """Set the difficulty; search depth is ``level + 1`` plies."""
        self.depth = level + 1

    def getLevel(self):
        """Return the current difficulty level (search depth - 1)."""
        return self.depth - 1

    def nextMove(self, playerColor=None):
        """
        Return the best move found for ``playerColor`` (defaults to this
        AI's own color), or None when there are no legal moves.
        """
        if playerColor is None:
            playerColor = self.color
        results = self.game.getAllLegalMoves(playerColor, fullValidate=False)
        if len(results) == 0:
            return None
        move, _ = self.minimax(-self.INF, self.INF, playerColor, playerColor, self.depth)
        return move

    # Minimax is a completely new concept to me, and I the reference from
    # https://www.chessprogramming.org/Search to learn about it
    # All of the code is mine, with the exception of alpha beta pruning, which is
    # a standard template I got from the website.
    def minimax(self, alpha, beta, color, playerColor, depth):
        """
        Alpha-beta minimax search.

        :param alpha: best score the maximizing side can guarantee so far.
        :param beta: best score the minimizing side can guarantee so far.
        :param color: side to move at this node.
        :param playerColor: side the search is optimizing for.
        :param depth: remaining plies to search.
        :return: (move, score); move is None at leaf/terminal nodes.
        """
        results = self.game.getAllLegalMoves(color, fullValidate=False)
        score = self.getScore()
        # NOTE(review): negating only for White looks suspicious given that
        # getScore() is already relative to self.color — preserved as-is.
        if playerColor == 'White':
            score = -score
        if depth == 0:
            return None, score
        if len(results) == 0:
            # No legal moves (mate or stalemate): evaluate the position as-is.
            return None, score
        if color == 'White':
            otherColor = 'Black'
        else:
            otherColor = 'White'
        if playerColor == color:
            # Maximizing node: try each move, keep the best outcome.
            move = None
            maxVal = -self.INF
            for i in range(len(results)):
                if self.game.movePiece(results[i][0], results[i][1], results[i][2],
                                       aiMode=True, simulate=False):
                    _, value = self.minimax(alpha, beta, otherColor, playerColor, depth - 1)
                    if value > maxVal:
                        move = results[i]
                        maxVal = value
                    self.game.undoLastMove()
                    alpha = max(alpha, maxVal)
                    if beta <= alpha:
                        # Beta cutoff: the opponent will avoid this line.
                        break
            return (move, maxVal)
        else:
            # Minimizing node: opponent picks the move worst for us.
            minVal = self.INF
            move = None
            for i in range(len(results)):
                if not self.game.movePiece(results[i][0], results[i][1], results[i][2],
                                           aiMode=True, simulate=False):
                    continue
                _, value = self.minimax(alpha, beta, otherColor, playerColor, depth - 1)
                self.game.undoLastMove()
                if value < minVal:
                    minVal = value
                    move = results[i]
                beta = min(beta, minVal)
                if beta <= alpha:
                    # Alpha cutoff.
                    break
            return move, minVal

    def getScore(self):
        """Evaluator score from this AI's perspective, with checkmate bonuses."""
        blackScore, whiteScore = self.evaluator.getScore()
        if self.game.inCheck('Black') and self.game.checkMate('Black'):
            blackScore -= 900
        if self.game.inCheck('White') and self.game.checkMate('White'):
            whiteScore -= 900
        if self.color == 'White':
            return whiteScore - blackScore
        else:
            return blackScore - whiteScore

    def getScoreSimple(self):
        """Material-only score difference from this AI's perspective."""
        w = self.getWhiteScore()
        b = self.getBlackScore()
        if self.color == 'White':
            return w - b
        else:
            return b - w

    # Assign large score to checkmate so AI goes for the win
    def getWhiteScore(self):
        """Sum of White's material; +900 when Black is checkmated."""
        score = 0
        for piece in self.game.getPieces():
            if piece.color == 'White':
                score += self.points[piece.name]
        if self.game.inCheck('Black') and self.game.checkMate('Black'):
            score += 900
        return score

    def getBlackScore(self):
        """Sum of Black's material; +900 when White is checkmated."""
        score = 0
        for piece in self.game.getPieces():
            if piece.color == 'Black':
                score += self.points[piece.name]
        if self.game.inCheck('White') and self.game.checkMate('White'):
            score += 900
        return score
| [
"evaluator.ChessEval"
] | [((174, 189), 'evaluator.ChessEval', 'ChessEval', (['game'], {}), '(game)\n', (183, 189), False, 'from evaluator import ChessEval\n')] |
import bpy
def Render_Animation():
    """
    Prepare the current Blender scene for rendering: add a camera and a
    sun light, frame all mesh objects in the camera view, and switch the
    render output to a transparent background with RGBA color.
    """
    # Fixed camera placement/orientation chosen to face the scene content.
    bpy.ops.object.camera_add(enter_editmode=False, align='VIEW', location=(0, 0, 0), rotation=(1.60443, 0.014596, 2.55805))
    bpy.ops.object.light_add(type='SUN', location=(0, 0, 5))  # setting camera and lights for rendering
    scene = bpy.context.scene
    # Select every mesh so camera_to_view_selected() frames all of them.
    mesh_objs = [o for o in scene.objects if o.type == 'MESH']
    for ob in mesh_objs:
        ob.select_set(True)
    bpy.ops.view3d.camera_to_view_selected()
    # Render with an alpha channel on a transparent background.
    bpy.context.scene.render.film_transparent = True
    bpy.context.scene.render.image_settings.color_mode = 'RGBA'
| [
"bpy.ops.view3d.camera_to_view_selected",
"bpy.ops.object.camera_add",
"bpy.ops.object.light_add"
] | [((40, 165), 'bpy.ops.object.camera_add', 'bpy.ops.object.camera_add', ([], {'enter_editmode': '(False)', 'align': '"""VIEW"""', 'location': '(0, 0, 0)', 'rotation': '(1.60443, 0.014596, 2.55805)'}), "(enter_editmode=False, align='VIEW', location=(0, \n 0, 0), rotation=(1.60443, 0.014596, 2.55805))\n", (65, 165), False, 'import bpy\n'), ((165, 221), 'bpy.ops.object.light_add', 'bpy.ops.object.light_add', ([], {'type': '"""SUN"""', 'location': '(0, 0, 5)'}), "(type='SUN', location=(0, 0, 5))\n", (189, 221), False, 'import bpy\n'), ((449, 489), 'bpy.ops.view3d.camera_to_view_selected', 'bpy.ops.view3d.camera_to_view_selected', ([], {}), '()\n', (487, 489), False, 'import bpy\n')] |
# ----------------------------------------------------------------------------------------------------------------------
# Body Weight test cases
# ----------------------------------------------------------------------------------------------------------------------
# imports
import unittest
import tempfile
import os
import shutil
import logging
from src.Util.config import Config
from src.Util.constants import Constants
class TestConfig(unittest.TestCase):
    """
    Tests for the Config utility: reading, updating and reconciling
    options in the application's .ini configuration file.
    """

    def setUp(self):
        """
        Initializes unit test variables: a throw-away directory and a
        Config instance that writes its .ini file into it.
        """
        self.logs_dir = tempfile.mkdtemp()
        self.file_path = os.path.join(self.logs_dir, 'test_config.ini')
        self.logger = logging.getLogger(__name__)
        self.config = Config(logger=self.logger,
                             output_path=self.logs_dir)
        self.section = 'OPTIONS'
        self.option = 'water'

    def tearDown(self):
        """
        Performs any clean up needed.
        """
        # NOTE(review): self.connection is never created in setUp; kept for
        # compatibility in case a subclass relies on it — confirm and remove.
        self.connection = None
        if os.path.exists(self.logs_dir):
            shutil.rmtree(self.logs_dir)

    # ------------------------------------------------------------------------------------------------------------------
    # read_config_option tests
    # ------------------------------------------------------------------------------------------------------------------
    def test_read_config_option_nominal(self):
        """
        Checks that the default config file is created properly.
        """
        value = self.config.read_config_option(section=self.section,
                                               option=self.option)
        self.assertEqual(value, "oz")

    def test_read_config_option_bad_option(self):
        """
        Attempts to get a bad value in the config file.
        """
        with self.assertRaises(KeyError) as error:
            self.config.read_config_option(section=self.section,
                                           option="bad")
        # Bug fix: this assertion used to sit *inside* the with-block, so it
        # never executed (the call above raises first). It also compared
        # against the exception object instead of its string form.
        self.assertIn('bad', str(error.exception))

    # ------------------------------------------------------------------------------------------------------------------
    # update_config_option tests
    # ------------------------------------------------------------------------------------------------------------------
    def test_update_config_option_nominal(self):
        """
        Updates a config value to be used in the future.
        """
        value = 'mL'
        status = self.config.update_config_option(section=self.section,
                                                  option=self.option,
                                                  value=value)
        self.assertTrue(status)
        water_type = self.config.read_config_option(section=self.section,
                                                    option=self.option)
        self.assertEqual(value, water_type)

    def test_update_config_retain_unique_values(self):
        """
        Updating an option should keep unaffected values the same when rewriting.
        """
        value = 'mL'
        status = self.config.update_config_option(section=self.section,
                                                  option=self.option,
                                                  value=value)
        self.assertTrue(status)
        value = '5'
        status = self.config.update_config_option(section=self.section,
                                                  option='backup_rate',
                                                  value=value)
        self.assertTrue(status)
        water_type = self.config.read_config_option(section=self.section,
                                                    option=self.option)
        backup_rate = self.config.read_config_option(section=self.section,
                                                     option='backup_rate')
        self.assertEqual(water_type, 'mL')
        self.assertEqual(backup_rate, '5')

    def test_update_config_option_bad_section(self):
        """
        Attempts to change a config option with a section that does not exist.
        """
        status = self.config.update_config_option(section='bad',
                                                  option=self.option,
                                                  value='mL')
        self.assertFalse(status)

    def test_update_config_option_bad_option(self):
        """
        Attempts to change a config option that does not exist.
        """
        status = self.config.update_config_option(section=self.section,
                                                  option='bad',
                                                  value='mL')
        self.assertFalse(status)

    # ------------------------------------------------------------------------------------------------------------------
    # check_config_file_values tests
    # ------------------------------------------------------------------------------------------------------------------
    def test_check_config_file_values_nominal(self):
        """
        A new default has been added to a section. Add the default value to an already existing config file. The old
        config values will remain.
        """
        Constants.config_defaults[self.section]['test'] = 'new'
        value = 'mL'
        status = self.config.update_config_option(section=self.section,
                                                  option=self.option,
                                                  value=value)
        self.assertTrue(status)
        self.config.check_config_file_values()
        added_default = self.config.read_config_option(section=self.section,
                                                       option='test')
        self.assertEqual(added_default, 'new')
        old_value = self.config.read_config_option(section=self.section,
                                                   option=self.option)
        self.assertEqual(old_value, 'mL')

    # ------------------------------------------------------------------------------------------------------------------
    # create_backup_database tests (placeholders — not yet implemented)
    # ------------------------------------------------------------------------------------------------------------------
    def test_create_backup_database_nominal(self):
        """
        Creates a backup database when no other backups are present
        """
        pass

    def test_create_backup_database_already_exists(self):
        """
        Checks for a backup database file, and sees that one has been created within the backup rate.
        """
        pass

    def test_create_backup_database_needed(self):
        """
        Checks for a backup database file, one does exist, but a new one is needed.
        """
        pass

    def test_create_backup_database_no_backup_db_folder(self):
        """
        Creates the backup_db folder within the cwd if it does not already exist.
        """
        pass
# ----------------------------------------------------------------------------------------------------------------------
# End
# --------------------------------------------------------------------------------------------------------------------
| [
"logging.getLogger",
"os.path.exists",
"os.path.join",
"src.Util.config.Config",
"tempfile.mkdtemp",
"shutil.rmtree"
] | [((643, 661), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (659, 661), False, 'import tempfile\n'), ((687, 733), 'os.path.join', 'os.path.join', (['self.logs_dir', '"""test_config.ini"""'], {}), "(self.logs_dir, 'test_config.ini')\n", (699, 733), False, 'import os\n'), ((756, 783), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (773, 783), False, 'import logging\n'), ((806, 859), 'src.Util.config.Config', 'Config', ([], {'logger': 'self.logger', 'output_path': 'self.logs_dir'}), '(logger=self.logger, output_path=self.logs_dir)\n', (812, 859), False, 'from src.Util.config import Config\n'), ((1081, 1110), 'os.path.exists', 'os.path.exists', (['self.logs_dir'], {}), '(self.logs_dir)\n', (1095, 1110), False, 'import os\n'), ((1124, 1152), 'shutil.rmtree', 'shutil.rmtree', (['self.logs_dir'], {}), '(self.logs_dir)\n', (1137, 1152), False, 'import shutil\n')] |
import re
import time
import requests
from telethon import events
from userbot import CMD_HELP
from userbot.utils import register
import asyncio
import random
# Pool of emojis sprinkled between words by the .cp (copypasta) command.
# Duplicate entries are intentional: they weight random.choice() selection.
EMOJIS = [
    "😂",
    "😂",
    "👌",
    "💞",
    "👍",
    "👌",
    "💯",
    "🎶",
    "👀",
    "😂",
    "👓",
    "👏",
    "👐",
    "🍕",
    "💥",
    "😩",
    "😏",
    "😞",
    "👀",
    "👅",
    "😩",
    "🤒",
    "😳",
    "🤯",
    "😵",
    "🥵",
    "🤒",
    "😠",
    "😪",
    "😴",
    "🤤",
    "👿",
    "👽",
    "😏",
    "😒",
    "😣",
    "🤔",
    "🤨",
    "🧐",
    "😝",
    "🤪",
    "🤩",
    "☺️",
    "😭",
    "🥺",
]
# Combining-character pools used by the .zal (zalgo) command:
#   ZALG_LIST[0] — marks that render below the base glyph,
#   ZALG_LIST[1] — marks that render above the base glyph,
#   ZALG_LIST[2] — overlay/joining marks.
# Entries are Unicode combining diacritics (most prefixed by a space).
ZALG_LIST = [["̖",
              " ̗",
              " ̘",
              " ̙",
              " ̜",
              " ̝",
              " ̞",
              " ̟",
              " ̠",
              " ̤",
              " ̥",
              " ̦",
              " ̩",
              " ̪",
              " ̫",
              " ̬",
              " ̭",
              " ̮",
              " ̯",
              " ̰",
              " ̱",
              " ̲",
              " ̳",
              " ̹",
              " ̺",
              " ̻",
              " ̼",
              " ͅ",
              " ͇",
              " ͈",
              " ͉",
              " ͍",
              " ͎",
              " ͓",
              " ͔",
              " ͕",
              " ͖",
              " ͙",
              " ͚",
              " ",
              ],
             [" ̍",
              " ̎",
              " ̄",
              " ̅",
              " ̿",
              " ̑",
              " ̆",
              " ̐",
              " ͒",
              " ͗",
              " ͑",
              " ̇",
              " ̈",
              " ̊",
              " ͂",
              " ̓",
              " ̈́",
              " ͊",
              " ͋",
              " ͌",
              " ̃",
              " ̂",
              " ̌",
              " ͐",
              " ́",
              " ̋",
              " ̏",
              " ̽",
              " ̉",
              " ͣ",
              " ͤ",
              " ͥ",
              " ͦ",
              " ͧ",
              " ͨ",
              " ͩ",
              " ͪ",
              " ͫ",
              " ͬ",
              " ͭ",
              " ͮ",
              " ͯ",
              " ̾",
              " ͛",
              " ͆",
              " ̚",
              ],
             [" ̕",
              " ̛",
              " ̀",
              " ́",
              " ͘",
              " ̡",
              " ̢",
              " ̧",
              " ̨",
              " ̴",
              " ̵",
              " ̶",
              " ͜",
              " ͝",
              " ͞",
              " ͟",
              " ͠",
              " ͢",
              " ̸",
              " ̷",
              " ͡",
              ]]
@register(outgoing=True, pattern="^.vapor(?: |$)(.*)")
async def vapor(vpr):
""" Vaporize everything! """
if not vpr.text[0].isalpha() and vpr.text[0] not in ("/", "#", "@", "!"):
reply_text = list()
textx = await vpr.get_reply_message()
message = vpr.pattern_match.group(1)
if message:
pass
elif textx:
message = textx.text
else:
await vpr.edit("`Give some text for vapor!`")
return
for charac in message:
if 0x21 <= ord(charac) <= 0x7F:
reply_text.append(chr(ord(charac) + 0xFEE0))
elif ord(charac) == 0x20:
reply_text.append(chr(0x3000))
else:
reply_text.append(charac)
await vpr.edit("".join(reply_text))
@register(outgoing=True, pattern="^.str(?: |$)(.*)")
async def stretch(stret):
""" Stretch it."""
if not stret.text[0].isalpha() and stret.text[0] not in ("/", "#", "@", "!"):
textx = await stret.get_reply_message()
message = stret.text
message = stret.pattern_match.group(1)
if message:
pass
elif textx:
message = textx.text
else:
await stret.edit("`GiiiiiiiB sooooooomeeeeeee teeeeeeext!`")
return
count = random.randint(3, 10)
reply_text = re.sub(
r"([aeiouAEIOUaeiouAEIOUаеиоуюяыэё])",
(r"\1"*count),
message
)
await stret.edit(reply_text)
@register(outgoing=True, pattern="^.zal(?: |$)(.*)")
async def zal(zgfy):
""" Invoke the feeling of chaos. """
if not zgfy.text[0].isalpha() and zgfy.text[0] not in ("/", "#", "@", "!"):
reply_text = list()
textx = await zgfy.get_reply_message()
message = zgfy.pattern_match.group(1)
if message:
pass
elif textx:
message = textx.text
else:
await zgfy.edit(
"`gͫ ̆ i̛ ̺ v͇̆ ȅͅ a̢ͦ s̴̪ c̸̢ ä̸ rͩͣ y͖͞ t̨͚ é̠ x̢͖ t͔͛`"
)
return
for charac in message:
if not charac.isalpha():
reply_text.append(charac)
continue
for _ in range(0, 3):
randint = random.randint(0, 2)
if randint == 0:
charac = charac.strip() + \
random.choice(ZALG_LIST[0]).strip()
elif randint == 1:
charac = charac.strip() + \
random.choice(ZALG_LIST[1]).strip()
else:
charac = charac.strip() + \
random.choice(ZALG_LIST[2]).strip()
reply_text.append(charac)
await zgfy.edit("".join(reply_text))
@register(outgoing=True, pattern="^.cp(?: |$)(.*)")
async def copypasta(cp_e):
""" Copypasta the famous meme """
if not cp_e.text[0].isalpha() and cp_e.text[0] not in ("/", "#", "@", "!"):
textx = await cp_e.get_reply_message()
message = cp_e.pattern_match.group(1)
if message:
pass
elif textx:
message = textx.text
else:
await cp_e.edit("`😂🅱️IvE👐sOME👅text👅for✌️Me👌tO👐MAkE👀iT💞funNy!💦`")
return
reply_text = random.choice(EMOJIS)
b_char = random.choice(
message
).lower() # choose a random character in the message to be substituted with 🅱️
for owo in message:
if owo == " ":
reply_text += random.choice(EMOJIS)
elif owo in EMOJIS:
reply_text += owo
reply_text += random.choice(EMOJIS)
elif owo.lower() == b_char:
reply_text += "🅱️"
else:
if bool(random.getrandbits(1)):
reply_text += owo.upper()
else:
reply_text += owo.lower()
reply_text += random.choice(EMOJIS)
await cp_e.edit(reply_text)
@register(outgoing=True, pattern="^.mock(?: |$)(.*)")
async def spongemocktext(mock):
""" Do it and find the real fun. """
if not mock.text[0].isalpha() and mock.text[0] not in ("/", "#", "@", "!"):
reply_text = list()
textx = await mock.get_reply_message()
message = mock.pattern_match.group(1)
if message:
pass
elif textx:
message = textx.text
else:
await mock.edit("`gIvE sOMEtHInG tO MoCk!`")
return
for charac in message:
if charac.isalpha() and random.randint(0, 1):
to_app = charac.upper() if charac.islower() else charac.lower()
reply_text.append(to_app)
else:
reply_text.append(charac)
await mock.edit("".join(reply_text))
CMD_HELP.update({
"fontstyles": ".cp (text) or .cp reply to message \
\nUsage: inserts some emojis in between the texts\
\n\n.vapor (text) or .vapor reply to message \
\nUsage: Vaporize the given text. \
\n\n.str (text) or .str reply to message \
\nUsage: Stretchs the given message.\
\n\n.zal (text) or .zal reply to message \
\nUsage: Invoke the feeling of chaos.\
\n\n.mock (text) or .mock reply to message \
\nUsage: random capital and small letters in given text.\
"
})
| [
"userbot.utils.register",
"random.choice",
"random.getrandbits",
"re.sub",
"userbot.CMD_HELP.update",
"random.randint"
] | [((2774, 2827), 'userbot.utils.register', 'register', ([], {'outgoing': '(True)', 'pattern': '"""^.vapor(?: |$)(.*)"""'}), "(outgoing=True, pattern='^.vapor(?: |$)(.*)')\n", (2782, 2827), False, 'from userbot.utils import register\n'), ((3590, 3641), 'userbot.utils.register', 'register', ([], {'outgoing': '(True)', 'pattern': '"""^.str(?: |$)(.*)"""'}), "(outgoing=True, pattern='^.str(?: |$)(.*)')\n", (3598, 3641), False, 'from userbot.utils import register\n'), ((4309, 4360), 'userbot.utils.register', 'register', ([], {'outgoing': '(True)', 'pattern': '"""^.zal(?: |$)(.*)"""'}), "(outgoing=True, pattern='^.zal(?: |$)(.*)')\n", (4317, 4360), False, 'from userbot.utils import register\n'), ((5600, 5650), 'userbot.utils.register', 'register', ([], {'outgoing': '(True)', 'pattern': '"""^.cp(?: |$)(.*)"""'}), "(outgoing=True, pattern='^.cp(?: |$)(.*)')\n", (5608, 5650), False, 'from userbot.utils import register\n'), ((6845, 6897), 'userbot.utils.register', 'register', ([], {'outgoing': '(True)', 'pattern': '"""^.mock(?: |$)(.*)"""'}), "(outgoing=True, pattern='^.mock(?: |$)(.*)')\n", (6853, 6897), False, 'from userbot.utils import register\n'), ((7671, 8123), 'userbot.CMD_HELP.update', 'CMD_HELP.update', (['{\'fontstyles\':\n """.cp (text) or .cp reply to message \nUsage: inserts some emojis in between the texts\n\n.vapor (text) or .vapor reply to message \nUsage: Vaporize the given text. \n\n.str (text) or .str reply to message \nUsage: Stretchs the given message.\n\n.zal (text) or .zal reply to message \nUsage: Invoke the feeling of chaos.\n\n.mock (text) or .mock reply to message \nUsage: random capital and small letters in given text."""\n }'], {}), '({\'fontstyles\':\n """.cp (text) or .cp reply to message \nUsage: inserts some emojis in between the texts\n\n.vapor (text) or .vapor reply to message \nUsage: Vaporize the given text. 
\n\n.str (text) or .str reply to message \nUsage: Stretchs the given message.\n\n.zal (text) or .zal reply to message \nUsage: Invoke the feeling of chaos.\n\n.mock (text) or .mock reply to message \nUsage: random capital and small letters in given text."""\n })\n', (7686, 8123), False, 'from userbot import CMD_HELP\n'), ((4110, 4131), 'random.randint', 'random.randint', (['(3)', '(10)'], {}), '(3, 10)\n', (4124, 4131), False, 'import random\n'), ((4153, 4221), 're.sub', 're.sub', (['"""([aeiouAEIOUaeiouAEIOUаеиоуюяыэё])"""', "('\\\\1' * count)", 'message'], {}), "('([aeiouAEIOUaeiouAEIOUаеиоуюяыэё])', '\\\\1' * count, message)\n", (4159, 4221), False, 'import re\n'), ((6112, 6133), 'random.choice', 'random.choice', (['EMOJIS'], {}), '(EMOJIS)\n', (6125, 6133), False, 'import random\n'), ((6776, 6797), 'random.choice', 'random.choice', (['EMOJIS'], {}), '(EMOJIS)\n', (6789, 6797), False, 'import random\n'), ((5072, 5092), 'random.randint', 'random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (5086, 5092), False, 'import random\n'), ((6151, 6173), 'random.choice', 'random.choice', (['message'], {}), '(message)\n', (6164, 6173), False, 'import random\n'), ((6359, 6380), 'random.choice', 'random.choice', (['EMOJIS'], {}), '(EMOJIS)\n', (6372, 6380), False, 'import random\n'), ((7420, 7440), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (7434, 7440), False, 'import random\n'), ((6477, 6498), 'random.choice', 'random.choice', (['EMOJIS'], {}), '(EMOJIS)\n', (6490, 6498), False, 'import random\n'), ((6616, 6637), 'random.getrandbits', 'random.getrandbits', (['(1)'], {}), '(1)\n', (6634, 6637), False, 'import random\n'), ((5199, 5226), 'random.choice', 'random.choice', (['ZALG_LIST[0]'], {}), '(ZALG_LIST[0])\n', (5212, 5226), False, 'import random\n'), ((5342, 5369), 'random.choice', 'random.choice', (['ZALG_LIST[1]'], {}), '(ZALG_LIST[1])\n', (5355, 5369), False, 'import random\n'), ((5472, 5499), 'random.choice', 'random.choice', 
(['ZALG_LIST[2]'], {}), '(ZALG_LIST[2])\n', (5485, 5499), False, 'import random\n')] |
# -----------------------------------------------------------------------------------
# <copyright company="Aspose" file="test_drawing_objects.py">
# Copyright (c) 2020 Aspose.Words for Cloud
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
import os
import dateutil.parser
import asposewordscloud.models.requests
from test.base_test_context import BaseTestContext
#
# Example of how to get drawing objects.
#
class TestDrawingObjects(BaseTestContext):
#
# Test for getting drawing objects from document.
#
    def test_get_document_drawing_objects(self):
        """Fetch all drawing objects under node 'sections/0'; expect exactly one."""
        remoteDataFolder = self.remote_test_folder + '/DocumentElements/DrawingObjectss'
        localFile = 'Common/test_multi_pages.docx'
        remoteFileName = 'TestGetDocumentDrawingObjects.docx'
        # Upload the local fixture document to the cloud test folder first.
        self.upload_file(remoteDataFolder + '/' + remoteFileName, open(os.path.join(self.local_test_folder, localFile), 'rb'))
        request = asposewordscloud.models.requests.GetDocumentDrawingObjectsRequest(name=remoteFileName, node_path='sections/0', folder=remoteDataFolder)
        result = self.words_api.get_document_drawing_objects(request)
        self.assertIsNotNone(result, 'Error has occurred.')
        self.assertIsNotNone(result.drawing_objects, 'Validate GetDocumentDrawingObjects response')
        self.assertIsNotNone(result.drawing_objects.list, 'Validate GetDocumentDrawingObjects response')
        self.assertEqual(1, len(result.drawing_objects.list))
#
# Test for getting drawing objects from document without node path.
#
    def test_get_document_drawing_objects_without_node_path(self):
        """Fetch all drawing objects without specifying a node path; expect one."""
        remoteDataFolder = self.remote_test_folder + '/DocumentElements/DrawingObjectss'
        localFile = 'Common/test_multi_pages.docx'
        remoteFileName = 'TestGetDocumentDrawingObjectsWithoutNodePath.docx'
        # Upload the local fixture document to the cloud test folder first.
        self.upload_file(remoteDataFolder + '/' + remoteFileName, open(os.path.join(self.local_test_folder, localFile), 'rb'))
        request = asposewordscloud.models.requests.GetDocumentDrawingObjectsRequest(name=remoteFileName, folder=remoteDataFolder)
        result = self.words_api.get_document_drawing_objects(request)
        self.assertIsNotNone(result, 'Error has occurred.')
        self.assertIsNotNone(result.drawing_objects, 'Validate GetDocumentDrawingObjectsWithoutNodePath response')
        self.assertIsNotNone(result.drawing_objects.list, 'Validate GetDocumentDrawingObjectsWithoutNodePath response')
        self.assertEqual(1, len(result.drawing_objects.list))
#
# Test for getting drawing object by specified index.
#
    def test_get_document_drawing_object_by_index(self):
        """Fetch drawing object 0 under 'sections/0' and check its height."""
        remoteDataFolder = self.remote_test_folder + '/DocumentElements/DrawingObjectss'
        localFile = 'Common/test_multi_pages.docx'
        remoteFileName = 'TestGetDocumentDrawingObjectByIndex.docx'
        # Upload the local fixture document to the cloud test folder first.
        self.upload_file(remoteDataFolder + '/' + remoteFileName, open(os.path.join(self.local_test_folder, localFile), 'rb'))
        request = asposewordscloud.models.requests.GetDocumentDrawingObjectByIndexRequest(name=remoteFileName, index=0, node_path='sections/0', folder=remoteDataFolder)
        result = self.words_api.get_document_drawing_object_by_index(request)
        self.assertIsNotNone(result, 'Error has occurred.')
        self.assertIsNotNone(result.drawing_object, 'Validate GetDocumentDrawingObjectByIndex response')
        self.assertEqual(300.0, result.drawing_object.height)
#
# Test for getting drawing object by specified index without node path.
#
    def test_get_document_drawing_object_by_index_without_node_path(self):
        """Fetch drawing object 0 without a node path and check its height."""
        remoteDataFolder = self.remote_test_folder + '/DocumentElements/DrawingObjectss'
        localFile = 'Common/test_multi_pages.docx'
        remoteFileName = 'TestGetDocumentDrawingObjectByIndexWithoutNodePath.docx'
        # Upload the local fixture document to the cloud test folder first.
        self.upload_file(remoteDataFolder + '/' + remoteFileName, open(os.path.join(self.local_test_folder, localFile), 'rb'))
        request = asposewordscloud.models.requests.GetDocumentDrawingObjectByIndexRequest(name=remoteFileName, index=0, folder=remoteDataFolder)
        result = self.words_api.get_document_drawing_object_by_index(request)
        self.assertIsNotNone(result, 'Error has occurred.')
        self.assertIsNotNone(result.drawing_object, 'Validate GetDocumentDrawingObjectByIndexWithoutNodePath response')
        self.assertEqual(300.0, result.drawing_object.height)
#
# Test for getting drawing object by specified index and format.
#
    def test_render_drawing_object(self):
        """Render drawing object 0 under 'sections/0' as PNG; expect a non-None result."""
        remoteDataFolder = self.remote_test_folder + '/DocumentElements/DrawingObjectss'
        localFile = 'Common/test_multi_pages.docx'
        remoteFileName = 'TestGetDocumentDrawingObjectByIndexWithFormat.docx'
        # Upload the local fixture document to the cloud test folder first.
        self.upload_file(remoteDataFolder + '/' + remoteFileName, open(os.path.join(self.local_test_folder, localFile), 'rb'))
        request = asposewordscloud.models.requests.RenderDrawingObjectRequest(name=remoteFileName, format='png', index=0, node_path='sections/0', folder=remoteDataFolder)
        result = self.words_api.render_drawing_object(request)
        self.assertIsNotNone(result, 'Error has occurred.')
#
# Test for getting drawing object by specified index and format without node path.
#
    def test_render_drawing_object_without_node_path(self):
        """Render drawing object 0 as PNG without a node path; expect a non-None result."""
        remoteDataFolder = self.remote_test_folder + '/DocumentElements/DrawingObjectss'
        localFile = 'Common/test_multi_pages.docx'
        remoteFileName = 'TestGetDocumentDrawingObjectByIndexWithFormatWithoutNodePath.docx'
        # Upload the local fixture document to the cloud test folder first.
        self.upload_file(remoteDataFolder + '/' + remoteFileName, open(os.path.join(self.local_test_folder, localFile), 'rb'))
        request = asposewordscloud.models.requests.RenderDrawingObjectRequest(name=remoteFileName, format='png', index=0, folder=remoteDataFolder)
        result = self.words_api.render_drawing_object(request)
        self.assertIsNotNone(result, 'Error has occurred.')
#
# Test for reading drawing object's image data.
#
def test_get_document_drawing_object_image_data(self):
remoteDataFolder = self.remote_test_folder + '/DocumentElements/DrawingObjectss'
localFile = 'Common/test_multi_pages.docx'
remoteFileName = 'TestGetDocumentDrawingObjectImageData.docx'
self.upload_file(remoteDataFolder + '/' + remoteFileName, open(os.path.join(self.local_test_folder, localFile), 'rb'))
request = asposewordscloud.models.requests.GetDocumentDrawingObjectImageDataRequest(name=remoteFileName, index=0, node_path='sections/0', folder=remoteDataFolder)
result = self.words_api.get_document_drawing_object_image_data(request)
self.assertIsNotNone(result, 'Error has occurred.')
#
# Test for reading drawing object's image data without node path.
#
def test_get_document_drawing_object_image_data_without_node_path(self):
remoteDataFolder = self.remote_test_folder + '/DocumentElements/DrawingObjectss'
localFile = 'Common/test_multi_pages.docx'
remoteFileName = 'TestGetDocumentDrawingObjectImageDataWithoutNodePath.docx'
self.upload_file(remoteDataFolder + '/' + remoteFileName, open(os.path.join(self.local_test_folder, localFile), 'rb'))
request = asposewordscloud.models.requests.GetDocumentDrawingObjectImageDataRequest(name=remoteFileName, index=0, folder=remoteDataFolder)
result = self.words_api.get_document_drawing_object_image_data(request)
self.assertIsNotNone(result, 'Error has occurred.')
#
# Test for getting drawing object OLE data.
#
def test_get_document_drawing_object_ole_data(self):
remoteDataFolder = self.remote_test_folder + '/DocumentElements/DrawingObjectss'
localDrawingFile = 'DocumentElements/DrawingObjects/sample_EmbeddedOLE.docx'
remoteFileName = 'TestGetDocumentDrawingObjectOleData.docx'
self.upload_file(remoteDataFolder + '/' + remoteFileName, open(os.path.join(self.local_test_folder, localDrawingFile), 'rb'))
request = asposewordscloud.models.requests.GetDocumentDrawingObjectOleDataRequest(name=remoteFileName, index=0, node_path='sections/0', folder=remoteDataFolder)
result = self.words_api.get_document_drawing_object_ole_data(request)
self.assertIsNotNone(result, 'Error has occurred.')
#
# Test for getting drawing object OLE data without node path.
#
def test_get_document_drawing_object_ole_data_without_node_path(self):
remoteDataFolder = self.remote_test_folder + '/DocumentElements/DrawingObjectss'
localDrawingFile = 'DocumentElements/DrawingObjects/sample_EmbeddedOLE.docx'
remoteFileName = 'TestGetDocumentDrawingObjectOleDataWithoutNodePath.docx'
self.upload_file(remoteDataFolder + '/' + remoteFileName, open(os.path.join(self.local_test_folder, localDrawingFile), 'rb'))
request = asposewordscloud.models.requests.GetDocumentDrawingObjectOleDataRequest(name=remoteFileName, index=0, folder=remoteDataFolder)
result = self.words_api.get_document_drawing_object_ole_data(request)
self.assertIsNotNone(result, 'Error has occurred.')
#
# Test for adding drawing object.
#
def test_insert_drawing_object(self):
remoteDataFolder = self.remote_test_folder + '/DocumentElements/DrawingObjectss'
localFile = 'Common/test_multi_pages.docx'
remoteFileName = 'TestInsetDrawingObject.docx'
self.upload_file(remoteDataFolder + '/' + remoteFileName, open(os.path.join(self.local_test_folder, localFile), 'rb'))
requestDrawingObject = asposewordscloud.DrawingObjectInsert(height=0.0, left=0.0, top=0.0, width=0.0, relative_horizontal_position='Margin', relative_vertical_position='Margin', wrap_type='Inline')
request = asposewordscloud.models.requests.InsertDrawingObjectRequest(name=remoteFileName, drawing_object=requestDrawingObject, image_file=open(os.path.join(self.local_test_folder, 'Common/aspose-cloud.png'), 'rb'), node_path='', folder=remoteDataFolder)
result = self.words_api.insert_drawing_object(request)
self.assertIsNotNone(result, 'Error has occurred.')
self.assertIsNotNone(result.drawing_object, 'Validate InsertDrawingObject response')
self.assertEqual('0.3.7.1', result.drawing_object.node_id)
#
# Test for adding drawing object without node path.
#
def test_insert_drawing_object_without_node_path(self):
remoteDataFolder = self.remote_test_folder + '/DocumentElements/DrawingObjectss'
localFile = 'Common/test_multi_pages.docx'
remoteFileName = 'TestInsetDrawingObjectWithoutNodePath.docx'
self.upload_file(remoteDataFolder + '/' + remoteFileName, open(os.path.join(self.local_test_folder, localFile), 'rb'))
requestDrawingObject = asposewordscloud.DrawingObjectInsert(height=0.0, left=0.0, top=0.0, width=0.0, relative_horizontal_position='Margin', relative_vertical_position='Margin', wrap_type='Inline')
request = asposewordscloud.models.requests.InsertDrawingObjectRequest(name=remoteFileName, drawing_object=requestDrawingObject, image_file=open(os.path.join(self.local_test_folder, 'Common/aspose-cloud.png'), 'rb'), folder=remoteDataFolder)
result = self.words_api.insert_drawing_object(request)
self.assertIsNotNone(result, 'Error has occurred.')
self.assertIsNotNone(result.drawing_object, 'Validate InsertDrawingObjectWithoutNodePath response')
self.assertEqual('0.3.7.1', result.drawing_object.node_id)
#
# Test for deleting drawing object.
#
def test_delete_drawing_object(self):
remoteDataFolder = self.remote_test_folder + '/DocumentElements/DrawingObjectss'
localFile = 'Common/test_multi_pages.docx'
remoteFileName = 'TestDeleteDrawingObject.docx'
self.upload_file(remoteDataFolder + '/' + remoteFileName, open(os.path.join(self.local_test_folder, localFile), 'rb'))
request = asposewordscloud.models.requests.DeleteDrawingObjectRequest(name=remoteFileName, index=0, node_path='', folder=remoteDataFolder)
self.words_api.delete_drawing_object(request)
#
# Test for deleting drawing object without node path.
#
def test_delete_drawing_object_without_node_path(self):
remoteDataFolder = self.remote_test_folder + '/DocumentElements/DrawingObjectss'
localFile = 'Common/test_multi_pages.docx'
remoteFileName = 'TestDeleteDrawingObjectWithoutNodePath.docx'
self.upload_file(remoteDataFolder + '/' + remoteFileName, open(os.path.join(self.local_test_folder, localFile), 'rb'))
request = asposewordscloud.models.requests.DeleteDrawingObjectRequest(name=remoteFileName, index=0, folder=remoteDataFolder)
self.words_api.delete_drawing_object(request)
#
# Test for updating drawing object.
#
def test_update_drawing_object(self):
remoteDataFolder = self.remote_test_folder + '/DocumentElements/DrawingObjectss'
localFile = 'Common/test_multi_pages.docx'
remoteFileName = 'TestUpdateDrawingObject.docx'
self.upload_file(remoteDataFolder + '/' + remoteFileName, open(os.path.join(self.local_test_folder, localFile), 'rb'))
requestDrawingObject = asposewordscloud.DrawingObjectUpdate(left=1.0)
request = asposewordscloud.models.requests.UpdateDrawingObjectRequest(name=remoteFileName, drawing_object=requestDrawingObject, image_file=open(os.path.join(self.local_test_folder, 'Common/aspose-cloud.png'), 'rb'), index=0, node_path='', folder=remoteDataFolder)
result = self.words_api.update_drawing_object(request)
self.assertIsNotNone(result, 'Error has occurred.')
self.assertIsNotNone(result.drawing_object, 'Validate UpdateDrawingObject response')
self.assertEqual(1.0, result.drawing_object.left)
#
# Test for updating drawing object without node path.
#
def test_update_drawing_object_without_node_path(self):
remoteDataFolder = self.remote_test_folder + '/DocumentElements/DrawingObjectss'
localFile = 'Common/test_multi_pages.docx'
remoteFileName = 'TestUpdateDrawingObjectWithoutNodePath.docx'
self.upload_file(remoteDataFolder + '/' + remoteFileName, open(os.path.join(self.local_test_folder, localFile), 'rb'))
requestDrawingObject = asposewordscloud.DrawingObjectUpdate(left=1.0)
request = asposewordscloud.models.requests.UpdateDrawingObjectRequest(name=remoteFileName, drawing_object=requestDrawingObject, image_file=open(os.path.join(self.local_test_folder, 'Common/aspose-cloud.png'), 'rb'), index=0, folder=remoteDataFolder)
result = self.words_api.update_drawing_object(request)
self.assertIsNotNone(result, 'Error has occurred.')
self.assertIsNotNone(result.drawing_object, 'Validate UpdateDrawingObjectWithoutNodePath response')
self.assertEqual(1.0, result.drawing_object.left)
| [
"os.path.join"
] | [((1994, 2041), 'os.path.join', 'os.path.join', (['self.local_test_folder', 'localFile'], {}), '(self.local_test_folder, localFile)\n', (2006, 2041), False, 'import os\n'), ((3044, 3091), 'os.path.join', 'os.path.join', (['self.local_test_folder', 'localFile'], {}), '(self.local_test_folder, localFile)\n', (3056, 3091), False, 'import os\n'), ((4067, 4114), 'os.path.join', 'os.path.join', (['self.local_test_folder', 'localFile'], {}), '(self.local_test_folder, localFile)\n', (4079, 4114), False, 'import os\n'), ((5058, 5105), 'os.path.join', 'os.path.join', (['self.local_test_folder', 'localFile'], {}), '(self.local_test_folder, localFile)\n', (5070, 5105), False, 'import os\n'), ((5995, 6042), 'os.path.join', 'os.path.join', (['self.local_test_folder', 'localFile'], {}), '(self.local_test_folder, localFile)\n', (6007, 6042), False, 'import os\n'), ((6813, 6860), 'os.path.join', 'os.path.join', (['self.local_test_folder', 'localFile'], {}), '(self.local_test_folder, localFile)\n', (6825, 6860), False, 'import os\n'), ((7548, 7595), 'os.path.join', 'os.path.join', (['self.local_test_folder', 'localFile'], {}), '(self.local_test_folder, localFile)\n', (7560, 7595), False, 'import os\n'), ((8375, 8422), 'os.path.join', 'os.path.join', (['self.local_test_folder', 'localFile'], {}), '(self.local_test_folder, localFile)\n', (8387, 8422), False, 'import os\n'), ((9153, 9207), 'os.path.join', 'os.path.join', (['self.local_test_folder', 'localDrawingFile'], {}), '(self.local_test_folder, localDrawingFile)\n', (9165, 9207), False, 'import os\n'), ((10009, 10063), 'os.path.join', 'os.path.join', (['self.local_test_folder', 'localDrawingFile'], {}), '(self.local_test_folder, localDrawingFile)\n', (10021, 10063), False, 'import os\n'), ((10718, 10765), 'os.path.join', 'os.path.join', (['self.local_test_folder', 'localFile'], {}), '(self.local_test_folder, localFile)\n', (10730, 10765), False, 'import os\n'), ((11939, 11986), 'os.path.join', 'os.path.join', 
(['self.local_test_folder', 'localFile'], {}), '(self.local_test_folder, localFile)\n', (11951, 11986), False, 'import os\n'), ((13113, 13160), 'os.path.join', 'os.path.join', (['self.local_test_folder', 'localFile'], {}), '(self.local_test_folder, localFile)\n', (13125, 13160), False, 'import os\n'), ((13787, 13834), 'os.path.join', 'os.path.join', (['self.local_test_folder', 'localFile'], {}), '(self.local_test_folder, localFile)\n', (13799, 13834), False, 'import os\n'), ((14396, 14443), 'os.path.join', 'os.path.join', (['self.local_test_folder', 'localFile'], {}), '(self.local_test_folder, localFile)\n', (14408, 14443), False, 'import os\n'), ((15492, 15539), 'os.path.join', 'os.path.join', (['self.local_test_folder', 'localFile'], {}), '(self.local_test_folder, localFile)\n', (15504, 15539), False, 'import os\n'), ((11133, 11196), 'os.path.join', 'os.path.join', (['self.local_test_folder', '"""Common/aspose-cloud.png"""'], {}), "(self.local_test_folder, 'Common/aspose-cloud.png')\n", (11145, 11196), False, 'import os\n'), ((12354, 12417), 'os.path.join', 'os.path.join', (['self.local_test_folder', '"""Common/aspose-cloud.png"""'], {}), "(self.local_test_folder, 'Common/aspose-cloud.png')\n", (12366, 12417), False, 'import os\n'), ((14683, 14746), 'os.path.join', 'os.path.join', (['self.local_test_folder', '"""Common/aspose-cloud.png"""'], {}), "(self.local_test_folder, 'Common/aspose-cloud.png')\n", (14695, 14746), False, 'import os\n'), ((15779, 15842), 'os.path.join', 'os.path.join', (['self.local_test_folder', '"""Common/aspose-cloud.png"""'], {}), "(self.local_test_folder, 'Common/aspose-cloud.png')\n", (15791, 15842), False, 'import os\n')] |
import numpy as np
import matplotlib.pyplot as plt
import freqent.freqentn as fen
import dynamicstructurefactor.sqw as sqw
from itertools import product
import os
import matplotlib as mpl
mpl.rcParams['pdf.fonttype'] = 42  # embed TrueType fonts in saved PDFs (keeps text editable)
# Output directory for saved figures (machine-specific path; savefig calls below are commented out).
savepath = '/media/daniel/storage11/Dropbox/LLM_Danny/frequencySpaceDissipation/tests/freqentn_tests/'
plt.close('all')  # start from a clean figure state
def create_sphericalWave(wavelength, period, phi,
                         v=(0, 0),
                         n_txy=(100, 100, 100),
                         max_txy=(1, 1, 1),
                         r0=(0, 0)):
    '''
    Create a drifting circular ("spherical") wave sampled on a regular (t, x, y) grid.

    The wave is cos(k*r(t) - w*t + phi) with k = 2*pi/wavelength,
    w = 2*pi/period, and r(t) the distance to the center r0 + v*t.

    Inputs
    ------
    wavelength : float
        wavelength of spherical wave
    period : float
        period of spherical wave
    phi : float
        initial phase of wave
    v : array-like
        drift velocity of wave, in format [vx, vy]
    n_txy : sequence of int
        number of time points, x points, y points
    max_txy : sequence of float
        total time and total length in x and y dimensions
    r0 : array-like
        initial position of spherical wave

    Returns
    -------
    wave, t, x, y : ndarrays of shape tuple(n_txy)
        the sampled wave and the meshgrid coordinates it was evaluated on

    Note: defaults are tuples (not lists) to avoid the shared-mutable-default pitfall;
    behavior is unchanged since they are only read.
    '''
    n_txy = np.asarray(n_txy)
    max_txy = np.asarray(max_txy)
    # time runs over [0, T]; x and y are centered on the origin
    tArr = np.linspace(0, max_txy[0], n_txy[0])
    xArr = np.linspace(-max_txy[1] / 2, max_txy[1] / 2, n_txy[1])
    yArr = np.linspace(-max_txy[2] / 2, max_txy[2] / 2, n_txy[2])
    t, x, y = np.meshgrid(tArr, xArr, yArr, indexing='ij')
    k = 2 * np.pi / wavelength  # wavenumber (rad / length)
    w = 2 * np.pi / period      # angular frequency (rad / time)
    # radial distance from the (possibly drifting) wave center
    r = np.sqrt((x - r0[0] - (v[0] * t))**2 + (y - r0[1] - (v[1] * t))**2)
    wave = np.cos(k * r - w * t + phi)
    return wave, t, x, y
# ---- Script: cross-spectral density of two concentric circular waves ----
# Builds two cosine waves (second one phase-shifted by phi), computes their
# correlation/CSD matrix with freqent, smooths it, azimuthally averages each
# matrix element in k-space, and plots real and imaginary parts.
# Set up parameters
xmax = 6 * np.pi # total distance in physical units
ymax = 6 * np.pi
tmax = 100
nx = 250 # total number of pixels across
ny = 250
nt = 100
dx = xmax / nx # sampling spacing
dy = ymax / ny
dt = tmax / nt
xArr = np.linspace(-xmax / 2, xmax / 2, nx)
yArr = np.linspace(-ymax / 2, ymax / 2, ny)
tArr = np.linspace(0, tmax, nt)
# Set up grid in real space, remembering to multiply by the
# sampling periods in time and space
tt, xx, yy = np.meshgrid(tArr, xArr, yArr, indexing='ij')
# Spatial and temporal frequency (in radians/length or time)
lambda0 = np.pi / 6
k0 = 2 * np.pi / lambda0
T0 = 5
w0 = 2 * np.pi / T0
lambda1 = np.pi / 6
k1 = 2 * np.pi / lambda1
T1 = 5
w1 = 2 * np.pi / T1
# Center offset
x0 = 0 * dx
y0 = 0 * dy
x1 = 0 * dx
y1 = 0 * dy
# phase difference
phi = 1 * np.pi / 2
# Function and its power spectrum
r0 = ((xx - x0)**2 + (yy - y0)**2)**0.5
r1 = ((xx - x1)**2 + (yy - y1)**2)**0.5
r0t = np.cos(k0 * r0 - w0 * tt)
r1t = np.cos(k1 * r1 - w1 * tt + phi)
# Stack the two fields into a (variable, t, x, y) array for freqent
data = np.zeros((2, *r0t.shape))
data[0] = r0t
data[1] = r1t
# Cross-correlation matrix in frequency space, then Gaussian smoothing
c, freqs = fen.corr_matrix(data, sample_spacing=[dt, dx, dy])
c = fen._nd_gauss_smooth(c, stddev=[1, 2, 2])
# All (i, j) index pairs of the 2x2 correlation matrix
idx_array = list(product(np.arange(2), repeat=2))
figReal, axReal = plt.subplots(2, 2, sharex=True, sharey=True)
figImag, axImag = plt.subplots(2, 2, sharex=True, sharey=True)
for idx in idx_array:
    # Azimuthal average over the two spatial frequency axes -> (omega, |k|)
    aziAvg_real, kr_real = sqw.azimuthal_average_3D(c[..., idx[0], idx[1]].real,
                                                dx=2 * np.pi / xmax)
    aziAvg_imag, kr_imag = sqw.azimuthal_average_3D(c[..., idx[0], idx[1]].imag,
                                                dx=2 * np.pi / xmax)
    axReal[idx[0], idx[1]].pcolormesh(kr_real, freqs[0], aziAvg_real, vmin=-1, vmax=15)
    axImag[idx[0], idx[1]].pcolormesh(kr_imag, freqs[0], aziAvg_imag, vmin=-0.3, vmax=0.3)
axReal[1, 0].set(xlabel=r'$k$ (rad/um)', ylabel=r'$\omega$ (rad/s)')
axReal[0, 0].set(ylabel=r'$\omega$ (rad/s)')
axReal[1, 1].set(xlabel=r'$k$ (rad/um)')
figReal.suptitle(r'$\Re[\langle r_i(\mathbf{{k}}, \omega) r_j^*(\mathbf{{k}}, \omega) \rangle]$')
# figReal.savefig(os.path.join(savepath, 'sphericalWaveCSD_Real_smoothed_sigma1.pdf'), format='pdf')
axImag[1, 0].set(xlabel=r'$k$ (rad/um)', ylabel=r'$\omega$ (rad/s)')
axImag[0, 0].set(ylabel=r'$\omega$ (rad/s)')
axImag[1, 1].set(xlabel=r'$k$ (rad/um)')
figImag.suptitle(r'$\Im[\langle r_i(\mathbf{{k}}, \omega) r_j^*(\mathbf{{k}}, \omega) \rangle]$')
# figImag.savefig(os.path.join(savepath, 'sphericalWaveCSD_Imag_smoothed_sigma1.pdf'), format='pdf')
plt.show()
| [
"numpy.sqrt",
"numpy.asarray",
"freqent.freqentn._nd_gauss_smooth",
"matplotlib.pyplot.close",
"freqent.freqentn.corr_matrix",
"numpy.linspace",
"numpy.zeros",
"numpy.cos",
"numpy.meshgrid",
"dynamicstructurefactor.sqw.azimuthal_average_3D",
"matplotlib.pyplot.subplots",
"numpy.arange",
"mat... | [((326, 342), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (335, 342), True, 'import matplotlib.pyplot as plt\n'), ((1853, 1889), 'numpy.linspace', 'np.linspace', (['(-xmax / 2)', '(xmax / 2)', 'nx'], {}), '(-xmax / 2, xmax / 2, nx)\n', (1864, 1889), True, 'import numpy as np\n'), ((1897, 1933), 'numpy.linspace', 'np.linspace', (['(-ymax / 2)', '(ymax / 2)', 'ny'], {}), '(-ymax / 2, ymax / 2, ny)\n', (1908, 1933), True, 'import numpy as np\n'), ((1941, 1965), 'numpy.linspace', 'np.linspace', (['(0)', 'tmax', 'nt'], {}), '(0, tmax, nt)\n', (1952, 1965), True, 'import numpy as np\n'), ((2077, 2121), 'numpy.meshgrid', 'np.meshgrid', (['tArr', 'xArr', 'yArr'], {'indexing': '"""ij"""'}), "(tArr, xArr, yArr, indexing='ij')\n", (2088, 2121), True, 'import numpy as np\n'), ((2555, 2580), 'numpy.cos', 'np.cos', (['(k0 * r0 - w0 * tt)'], {}), '(k0 * r0 - w0 * tt)\n', (2561, 2580), True, 'import numpy as np\n'), ((2587, 2618), 'numpy.cos', 'np.cos', (['(k1 * r1 - w1 * tt + phi)'], {}), '(k1 * r1 - w1 * tt + phi)\n', (2593, 2618), True, 'import numpy as np\n'), ((2627, 2652), 'numpy.zeros', 'np.zeros', (['(2, *r0t.shape)'], {}), '((2, *r0t.shape))\n', (2635, 2652), True, 'import numpy as np\n'), ((2693, 2743), 'freqent.freqentn.corr_matrix', 'fen.corr_matrix', (['data'], {'sample_spacing': '[dt, dx, dy]'}), '(data, sample_spacing=[dt, dx, dy])\n', (2708, 2743), True, 'import freqent.freqentn as fen\n'), ((2748, 2789), 'freqent.freqentn._nd_gauss_smooth', 'fen._nd_gauss_smooth', (['c'], {'stddev': '[1, 2, 2]'}), '(c, stddev=[1, 2, 2])\n', (2768, 2789), True, 'import freqent.freqentn as fen\n'), ((2860, 2904), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'sharex': '(True)', 'sharey': '(True)'}), '(2, 2, sharex=True, sharey=True)\n', (2872, 2904), True, 'import matplotlib.pyplot as plt\n'), ((2923, 2967), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'sharex': '(True)', 'sharey': '(True)'}), '(2, 2, 
sharex=True, sharey=True)\n', (2935, 2967), True, 'import matplotlib.pyplot as plt\n'), ((4189, 4199), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4197, 4199), True, 'import matplotlib.pyplot as plt\n'), ((1085, 1102), 'numpy.asarray', 'np.asarray', (['n_txy'], {}), '(n_txy)\n', (1095, 1102), True, 'import numpy as np\n'), ((1117, 1136), 'numpy.asarray', 'np.asarray', (['max_txy'], {}), '(max_txy)\n', (1127, 1136), True, 'import numpy as np\n'), ((1187, 1223), 'numpy.linspace', 'np.linspace', (['(0)', 'max_txy[0]', 'n_txy[0]'], {}), '(0, max_txy[0], n_txy[0])\n', (1198, 1223), True, 'import numpy as np\n'), ((1235, 1289), 'numpy.linspace', 'np.linspace', (['(-max_txy[1] / 2)', '(max_txy[1] / 2)', 'n_txy[1]'], {}), '(-max_txy[1] / 2, max_txy[1] / 2, n_txy[1])\n', (1246, 1289), True, 'import numpy as np\n'), ((1301, 1355), 'numpy.linspace', 'np.linspace', (['(-max_txy[2] / 2)', '(max_txy[2] / 2)', 'n_txy[2]'], {}), '(-max_txy[2] / 2, max_txy[2] / 2, n_txy[2])\n', (1312, 1355), True, 'import numpy as np\n'), ((1371, 1415), 'numpy.meshgrid', 'np.meshgrid', (['tArr', 'xArr', 'yArr'], {'indexing': '"""ij"""'}), "(tArr, xArr, yArr, indexing='ij')\n", (1382, 1415), True, 'import numpy as np\n'), ((1484, 1550), 'numpy.sqrt', 'np.sqrt', (['((x - r0[0] - v[0] * t) ** 2 + (y - r0[1] - v[1] * t) ** 2)'], {}), '((x - r0[0] - v[0] * t) ** 2 + (y - r0[1] - v[1] * t) ** 2)\n', (1491, 1550), True, 'import numpy as np\n'), ((1563, 1590), 'numpy.cos', 'np.cos', (['(k * r - w * t + phi)'], {}), '(k * r - w * t + phi)\n', (1569, 1590), True, 'import numpy as np\n'), ((3018, 3092), 'dynamicstructurefactor.sqw.azimuthal_average_3D', 'sqw.azimuthal_average_3D', (['c[..., idx[0], idx[1]].real'], {'dx': '(2 * np.pi / xmax)'}), '(c[..., idx[0], idx[1]].real, dx=2 * np.pi / xmax)\n', (3042, 3092), True, 'import dynamicstructurefactor.sqw as sqw\n'), ((3172, 3246), 'dynamicstructurefactor.sqw.azimuthal_average_3D', 'sqw.azimuthal_average_3D', (['c[..., idx[0], idx[1]].imag'], 
{'dx': '(2 * np.pi / xmax)'}), '(c[..., idx[0], idx[1]].imag, dx=2 * np.pi / xmax)\n', (3196, 3246), True, 'import dynamicstructurefactor.sqw as sqw\n'), ((2816, 2828), 'numpy.arange', 'np.arange', (['(2)'], {}), '(2)\n', (2825, 2828), True, 'import numpy as np\n')] |
from flask import url_for
from flexmock import flexmock
from packit_service import models
from packit_service.models import CoprBuildModel
from packit_service.service.views import _get_build_info
from tests_requre.conftest import SampleValues
def test_get_build_logs_for_build_pr(clean_before_and_after, a_copr_build_for_pr):
    """The build-info page of a PR copr build contains all of the build's metadata."""
    frozen_time = "2020-05-19 16:17:14 UTC"
    # Pin the rendered timestamp so the assertion below is deterministic.
    flexmock(models).should_receive("optional_time").and_return(frozen_time)
    page = _get_build_info(a_copr_build_for_pr, build_description="COPR build")
    assert "We can't find any info" not in page
    assert "Builds for the-namespace/the-repo-name: PR #342" in page
    assert frozen_time in page
    for fragment in (
        a_copr_build_for_pr.status,
        a_copr_build_for_pr.target,
        str(a_copr_build_for_pr.srpm_build_id),
        a_copr_build_for_pr.build_logs_url,
    ):
        assert fragment in page
def test_get_build_logs_for_build_branch_push(
    clean_before_and_after, a_copr_build_for_branch_push
):
    """The build-info page of a branch-push copr build contains the build's metadata."""
    frozen_time = "2020-05-19 16:17:14 UTC"
    # Pin the rendered timestamp so the assertion below is deterministic.
    flexmock(models).should_receive("optional_time").and_return(frozen_time)
    page = _get_build_info(
        a_copr_build_for_branch_push, build_description="COPR build"
    )
    assert "We can't find any info" not in page
    assert "Builds for the-namespace/the-repo-name: branch build-branch" in page
    assert frozen_time in page
    for fragment in (
        a_copr_build_for_branch_push.status,
        a_copr_build_for_branch_push.target,
        str(a_copr_build_for_branch_push.srpm_build_id),
        a_copr_build_for_branch_push.build_logs_url,
    ):
        assert fragment in page
def test_get_build_logs_for_build_release(
    clean_before_and_after, a_copr_build_for_release
):
    """The build-info page of a release copr build contains the build's metadata."""
    frozen_time = "2020-05-19 16:17:14 UTC"
    # Pin the rendered timestamp so the assertion below is deterministic.
    flexmock(models).should_receive("optional_time").and_return(frozen_time)
    page = _get_build_info(a_copr_build_for_release, build_description="COPR build")
    assert "We can't find any info" not in page
    assert "Builds for the-namespace/the-repo-name: release v1.0.2" in page
    assert frozen_time in page
    for fragment in (
        a_copr_build_for_release.status,
        a_copr_build_for_release.target,
        str(a_copr_build_for_release.srpm_build_id),
        a_copr_build_for_release.build_logs_url,
    ):
        assert fragment in page
def test_srpm_logs_view(client, clean_before_and_after, srpm_build_model):
    """SRPM logs view is addressed by SRPMBuildModel.id (not CoprBuildModel)."""
    logs_url = url_for("builds.get_srpm_build_logs_by_id", id_=srpm_build_model.id)
    page = client.get(logs_url).data.decode()
    assert "SRPM build logs" in page
    assert str(srpm_build_model.id) in page
    assert "some\nboring\nlogs" in page
def test_copr_build_info_view(client, clean_before_and_after, multiple_copr_builds):
    """Copr build-info view renders status, target, SRPM id and logs URL of the build."""
    frozen_time = "2020-05-19 16:17:14 UTC"
    # Pin the rendered timestamp so the assertion below is deterministic.
    flexmock(models).should_receive("optional_time").and_return(frozen_time)
    build = CoprBuildModel.get_by_build_id(123456, SampleValues.chroots[0])
    build.set_build_logs_url(
        "https://copr.somewhere/results/owner/package/target/build.logs"
    )
    info_url = url_for("builds.copr_build_info", id_=str(build.id))
    page = client.get(info_url).data.decode()
    assert "Builds for the-namespace/the-repo-name: PR #342" in page
    assert frozen_time in page
    for fragment in (
        build.status,
        build.target,
        str(build.srpm_build_id),
        build.build_logs_url,
    ):
        assert fragment in page
def test_koji_build_info_view(client, clean_before_and_after, a_koji_build_for_pr):
    """Koji build-info view renders the PR build's metadata."""
    frozen_time = "2020-05-19 16:17:14 UTC"
    # Pin the rendered timestamp so the assertion below is deterministic.
    flexmock(models).should_receive("optional_time").and_return(frozen_time)
    info_url = url_for("builds.koji_build_info", id_=str(a_koji_build_for_pr.id))
    page = client.get(info_url).data.decode()
    assert "Builds for the-namespace/the-repo-name: PR #342" in page
    assert frozen_time in page
    for fragment in (
        a_koji_build_for_pr.status,
        a_koji_build_for_pr.target,
        str(a_koji_build_for_pr.srpm_build_id),
        a_koji_build_for_pr.build_logs_url,
    ):
        assert fragment in page
| [
"packit_service.models.CoprBuildModel.get_by_build_id",
"flexmock.flexmock",
"packit_service.service.views._get_build_info",
"flask.url_for"
] | [((450, 518), 'packit_service.service.views._get_build_info', '_get_build_info', (['a_copr_build_for_pr'], {'build_description': '"""COPR build"""'}), "(a_copr_build_for_pr, build_description='COPR build')\n", (465, 518), False, 'from packit_service.service.views import _get_build_info\n'), ((1143, 1220), 'packit_service.service.views._get_build_info', '_get_build_info', (['a_copr_build_for_branch_push'], {'build_description': '"""COPR build"""'}), "(a_copr_build_for_branch_push, build_description='COPR build')\n", (1158, 1220), False, 'from packit_service.service.views import _get_build_info\n'), ((1899, 1972), 'packit_service.service.views._get_build_info', '_get_build_info', (['a_copr_build_for_release'], {'build_description': '"""COPR build"""'}), "(a_copr_build_for_release, build_description='COPR build')\n", (1914, 1972), False, 'from packit_service.service.views import _get_build_info\n'), ((3027, 3090), 'packit_service.models.CoprBuildModel.get_by_build_id', 'CoprBuildModel.get_by_build_id', (['(123456)', 'SampleValues.chroots[0]'], {}), '(123456, SampleValues.chroots[0])\n', (3057, 3090), False, 'from packit_service.models import CoprBuildModel\n'), ((2575, 2643), 'flask.url_for', 'url_for', (['"""builds.get_srpm_build_logs_by_id"""'], {'id_': 'srpm_build_model.id'}), "('builds.get_srpm_build_logs_by_id', id_=srpm_build_model.id)\n", (2582, 2643), False, 'from flask import url_for\n'), ((333, 349), 'flexmock.flexmock', 'flexmock', (['models'], {}), '(models)\n', (341, 349), False, 'from flexmock import flexmock\n'), ((1026, 1042), 'flexmock.flexmock', 'flexmock', (['models'], {}), '(models)\n', (1034, 1042), False, 'from flexmock import flexmock\n'), ((1782, 1798), 'flexmock.flexmock', 'flexmock', (['models'], {}), '(models)\n', (1790, 1798), False, 'from flexmock import flexmock\n'), ((2913, 2929), 'flexmock.flexmock', 'flexmock', (['models'], {}), '(models)\n', (2921, 2929), False, 'from flexmock import flexmock\n'), ((3695, 3711), 
'flexmock.flexmock', 'flexmock', (['models'], {}), '(models)\n', (3703, 3711), False, 'from flexmock import flexmock\n')] |
# -*- coding: utf-8 -*-
""""
Bandidos estocásticos: introducción, algoritmos y experimentos
TFG Informática
Sección 8.4.4
Figuras 26, 27 y 28
Autor: <NAME>
"""
import math
import random
import scipy.stats as stats
import matplotlib.pyplot as plt
import numpy as np
def computemTeor(n, Delta):
    """Theoretical exploration length for two-armed Explore-then-Commit.

    Returns max(1, ceil(4/Delta^2 * ln(n*Delta^2/4))), or 0 when Delta == 0
    (no gap, nothing to explore).
    """
    if Delta == 0:
        return 0
    # guard against a negative log for small n*Delta^2 by flooring at 1
    return max(1, math.ceil(4 / (Delta * Delta) * math.log(n * Delta * Delta / 4)))
def computemOpt(n, Delta):
    """Numerically optimal exploration length for two-armed Explore-then-Commit.

    Minimizes E[regret](m) = m*Delta + (n - m)*Delta*Phi(-m*Delta/sqrt(2m))
    over m in {0, ..., n//2}, where Phi is the standard normal CDF;
    m = 0 is scored with the crude bound n*Delta/2.
    """
    gauss = stats.norm(0, 1)
    half = n // 2
    expected = np.empty(half + 1)
    expected[0] = 0.5 * n * Delta
    for m in range(1, half + 1):
        expected[m] = m * Delta + (n - m) * Delta * gauss.cdf(-m * Delta / math.sqrt(2 * m))
    # np.argmin, like min(range(...), key=...), returns the first minimizing index
    return int(np.argmin(expected))
def samplePseudoRegretEF(n,k,m,arms,gaps):
rwds = k*[0]
for i in range(m):
for j in range(k):
rwds[j] += arms[j].rvs()
maximum = max(rwds)
bestarm = random.choice([i for i in range(k) if rwds[i] == maximum])
return m*sum(gaps)+(n-m*k)*gaps[bestarm]
def samplePseudoRegretUCB(n,k,delta,arms,gaps):#cambiar a pseudo
T = k*[0] # número de veces que se ha elegido cada brazo
meanReward = k*[0] # media muestral de las recompensas obtenidas por cada brazo
UCB = k*[np.inf] # cota superior de confianza de cada brazo
regret = 0
for i in range(n):
chosenArm = max(range(k),key=lambda i: UCB[i])
rwd = arms[chosenArm].rvs()
meanReward[chosenArm] = T[chosenArm]/(T[chosenArm]+1)*meanReward[chosenArm] \
+ rwd/(T[chosenArm]+1)
T[chosenArm] +=1
UCB[chosenArm] = meanReward[chosenArm] + math.sqrt((2*math.log(1/delta))/T[chosenArm])
regret += gaps[chosenArm]
return regret
def plotDeltaRegret():
    """Monte-Carlo experiment: expected pseudo-regret vs. gap ∆ for Explore-then-Commit
    (fixed m = 25/50/75/100, theoretical m and numerically optimal m) and UCB,
    averaged over ``sampleNum`` runs of horizon ``n`` per gap value.
    Saves 'UCBDeltaRegret.pdf' (regret curves) and 'ms.pdf' (m_Teor vs m_Opt)
    in the current working directory and shows both figures.
    NOTE(review): left byte-identical on purpose -- the nested sampling loops fix
    the RNG draw order, so restructuring would change the sampled figures.
    """
    n = 1000
    sampleNum = 600
    arms = 2*[None]
    arms[0] = stats.norm(0,1)  # first arm: standard normal (the optimal arm)
    gaps = 2*[0]
    nDeltas = 20
    Deltas = np.linspace(0,1,nDeltas)
    regretEF25 = np.empty(nDeltas)
    regretEF50 = np.empty(nDeltas)
    regretEF75 = np.empty(nDeltas)
    regretEF100 = np.empty(nDeltas)
    regretEFmTeor = np.empty(nDeltas)
    regretEFOptimo = np.empty(nDeltas)
    regretUCB = np.empty(nDeltas)
    mTeor = nDeltas*[0]
    mOpt = nDeltas*[0]
    for i in range(nDeltas):
        Delta = Deltas[i]
        # second arm is Delta below the first one
        arms[1]= stats.norm(-Delta,1)
        gaps[1] = Delta
        regretEF25[i] = 0
        for k in range(sampleNum):
            regretEF25[i] += samplePseudoRegretEF(n,2,25,arms,gaps)
        regretEF25[i] /= sampleNum
        regretEF50[i] = 0
        for k in range(sampleNum):
            regretEF50[i] += samplePseudoRegretEF(n,2,50,arms,gaps)
        regretEF50[i] /= sampleNum
        regretEF75[i] = 0
        for k in range(sampleNum):
            regretEF75[i] += samplePseudoRegretEF(n,2,75,arms,gaps)
        regretEF75[i] /= sampleNum
        regretEF100[i] = 0
        for k in range(sampleNum):
            regretEF100[i] += samplePseudoRegretEF(n,2,100,arms,gaps)
        regretEF100[i] /= sampleNum
        regretEFmTeor[i]= 0
        mTeor[i] = computemTeor(n,Delta)
        for k in range(sampleNum):
            regretEFmTeor[i] += samplePseudoRegretEF(n,2,mTeor[i],arms,gaps)
        regretEFmTeor[i] /= sampleNum
        regretEFOptimo[i] = 0
        mOpt[i] = computemOpt(n,Delta)
        for k in range(sampleNum):
            regretEFOptimo[i] += samplePseudoRegretEF(n,2,mOpt[i],arms,gaps)
        regretEFOptimo[i] /= sampleNum
        regretUCB[i] = 0
        # UCB run with confidence delta = 1/n^2
        for k in range(sampleNum):
            regretUCB[i] += samplePseudoRegretUCB(n,2,1/(n*n),arms,gaps)
        regretUCB[i] /= sampleNum
    # first figure: regret curves ('Remordimiento esperado' = expected regret)
    fig = plt.figure()
    plt.plot(Deltas,regretEF25, color='tab:blue',label= 'EP (m = 25)')
    plt.plot(Deltas,regretEF50, color='tab:green',label = 'EP (m = 50)')
    plt.plot(Deltas,regretEF75, color='tab:olive',label = 'EP (m = 75)')
    plt.plot(Deltas,regretEF100, color='tab:red', label = 'EP (m = 100)')
    plt.plot(Deltas,regretEFmTeor, color='tab:purple',label = 'EP (m = m_Teor)')
    plt.plot(Deltas,regretEFOptimo, color='tab:gray', label = 'EP (m = m_Opt)')
    plt.plot(Deltas,regretUCB, color='black', label = 'UCB')
    plt.xlabel('∆')
    plt.ylabel('Remordimiento esperado')
    plt.legend(loc='upper left',ncol = 2)
    fig.savefig('UCBDeltaRegret.pdf',format='pdf')
    plt.show()
    # second figure: theoretical vs numerically optimal exploration length
    fig = plt.figure()
    plt.plot(Deltas, mTeor, color='tab:purple', label = 'm_Teor')
    plt.plot(Deltas,mOpt, color = 'tab:gray', label = 'm_Opt')
    plt.xlabel('∆')
    plt.ylabel('m')
    plt.legend(loc='upper left')
    fig.savefig('ms.pdf',format='pdf')
    plt.show()
def plotDeltaRegret2():
    """Monte-Carlo experiment: expected pseudo-regret vs. gap ∆ comparing
    Explore-then-Commit (m = 25, 100, and numerically optimal m) against UCB
    for several confidence levels delta = 1, 1/100, 1/10^4, 1/10^6, 1/10^8.
    Saves 'UCBDeltaRegret2.pdf' in the current working directory and shows it.
    NOTE(review): left byte-identical on purpose -- the nested sampling loops fix
    the RNG draw order, so restructuring would change the sampled figure.
    """
    n = 1000
    sampleNum = 600
    arms = 2*[None]
    arms[0] = stats.norm(0,1)  # first arm: standard normal (the optimal arm)
    gaps = 2*[0]
    nDeltas = 20
    Deltas = np.linspace(0,1,nDeltas)
    regretEF25 = np.empty(nDeltas)
    regretEF100 = np.empty(nDeltas)
    regretEFOptimo = np.empty(nDeltas)
    regretUCB0 = np.empty(nDeltas)
    regretUCB2 = np.empty(nDeltas)
    regretUCB4 = np.empty(nDeltas)
    regretUCB6 = np.empty(nDeltas)
    regretUCB8 = np.empty(nDeltas)
    mOpt = nDeltas*[0]
    for i in range(nDeltas):
        Delta = Deltas[i]
        # second arm is Delta below the first one
        arms[1]= stats.norm(-Delta,1)
        gaps[1] = Delta
        regretEF25[i] = 0
        for k in range(sampleNum):
            regretEF25[i] += samplePseudoRegretEF(n,2,25,arms,gaps)
        regretEF25[i] /= sampleNum
        regretEF100[i] = 0
        for k in range(sampleNum):
            regretEF100[i] += samplePseudoRegretEF(n,2,100,arms,gaps)
        regretEF100[i] /= sampleNum
        regretEFOptimo[i] = 0
        mOpt[i] = computemOpt(n,Delta)
        for k in range(sampleNum):
            regretEFOptimo[i] += samplePseudoRegretEF(n,2,mOpt[i],arms,gaps)
        regretEFOptimo[i] /= sampleNum
        # UCB with decreasing confidence parameters delta = 1 ... 1/10^8
        regretUCB0[i] = 0
        for k in range(sampleNum):
            regretUCB0[i] += samplePseudoRegretUCB(n,2,1,arms,gaps)
        regretUCB0[i] /= sampleNum
        regretUCB2[i] = 0
        for k in range(sampleNum):
            regretUCB2[i] += samplePseudoRegretUCB(n,2,1/100,arms,gaps)
        regretUCB2[i] /= sampleNum
        regretUCB4[i] = 0
        for k in range(sampleNum):
            regretUCB4[i] += samplePseudoRegretUCB(n,2,1/10000,arms,gaps)
        regretUCB4[i] /= sampleNum
        regretUCB6[i] = 0
        for k in range(sampleNum):
            regretUCB6[i] += samplePseudoRegretUCB(n,2,1/(n*n),arms,gaps)
        regretUCB6[i] /= sampleNum
        regretUCB8[i] = 0
        for k in range(sampleNum):
            regretUCB8[i] += samplePseudoRegretUCB(n,2,1/(10**8),arms,gaps)
        regretUCB8[i] /= sampleNum
    # regret curves ('Remordimiento esperado' = expected regret)
    fig = plt.figure()
    plt.plot(Deltas,regretEF25, color='tab:blue',label= 'EP (m = 25)')
    plt.plot(Deltas,regretEF100, color='tab:red', label = 'EP (m = 100)')
    plt.plot(Deltas,regretEFOptimo, color='tab:gray', label = 'EP (m = m_Opt)')
    plt.plot(Deltas,regretUCB0, color='salmon', label = 'UCB (δ = 1)')
    plt.plot(Deltas,regretUCB2, color='gold', label = 'UCB (δ = 1/100)')
    plt.plot(Deltas,regretUCB4, color='mediumspringgreen', label = 'UCB (δ = 1/10⁴)')
    plt.plot(Deltas,regretUCB6, color='black', label = 'UCB (δ = 1/10⁶)')
    plt.plot(Deltas,regretUCB8, color='indigo', label = 'UCB (δ = 1/10⁸)')
    plt.xlabel('∆')
    plt.ylabel('Remordimiento esperado')
    plt.legend(loc='upper left',ncol = 2)
    fig.savefig('UCBDeltaRegret2.pdf',format='pdf')
    plt.show()
# Entry point: run the first experiment; the second one is left disabled.
plotDeltaRegret()
#plotDeltaRegret2()
| [
"matplotlib.pyplot.ylabel",
"scipy.stats.norm",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"math.sqrt",
"math.log",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.empty",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((486, 506), 'numpy.empty', 'np.empty', (['(n // 2 + 1)'], {}), '(n // 2 + 1)\n', (494, 506), True, 'import numpy as np\n'), ((512, 528), 'scipy.stats.norm', 'stats.norm', (['(0)', '(1)'], {}), '(0, 1)\n', (522, 528), True, 'import scipy.stats as stats\n'), ((1994, 2010), 'scipy.stats.norm', 'stats.norm', (['(0)', '(1)'], {}), '(0, 1)\n', (2004, 2010), True, 'import scipy.stats as stats\n'), ((2066, 2092), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'nDeltas'], {}), '(0, 1, nDeltas)\n', (2077, 2092), True, 'import numpy as np\n'), ((2109, 2126), 'numpy.empty', 'np.empty', (['nDeltas'], {}), '(nDeltas)\n', (2117, 2126), True, 'import numpy as np\n'), ((2145, 2162), 'numpy.empty', 'np.empty', (['nDeltas'], {}), '(nDeltas)\n', (2153, 2162), True, 'import numpy as np\n'), ((2181, 2198), 'numpy.empty', 'np.empty', (['nDeltas'], {}), '(nDeltas)\n', (2189, 2198), True, 'import numpy as np\n'), ((2218, 2235), 'numpy.empty', 'np.empty', (['nDeltas'], {}), '(nDeltas)\n', (2226, 2235), True, 'import numpy as np\n'), ((2257, 2274), 'numpy.empty', 'np.empty', (['nDeltas'], {}), '(nDeltas)\n', (2265, 2274), True, 'import numpy as np\n'), ((2297, 2314), 'numpy.empty', 'np.empty', (['nDeltas'], {}), '(nDeltas)\n', (2305, 2314), True, 'import numpy as np\n'), ((2332, 2349), 'numpy.empty', 'np.empty', (['nDeltas'], {}), '(nDeltas)\n', (2340, 2349), True, 'import numpy as np\n'), ((3996, 4008), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4006, 4008), True, 'import matplotlib.pyplot as plt\n'), ((4014, 4081), 'matplotlib.pyplot.plot', 'plt.plot', (['Deltas', 'regretEF25'], {'color': '"""tab:blue"""', 'label': '"""EP (m = 25)"""'}), "(Deltas, regretEF25, color='tab:blue', label='EP (m = 25)')\n", (4022, 4081), True, 'import matplotlib.pyplot as plt\n'), ((4086, 4154), 'matplotlib.pyplot.plot', 'plt.plot', (['Deltas', 'regretEF50'], {'color': '"""tab:green"""', 'label': '"""EP (m = 50)"""'}), "(Deltas, regretEF50, color='tab:green', label='EP (m = 50)')\n", 
(4094, 4154), True, 'import matplotlib.pyplot as plt\n'), ((4160, 4228), 'matplotlib.pyplot.plot', 'plt.plot', (['Deltas', 'regretEF75'], {'color': '"""tab:olive"""', 'label': '"""EP (m = 75)"""'}), "(Deltas, regretEF75, color='tab:olive', label='EP (m = 75)')\n", (4168, 4228), True, 'import matplotlib.pyplot as plt\n'), ((4234, 4302), 'matplotlib.pyplot.plot', 'plt.plot', (['Deltas', 'regretEF100'], {'color': '"""tab:red"""', 'label': '"""EP (m = 100)"""'}), "(Deltas, regretEF100, color='tab:red', label='EP (m = 100)')\n", (4242, 4302), True, 'import matplotlib.pyplot as plt\n'), ((4309, 4385), 'matplotlib.pyplot.plot', 'plt.plot', (['Deltas', 'regretEFmTeor'], {'color': '"""tab:purple"""', 'label': '"""EP (m = m_Teor)"""'}), "(Deltas, regretEFmTeor, color='tab:purple', label='EP (m = m_Teor)')\n", (4317, 4385), True, 'import matplotlib.pyplot as plt\n'), ((4391, 4465), 'matplotlib.pyplot.plot', 'plt.plot', (['Deltas', 'regretEFOptimo'], {'color': '"""tab:gray"""', 'label': '"""EP (m = m_Opt)"""'}), "(Deltas, regretEFOptimo, color='tab:gray', label='EP (m = m_Opt)')\n", (4399, 4465), True, 'import matplotlib.pyplot as plt\n'), ((4472, 4527), 'matplotlib.pyplot.plot', 'plt.plot', (['Deltas', 'regretUCB'], {'color': '"""black"""', 'label': '"""UCB"""'}), "(Deltas, regretUCB, color='black', label='UCB')\n", (4480, 4527), True, 'import matplotlib.pyplot as plt\n'), ((4534, 4549), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""∆"""'], {}), "('∆')\n", (4544, 4549), True, 'import matplotlib.pyplot as plt\n'), ((4555, 4591), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Remordimiento esperado"""'], {}), "('Remordimiento esperado')\n", (4565, 4591), True, 'import matplotlib.pyplot as plt\n'), ((4597, 4633), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""', 'ncol': '(2)'}), "(loc='upper left', ncol=2)\n", (4607, 4633), True, 'import matplotlib.pyplot as plt\n'), ((4692, 4702), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4700, 4702), 
True, 'import matplotlib.pyplot as plt\n'), ((4720, 4732), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4730, 4732), True, 'import matplotlib.pyplot as plt\n'), ((4738, 4797), 'matplotlib.pyplot.plot', 'plt.plot', (['Deltas', 'mTeor'], {'color': '"""tab:purple"""', 'label': '"""m_Teor"""'}), "(Deltas, mTeor, color='tab:purple', label='m_Teor')\n", (4746, 4797), True, 'import matplotlib.pyplot as plt\n'), ((4805, 4860), 'matplotlib.pyplot.plot', 'plt.plot', (['Deltas', 'mOpt'], {'color': '"""tab:gray"""', 'label': '"""m_Opt"""'}), "(Deltas, mOpt, color='tab:gray', label='m_Opt')\n", (4813, 4860), True, 'import matplotlib.pyplot as plt\n'), ((4869, 4884), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""∆"""'], {}), "('∆')\n", (4879, 4884), True, 'import matplotlib.pyplot as plt\n'), ((4890, 4905), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""m"""'], {}), "('m')\n", (4900, 4905), True, 'import matplotlib.pyplot as plt\n'), ((4911, 4939), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (4921, 4939), True, 'import matplotlib.pyplot as plt\n'), ((4985, 4995), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4993, 4995), True, 'import matplotlib.pyplot as plt\n'), ((5108, 5124), 'scipy.stats.norm', 'stats.norm', (['(0)', '(1)'], {}), '(0, 1)\n', (5118, 5124), True, 'import scipy.stats as stats\n'), ((5180, 5206), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'nDeltas'], {}), '(0, 1, nDeltas)\n', (5191, 5206), True, 'import numpy as np\n'), ((5223, 5240), 'numpy.empty', 'np.empty', (['nDeltas'], {}), '(nDeltas)\n', (5231, 5240), True, 'import numpy as np\n'), ((5260, 5277), 'numpy.empty', 'np.empty', (['nDeltas'], {}), '(nDeltas)\n', (5268, 5277), True, 'import numpy as np\n'), ((5300, 5317), 'numpy.empty', 'np.empty', (['nDeltas'], {}), '(nDeltas)\n', (5308, 5317), True, 'import numpy as np\n'), ((5342, 5359), 'numpy.empty', 'np.empty', (['nDeltas'], {}), '(nDeltas)\n', (5350, 5359), True, 
'import numpy as np\n'), ((5378, 5395), 'numpy.empty', 'np.empty', (['nDeltas'], {}), '(nDeltas)\n', (5386, 5395), True, 'import numpy as np\n'), ((5414, 5431), 'numpy.empty', 'np.empty', (['nDeltas'], {}), '(nDeltas)\n', (5422, 5431), True, 'import numpy as np\n'), ((5450, 5467), 'numpy.empty', 'np.empty', (['nDeltas'], {}), '(nDeltas)\n', (5458, 5467), True, 'import numpy as np\n'), ((5486, 5503), 'numpy.empty', 'np.empty', (['nDeltas'], {}), '(nDeltas)\n', (5494, 5503), True, 'import numpy as np\n'), ((7294, 7306), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7304, 7306), True, 'import matplotlib.pyplot as plt\n'), ((7312, 7379), 'matplotlib.pyplot.plot', 'plt.plot', (['Deltas', 'regretEF25'], {'color': '"""tab:blue"""', 'label': '"""EP (m = 25)"""'}), "(Deltas, regretEF25, color='tab:blue', label='EP (m = 25)')\n", (7320, 7379), True, 'import matplotlib.pyplot as plt\n'), ((7384, 7452), 'matplotlib.pyplot.plot', 'plt.plot', (['Deltas', 'regretEF100'], {'color': '"""tab:red"""', 'label': '"""EP (m = 100)"""'}), "(Deltas, regretEF100, color='tab:red', label='EP (m = 100)')\n", (7392, 7452), True, 'import matplotlib.pyplot as plt\n'), ((7459, 7533), 'matplotlib.pyplot.plot', 'plt.plot', (['Deltas', 'regretEFOptimo'], {'color': '"""tab:gray"""', 'label': '"""EP (m = m_Opt)"""'}), "(Deltas, regretEFOptimo, color='tab:gray', label='EP (m = m_Opt)')\n", (7467, 7533), True, 'import matplotlib.pyplot as plt\n'), ((7540, 7605), 'matplotlib.pyplot.plot', 'plt.plot', (['Deltas', 'regretUCB0'], {'color': '"""salmon"""', 'label': '"""UCB (δ = 1)"""'}), "(Deltas, regretUCB0, color='salmon', label='UCB (δ = 1)')\n", (7548, 7605), True, 'import matplotlib.pyplot as plt\n'), ((7612, 7679), 'matplotlib.pyplot.plot', 'plt.plot', (['Deltas', 'regretUCB2'], {'color': '"""gold"""', 'label': '"""UCB (δ = 1/100)"""'}), "(Deltas, regretUCB2, color='gold', label='UCB (δ = 1/100)')\n", (7620, 7679), True, 'import matplotlib.pyplot as plt\n'), ((7686, 7771), 
'matplotlib.pyplot.plot', 'plt.plot', (['Deltas', 'regretUCB4'], {'color': '"""mediumspringgreen"""', 'label': '"""UCB (δ = 1/10⁴)"""'}), "(Deltas, regretUCB4, color='mediumspringgreen', label='UCB (δ = 1/10⁴)'\n )\n", (7694, 7771), True, 'import matplotlib.pyplot as plt\n'), ((7773, 7841), 'matplotlib.pyplot.plot', 'plt.plot', (['Deltas', 'regretUCB6'], {'color': '"""black"""', 'label': '"""UCB (δ = 1/10⁶)"""'}), "(Deltas, regretUCB6, color='black', label='UCB (δ = 1/10⁶)')\n", (7781, 7841), True, 'import matplotlib.pyplot as plt\n'), ((7848, 7917), 'matplotlib.pyplot.plot', 'plt.plot', (['Deltas', 'regretUCB8'], {'color': '"""indigo"""', 'label': '"""UCB (δ = 1/10⁸)"""'}), "(Deltas, regretUCB8, color='indigo', label='UCB (δ = 1/10⁸)')\n", (7856, 7917), True, 'import matplotlib.pyplot as plt\n'), ((7924, 7939), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""∆"""'], {}), "('∆')\n", (7934, 7939), True, 'import matplotlib.pyplot as plt\n'), ((7945, 7981), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Remordimiento esperado"""'], {}), "('Remordimiento esperado')\n", (7955, 7981), True, 'import matplotlib.pyplot as plt\n'), ((7987, 8023), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""', 'ncol': '(2)'}), "(loc='upper left', ncol=2)\n", (7997, 8023), True, 'import matplotlib.pyplot as plt\n'), ((8083, 8093), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8091, 8093), True, 'import matplotlib.pyplot as plt\n'), ((2496, 2517), 'scipy.stats.norm', 'stats.norm', (['(-Delta)', '(1)'], {}), '(-Delta, 1)\n', (2506, 2517), True, 'import scipy.stats as stats\n'), ((5621, 5642), 'scipy.stats.norm', 'stats.norm', (['(-Delta)', '(1)'], {}), '(-Delta, 1)\n', (5631, 5642), True, 'import scipy.stats as stats\n'), ((407, 438), 'math.log', 'math.log', (['(n * Delta * Delta / 4)'], {}), '(n * Delta * Delta / 4)\n', (415, 438), False, 'import math\n'), ((666, 682), 'math.sqrt', 'math.sqrt', (['(2 * m)'], {}), '(2 * m)\n', (675, 682), False, 'import 
math\n'), ((1764, 1783), 'math.log', 'math.log', (['(1 / delta)'], {}), '(1 / delta)\n', (1772, 1783), False, 'import math\n')] |
import os
import xml.etree.ElementTree as ET
from tempfile import NamedTemporaryFile
ENV_ASSET_DIR_V1 = os.path.join(os.path.dirname(__file__), 'assets_v1')
ENV_ASSET_DIR_V2 = os.path.join(os.path.dirname(__file__), 'assets_v2')
def full_v1_path_for(file_name):
    """Return the path of ``file_name`` inside the v1 asset directory."""
    asset_path = os.path.join(ENV_ASSET_DIR_V1, file_name)
    return asset_path
def full_v2_path_for(file_name, transparent_sawyer=False):
    """Return the path of a v2 asset XML file.

    When ``transparent_sawyer`` is True, a copy of the XML is written next to
    the original with the robot base include swapped for the transparent
    variant, and the path of that copy is returned instead.
    """
    asset_path = os.path.join(ENV_ASSET_DIR_V2, file_name)
    if not transparent_sawyer:
        return asset_path
    directory, base_name = os.path.split(asset_path)
    transparent_name = f"{base_name[:-len('.xml')]}_transparent_sawyer.xml"
    transparent_path = os.path.join(directory, transparent_name)
    tree = ET.parse(asset_path)
    # Point the worldbody's include at the transparent base mesh.
    include_node = tree.getroot().find('worldbody').find('include')
    include_node.set('file', '../objects/assets/xyz_base_transparent.xml')
    tree.write(transparent_path)
    return transparent_path
| [
"os.path.dirname",
"os.path.join",
"xml.etree.ElementTree.parse",
"os.path.split"
] | [((118, 143), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (133, 143), False, 'import os\n'), ((190, 215), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (205, 215), False, 'import os\n'), ((276, 317), 'os.path.join', 'os.path.join', (['ENV_ASSET_DIR_V1', 'file_name'], {}), '(ENV_ASSET_DIR_V1, file_name)\n', (288, 317), False, 'import os\n'), ((390, 431), 'os.path.join', 'os.path.join', (['ENV_ASSET_DIR_V2', 'file_name'], {}), '(ENV_ASSET_DIR_V2, file_name)\n', (402, 431), False, 'import os\n'), ((505, 524), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (518, 524), False, 'import os\n'), ((609, 638), 'os.path.join', 'os.path.join', (['fold', 'file_path'], {}), '(fold, file_path)\n', (621, 638), False, 'import os\n'), ((650, 664), 'xml.etree.ElementTree.parse', 'ET.parse', (['path'], {}), '(path)\n', (658, 664), True, 'import xml.etree.ElementTree as ET\n')] |
"""
LCCS Level 3 Classification
| Class name | Code | Numeric code |
|----------------------------------|-----|-----|
| Cultivated Terrestrial Vegetated | A11 | 111 |
| Natural Terrestrial Vegetated | A12 | 112 |
| Cultivated Aquatic Vegetated | A23 | 123 |
| Natural Aquatic Vegetated | A24 | 124 |
| Artificial Surface | B15 | 215 |
| Natural Surface | B16 | 216 |
| Artificial Water | B27 | 227 |
| Natural Water | B28 | 228 |
"""
import logging
import numpy
#: Required input variables
LCCS_L3_REQUIRED_VARIABLES = ["vegetat_veg_cat",
"aquatic_wat_cat",
"cultman_agr_cat",
"artific_urb_cat",
"artwatr_wat_cat"]
#: LCCS Level 3 Colour Scheme
LCCS_L3_COLOUR_SCHEME = {111 : (192, 255, 0, 255),
112 : (0, 128, 0, 255),
123 : (0, 255, 245, 255),
124 : (0, 192, 122, 255),
215 : (255, 0, 255, 255),
216 : (255, 192, 160, 255),
227 : (0, 155, 255, 255),
228 : (0, 0, 255, 255)}
def colour_lccs_level3(classification_array):
    """Colour a classification array with the LCCS Level 3 colour scheme.

    Returns four uint8 arrays of the same shape as ``classification_array``:
    red, green, blue and alpha.  Pixels whose class code is not present in
    ``LCCS_L3_COLOUR_SCHEME`` remain 0 in every channel.
    """
    channels = [numpy.zeros_like(classification_array, dtype=numpy.uint8)
                for _ in range(4)]
    for class_id, colour in LCCS_L3_COLOUR_SCHEME.items():
        mask = classification_array == class_id
        for channel, component in zip(channels, colour):
            channel[mask] = component
    red, green, blue, alpha = channels
    return red, green, blue, alpha
def _check_required_variables(classification_data):
    """Warn (without raising) for each required LCCS input variable that is
    missing from ``classification_data``."""
    available = classification_data.data_vars
    for required in LCCS_L3_REQUIRED_VARIABLES:
        if required not in available:
            logging.warning("Required variable {0} not found".format(required))
def classify_lccs_level3(classification_data):
    """
    Apply Level 3 LCCS Classification

    Requires xarray containing the following variables

    * vegetat_veg_cat - Binary mask 1=vegetation, 0=non-vegetation
    * aquatic_wat_cat - Binary mask 1=aquatic, 0=non-aquatic
    * cultman_agr_cat - Binary mask 1=cultivated/managed, 0=natural
    * artific_urb_cat - Binary mask 1=urban, 0=non-urban
    * artwatr_wat_cat - Binary mask 1=artificial water, 0=natural water

    Returns three arrays:

    * level1 - 100 (primarily vegetated) or 200 (primarily non-vegetated)
    * level2 - 10 (terrestrial) or 20 (aquatic)
    * level3 - LCCS Level 3 numeric codes (111..228); 0 where an optional
      input layer was missing

    Raises an Exception if the vegetation or aquatic mask is unavailable;
    the remaining layers only emit a warning and leave level3 at 0.
    """
    # Check required input and output variables exist.
    _check_required_variables(classification_data)

    # Set up arrays for outputs
    try:
        vegetation = classification_data["vegetat_veg_cat"].values == 1
    except KeyError:
        raise Exception("No data available for first level of classification "
                        "(vegetation / non-vegetation), can not proceed")
    # level3 starts at 0 everywhere; codes are filled in per supercategory below.
    level3 = numpy.zeros(vegetation.shape, dtype=numpy.uint8)

    # Level 1
    # Assign level 1 class of primarily vegetated (A,100) or primarily non-vegetated (B,200)
    level1 = numpy.where(vegetation, numpy.uint8(100), numpy.uint8(200))

    # Level 2
    # Assign level 2 class of terrestrial (10) or aquatic (20)
    try:
        aquatic = classification_data["aquatic_wat_cat"].values == 1
        level2 = numpy.where(aquatic, numpy.uint8(20), numpy.uint8(10))
    except KeyError:
        raise Exception("No data available for second level of classification "
                        "(aquatic / non-aquatic), can not proceed")

    # Level 3
    # Assign level 3 (Supercategory) class based on cultivated or artificial
    try:
        cultivated = classification_data["cultman_agr_cat"].values == 1
        # Cultivated Terrestrial Vegetation (A11)
        level3[vegetation & ~aquatic & cultivated] = 111
        # Cultivated Aquatic Vegetation (A23)
        level3[vegetation & aquatic & cultivated] = 123
        # Natural Terrestrial Vegetation (A12)
        level3[vegetation & ~aquatic & ~cultivated] = 112
        # Natural Aquatic Vegetation (A24)
        level3[vegetation & aquatic & ~cultivated] = 124
    except KeyError:
        logging.warning("No cultivated vegetation layer available. Skipping "
                        "assigning level 3 catergories for vegetation")
    try:
        urban = classification_data["artific_urb_cat"].values == 1
        # Artificial Surface (B15)
        level3[~vegetation & ~aquatic & urban] = 215
        # Natural Surface (B16)
        level3[~vegetation & ~aquatic & ~urban] = 216
    except KeyError:
        logging.warning("No urban layer available. Skipping assigning "
                        "level 3 for terrestrial non-vegetation")
    try:
        artificial_water = classification_data["artwatr_wat_cat"].values == 1
        # Artificial Water (B27)
        level3[~vegetation & aquatic & artificial_water] = 227
        # Natural Water (B28)
        level3[~vegetation & aquatic & ~artificial_water] = 228
    except KeyError:
        logging.warning("No artificial water layer available. Skipping assigning "
                        "level 3 for aquatic non-vegetation (water)")

    return level1, level2, level3
| [
"numpy.uint8",
"numpy.zeros",
"logging.warning",
"numpy.zeros_like"
] | [((1476, 1533), 'numpy.zeros_like', 'numpy.zeros_like', (['classification_array'], {'dtype': 'numpy.uint8'}), '(classification_array, dtype=numpy.uint8)\n', (1492, 1533), False, 'import numpy\n'), ((1546, 1567), 'numpy.zeros_like', 'numpy.zeros_like', (['red'], {}), '(red)\n', (1562, 1567), False, 'import numpy\n'), ((1579, 1600), 'numpy.zeros_like', 'numpy.zeros_like', (['red'], {}), '(red)\n', (1595, 1600), False, 'import numpy\n'), ((1613, 1634), 'numpy.zeros_like', 'numpy.zeros_like', (['red'], {}), '(red)\n', (1629, 1634), False, 'import numpy\n'), ((3163, 3211), 'numpy.zeros', 'numpy.zeros', (['vegetation.shape'], {'dtype': 'numpy.uint8'}), '(vegetation.shape, dtype=numpy.uint8)\n', (3174, 3211), False, 'import numpy\n'), ((3357, 3373), 'numpy.uint8', 'numpy.uint8', (['(100)'], {}), '(100)\n', (3368, 3373), False, 'import numpy\n'), ((3375, 3391), 'numpy.uint8', 'numpy.uint8', (['(200)'], {}), '(200)\n', (3386, 3391), False, 'import numpy\n'), ((3587, 3602), 'numpy.uint8', 'numpy.uint8', (['(20)'], {}), '(20)\n', (3598, 3602), False, 'import numpy\n'), ((3604, 3619), 'numpy.uint8', 'numpy.uint8', (['(10)'], {}), '(10)\n', (3615, 3619), False, 'import numpy\n'), ((4412, 4536), 'logging.warning', 'logging.warning', (['"""No cultivated vegetation layer available. Skipping assigning level 3 catergories for vegetation"""'], {}), "(\n 'No cultivated vegetation layer available. Skipping assigning level 3 catergories for vegetation'\n )\n", (4427, 4536), False, 'import logging\n'), ((4837, 4949), 'logging.warning', 'logging.warning', (['"""No urban layer available. Skipping assigning level 3 for terrestrial non-vegetation"""'], {}), "(\n 'No urban layer available. Skipping assigning level 3 for terrestrial non-vegetation'\n )\n", (4852, 4949), False, 'import logging\n'), ((5277, 5404), 'logging.warning', 'logging.warning', (['"""No artificial water layer available. 
Skipping assigning level 3 for aquatic non-vegetation (water)"""'], {}), "(\n 'No artificial water layer available. Skipping assigning level 3 for aquatic non-vegetation (water)'\n )\n", (5292, 5404), False, 'import logging\n')] |
"""
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
"""
# Write your code here
from collections import defaultdict
n, m, k = map(int, input().strip().split())
adjacency = defaultdict(list)
for _ in range(m):
a, b = map(int, input().strip().split())
adjacency[a].append(b)
final = [False] * (n + 1)
for node in adjacency[1]:
final[node] = True
buggy = 1
neighboring_nodes = defaultdict(set)
for _ in range(k):
x, y = map(int, input().strip().split())
if final[x]:
final[x] = False
for pa in neighboring_nodes[x]:
adjacency[pa].append(x)
neighboring_nodes[x] = set()
final[y] = True
if buggy == x:
buggy = y
for node in adjacency[buggy]:
final[node] = True
neighboring_nodes[node].add(buggy)
adjacency[buggy] = []
final[buggy] = True
print(sum(final))
print(' '.join(str(i) for i in range(n + 1) if final[i] is True))
| [
"collections.defaultdict"
] | [((370, 387), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (381, 387), False, 'from collections import defaultdict\n'), ((584, 600), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (595, 600), False, 'from collections import defaultdict\n')] |
# Generated by Django 3.0.2 on 2020-02-19 18:12
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the six unused future-tense columns from the ``Conjugation`` model."""

    dependencies = [
        ('level1', '0001_initial'),
    ]

    operations = [
        # Remove every *_future field; future-tense forms are no longer stored.
        migrations.RemoveField(
            model_name='conjugation',
            name='he_future',
        ),
        migrations.RemoveField(
            model_name='conjugation',
            name='i_future',
        ),
        migrations.RemoveField(
            model_name='conjugation',
            name='she_future',
        ),
        migrations.RemoveField(
            model_name='conjugation',
            name='we_future',
        ),
        migrations.RemoveField(
            model_name='conjugation',
            name='you_female_future',
        ),
        migrations.RemoveField(
            model_name='conjugation',
            name='you_male_future',
        ),
    ]
| [
"django.db.migrations.RemoveField"
] | [((215, 281), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""conjugation"""', 'name': '"""he_future"""'}), "(model_name='conjugation', name='he_future')\n", (237, 281), False, 'from django.db import migrations\n'), ((326, 391), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""conjugation"""', 'name': '"""i_future"""'}), "(model_name='conjugation', name='i_future')\n", (348, 391), False, 'from django.db import migrations\n'), ((436, 503), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""conjugation"""', 'name': '"""she_future"""'}), "(model_name='conjugation', name='she_future')\n", (458, 503), False, 'from django.db import migrations\n'), ((548, 614), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""conjugation"""', 'name': '"""we_future"""'}), "(model_name='conjugation', name='we_future')\n", (570, 614), False, 'from django.db import migrations\n'), ((659, 733), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""conjugation"""', 'name': '"""you_female_future"""'}), "(model_name='conjugation', name='you_female_future')\n", (681, 733), False, 'from django.db import migrations\n'), ((778, 850), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""conjugation"""', 'name': '"""you_male_future"""'}), "(model_name='conjugation', name='you_male_future')\n", (800, 850), False, 'from django.db import migrations\n')] |
from manimlib.imports import *
from srcs.utils import run
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.backends.backend_agg import FigureCanvasAgg
from sklearn import svm # sklearn = scikit-learn
from sklearn.datasets import make_moons
def mplfig_to_npimage(fig):
    """Render a matplotlib figure and return it as an (h, w, 3) uint8 RGB array.

    The figure is drawn through the Agg backend (the only backend still
    supporting ``tostring_rgb``) and closed before returning.
    """
    canvas = FigureCanvasAgg(fig)
    canvas.draw()  # flush all pending artists onto the canvas
    _, _, width, height = canvas.figure.bbox.bounds
    width, height = int(width), int(height)
    # Export the canvas pixels to a flat byte buffer, then reshape to an image.
    frame = np.frombuffer(canvas.tostring_rgb(), dtype=np.uint8)
    plt.close()
    return frame.reshape(height, width, 3)
def make_frame_mpl(t):
    """Draw the 1-D elevation curve at animation time ``t`` and return an RGB frame."""
    fig_mpl, axis = plt.subplots(1, figsize=(5, 3), facecolor='white')
    sample_x = np.linspace(-2, 2, 200)
    elevation = lambda phase: np.sinc(sample_x ** 2) + np.sin(sample_x + phase)
    axis.set_title("Elevation in y=0")
    axis.set_ylim(-1.5, 2.5)
    curve, = axis.plot(sample_x, elevation(0), lw=3)
    curve.set_ydata(elevation(np.pi * t))  # shift the wave according to time
    return mplfig_to_npimage(fig_mpl)
def make_frame(t):
    """Render one frame of the SVC decision-boundary demo at animation time ``t``.

    Sample weights ramp up with ``t`` so the data points fade in one after
    another, and the classifier is refit for every frame.
    """
    points, labels = make_moons(50, noise=0.1, random_state=2)
    fig, axis = plt.subplots(1, figsize=(4, 4), facecolor=(1, 1, 1))
    fig.subplots_adjust(left=0, right=1, bottom=0)
    grid_x, grid_y = np.meshgrid(np.linspace(-2, 3, 500), np.linspace(-1, 2, 500))
    axis.clear()
    axis.axis('off')
    axis.set_title("SVC classification", fontsize=16)
    classifier = svm.SVC(gamma=2, C=1)
    # Each point's weight grows from 0 to 1 as time advances.
    weights = np.minimum(1, np.maximum(0, t ** 2 + 10 - np.arange(50)))
    classifier.fit(points, labels, sample_weight=weights)
    decision = classifier.decision_function(np.c_[grid_x.ravel(), grid_y.ravel()])
    decision = decision.reshape(grid_x.shape)
    axis.contourf(grid_x, grid_y, decision, cmap=plt.cm.bone, alpha=0.8,
                  vmin=-2.5, vmax=2.5, levels=np.linspace(-2, 2, 20))
    axis.scatter(points[:, 0], points[:, 1], c=labels, s=50 * weights, cmap=plt.cm.bone)
    return mplfig_to_npimage(fig)
class manim_with_animation(Scene):
    """Manim scene showing two matplotlib animations side by side.

    The right image replays the elevation curve (``make_frame_mpl``) and the
    left image the SVC classification demo (``make_frame``); both are driven
    by a single shared ``ValueTracker`` acting as the animation clock.
    """

    def construct(self):
        during_times = ValueTracker(0)  # shared "time" parameter for both frames
        self.img = ImageMobject(make_frame_mpl(0))
        self.left_img = ImageMobject(make_frame(0))
        # Re-render each matplotlib frame whenever the tracker's value changes.
        self.img.add_updater(lambda d: d.set_array(make_frame_mpl(during_times.get_value())))
        self.img.shift(2*RIGHT)
        self.left_img.add_updater(lambda d: d.set_array(make_frame(during_times.get_value())))
        self.left_img.shift(2*LEFT)
        self.play(ShowCreation(self.img), ShowCreation(self.left_img), run_times=2)
        # Advance the clock in growing increments; the updaters redraw the images.
        for i in range(6):
            self.play(during_times.increment_value, 0.5*i, rate_func=linear,run_times=0.5*i)
        #
        # for i in range(6)[::-1]:
        #     self.play(during_times.increment_value, 0.1*i, rate_func=linear,run_times=0.1*i)
        self.wait()
if __name__=="__main__":
run([manim_with_animation]) | [
"numpy.arange",
"numpy.sinc",
"matplotlib.pyplot.close",
"sklearn.datasets.make_moons",
"numpy.linspace",
"matplotlib.backends.backend_agg.FigureCanvasAgg",
"numpy.sin",
"numpy.frombuffer",
"matplotlib.pyplot.subplots",
"srcs.utils.run",
"sklearn.svm.SVC"
] | [((452, 472), 'matplotlib.backends.backend_agg.FigureCanvasAgg', 'FigureCanvasAgg', (['fig'], {}), '(fig)\n', (467, 472), False, 'from matplotlib.backends.backend_agg import FigureCanvasAgg\n'), ((758, 792), 'numpy.frombuffer', 'np.frombuffer', (['buf'], {'dtype': 'np.uint8'}), '(buf, dtype=np.uint8)\n', (771, 792), True, 'import numpy as np\n'), ((797, 808), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (806, 808), True, 'import matplotlib.pyplot as plt\n'), ((886, 936), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': '(5, 3)', 'facecolor': '"""white"""'}), "(1, figsize=(5, 3), facecolor='white')\n", (898, 936), True, 'import matplotlib.pyplot as plt\n'), ((946, 969), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', '(200)'], {}), '(-2, 2, 200)\n', (957, 969), True, 'import numpy as np\n'), ((1266, 1307), 'sklearn.datasets.make_moons', 'make_moons', (['(50)'], {'noise': '(0.1)', 'random_state': '(2)'}), '(50, noise=0.1, random_state=2)\n', (1276, 1307), False, 'from sklearn.datasets import make_moons\n'), ((1332, 1384), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': '(4, 4)', 'facecolor': '(1, 1, 1)'}), '(1, figsize=(4, 4), facecolor=(1, 1, 1))\n', (1344, 1384), True, 'import matplotlib.pyplot as plt\n'), ((1615, 1636), 'sklearn.svm.SVC', 'svm.SVC', ([], {'gamma': '(2)', 'C': '(1)'}), '(gamma=2, C=1)\n', (1622, 1636), False, 'from sklearn import svm\n'), ((2962, 2989), 'srcs.utils.run', 'run', (['[manim_with_animation]'], {}), '([manim_with_animation])\n', (2965, 2989), False, 'from srcs.utils import run\n'), ((1461, 1484), 'numpy.linspace', 'np.linspace', (['(-2)', '(3)', '(500)'], {}), '(-2, 3, 500)\n', (1472, 1484), True, 'import numpy as np\n'), ((1486, 1509), 'numpy.linspace', 'np.linspace', (['(-1)', '(2)', '(500)'], {}), '(-1, 2, 500)\n', (1497, 1509), True, 'import numpy as np\n'), ((996, 1012), 'numpy.sinc', 'np.sinc', (['(xx ** 2)'], {}), '(xx ** 2)\n', (1003, 1012), True, 'import numpy as np\n'), 
((1015, 1029), 'numpy.sin', 'np.sin', (['(xx + d)'], {}), '(xx + d)\n', (1021, 1029), True, 'import numpy as np\n'), ((1973, 1995), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', '(20)'], {}), '(-2, 2, 20)\n', (1984, 1995), True, 'import numpy as np\n'), ((1713, 1726), 'numpy.arange', 'np.arange', (['(50)'], {}), '(50)\n', (1722, 1726), True, 'import numpy as np\n')] |
import copy
import datetime
import os
import random
import traceback
import numpy as np
import torch
from torch.utils.data import DataLoader
from torchvision.utils import save_image
from inference.inference_utils import get_trange, get_tqdm
def init_random_seed(value=0):
    """Seed the python, numpy and torch RNGs with ``value`` and force cuDNN
    into deterministic mode for reproducible runs."""
    for seed_fn in (random.seed, np.random.seed,
                    torch.manual_seed, torch.cuda.manual_seed):
        seed_fn(value)
    torch.backends.cudnn.deterministic = True
def copy_data_to_device(data, device):
    """Recursively move ``data`` (a tensor, or tensors nested in lists,
    tuples and dicts) to ``device``; lists are returned for both list and
    tuple inputs.  Raises ValueError for any other type."""
    if torch.is_tensor(data):
        return data.to(device)
    if isinstance(data, (list, tuple)):
        return [copy_data_to_device(item, device) for item in data]
    if isinstance(data, dict):
        return {key: copy_data_to_device(value, device)
                for key, value in data.items()}
    raise ValueError('Unexpected data type {}'.format(type(data)))
def sum_dicts(current, new):
    """Return the key-wise sum of two metric dicts without mutating either.

    ``current`` may be None (the accumulator's initial state), in which case
    ``new`` is returned as-is.  Keys missing from ``current`` default to 0.
    """
    if current is None:
        return new
    merged = dict(current)
    for key, value in new.items():
        merged[key] = merged.get(key, 0) + value
    return merged
def norm_dict(current, n):
    """Divide every value in the metric dict ``current`` by ``n``.

    Returns ``current`` unchanged when ``n`` is 0, so callers can pass a
    batch counter directly without guarding against division by zero.
    """
    if n == 0:
        return current
    # n is guaranteed non-zero here, so divide exactly.  The previous
    # ``n + 1e-6`` fudge term was redundant given the guard above and
    # slightly biased every reported mean downward.
    return {name: value / n for name, value in current.items()}
def train_eval_loop(model, train_dataset, val_dataset, criterion,
                    lr=1e-4, epoch_n=10, batch_size=32,
                    device='cuda', early_stopping_patience=10, l2_reg_alpha=0,
                    max_batches_per_epoch_train=10000,
                    max_batches_per_epoch_val=1000,
                    data_loader_ctor=DataLoader,
                    optimizer_ctor=None,
                    lr_scheduler_ctor=None,
                    shuffle_train=True,
                    dataloader_workers_n=0,
                    clip_grad=10,
                    save_vis_images_path=None,
                    save_vis_images_freq=100,
                    save_models_path=None,
                    save_models_freq=10):
    """Train ``model`` on ``train_dataset`` with per-epoch validation,
    early stopping, optional LR scheduling, and optional checkpoint /
    visualisation saving.

    ``criterion`` must return ``(loss, metrics_dict, vis_img_or_None)``.
    When ``vis_img`` is not None and ``save_vis_images_path`` is set, image
    grids are written every ``save_vis_images_freq`` batches (note:
    ``batch_y`` is then assumed to be a dict with an 'images' tensor).
    Training stops early once validation loss has not improved for
    ``early_stopping_patience`` epochs; KeyboardInterrupt and any other
    exception also stop training (the latter with a printed traceback).

    Returns ``(best_val_loss, best_val_metrics, best_model)`` where
    ``best_model`` is a deep copy of the model at its best validation epoch.
    """
    device = torch.device(device)
    model.to(device)
    # Default optimizer is Adam with l2_reg_alpha as weight decay; a custom
    # ctor only receives (params, lr).
    if optimizer_ctor is None:
        optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=l2_reg_alpha)
    else:
        optimizer = optimizer_ctor(model.parameters(), lr=lr)
    if lr_scheduler_ctor is not None:
        lr_scheduler = lr_scheduler_ctor(optimizer)
    else:
        lr_scheduler = None
    train_dataloader = data_loader_ctor(train_dataset, batch_size=batch_size, shuffle=shuffle_train,
                                       num_workers=dataloader_workers_n)
    val_dataloader = data_loader_ctor(val_dataset, batch_size=batch_size, shuffle=False,
                                     num_workers=dataloader_workers_n)
    best_val_loss = float('inf')
    best_val_metrics = None
    best_epoch_i = 0
    best_model = copy.deepcopy(model)
    for epoch_i in get_trange(epoch_n, desc='Epochs'):
        try:
            epoch_start = datetime.datetime.now()
            print('Epoch {}'.format(epoch_i))
            # ---- training pass ----
            model.train()
            mean_train_loss = 0
            mean_train_metrics = None
            train_batches_n = 0
            for batch_i, (batch_x, batch_y) in get_tqdm(enumerate(train_dataloader), desc=f'Epoch {epoch_i}',
                                                        total=max_batches_per_epoch_train, leave=True):
                if batch_i > max_batches_per_epoch_train:
                    break
                batch_x = copy_data_to_device(batch_x, device)
                batch_y = copy_data_to_device(batch_y, device)
                pred = model(batch_x)
                loss, metrics, vis_img = criterion(pred, batch_y)
                model.zero_grad()
                loss.backward()
                # Clip gradient norm to stabilise training.
                torch.nn.utils.clip_grad_norm_(model.parameters(), clip_grad)
                optimizer.step()
                mean_train_loss += float(loss)
                mean_train_metrics = sum_dicts(mean_train_metrics, metrics)
                if vis_img is not None and save_vis_images_path is not None and batch_i % save_vis_images_freq == 0:
                    save_image(vis_img,
                               os.path.join(save_vis_images_path,
                                            'epoch{:04d}_iter{:06d}_train.jpg'.format(epoch_i, batch_i)),
                               nrow=batch_y['images'].shape[0],
                               normalize=True,
                               range=(-1, 1))
                train_batches_n += 1
            mean_train_loss /= train_batches_n
            mean_train_metrics = norm_dict(mean_train_metrics, train_batches_n)
            print('Epoch: {} iterations, {:0.2f} sec'.format(train_batches_n,
                                                             (datetime.datetime.now() - epoch_start).total_seconds()))
            print('Mean train loss', mean_train_loss, mean_train_metrics)
            # Periodic full-model checkpoint (independent of best-model saving).
            if save_models_path is not None and epoch_i % save_models_freq == 0:
                torch.save(model, os.path.join(save_models_path, 'model_epoch_{:04d}.pth'.format(epoch_i)))
            # ---- validation pass (no gradients) ----
            model.eval()
            mean_val_loss = 0
            mean_val_metrics = None
            val_batches_n = 0
            with torch.no_grad():
                for batch_i, (batch_x, batch_y) in enumerate(val_dataloader):
                    if batch_i > max_batches_per_epoch_val:
                        break
                    batch_x = copy_data_to_device(batch_x, device)
                    batch_y = copy_data_to_device(batch_y, device)
                    pred = model(batch_x)
                    loss, metrics, vis_img = criterion(pred, batch_y)
                    mean_val_loss += float(loss)
                    mean_val_metrics = sum_dicts(mean_val_metrics, metrics)
                    if vis_img is not None and save_vis_images_path is not None and batch_i % save_vis_images_freq == 0:
                        save_image(vis_img,
                                   os.path.join(save_vis_images_path,
                                                'epoch{:04d}_iter{:06d}_val.jpg'.format(epoch_i, batch_i)),
                                   nrow=batch_y['images'].shape[0],
                                   normalize=True,
                                   range=(-1, 1))
                    val_batches_n += 1
            # Epsilon avoids division by zero when the val loader is empty.
            mean_val_loss /= val_batches_n + 1e-6
            mean_val_metrics = norm_dict(mean_val_metrics, val_batches_n)
            print('Mean validation loss', mean_val_loss, mean_val_metrics)
            # ---- best-model tracking and early stopping ----
            if mean_val_loss < best_val_loss:
                best_epoch_i = epoch_i
                best_val_loss = mean_val_loss
                best_val_metrics = mean_val_metrics
                best_model = copy.deepcopy(model)
                print('New best model!')
                if save_models_path is not None:
                    torch.save(best_model, os.path.join(save_models_path, 'best_model.pth'))
            elif epoch_i - best_epoch_i > early_stopping_patience:
                print('Model has not improved during the last {} epochs, stopping training early'.format(
                    early_stopping_patience))
                break
            # Plateau-style scheduler: step on the validation loss.
            if lr_scheduler is not None:
                lr_scheduler.step(mean_val_loss)
            print()
        except KeyboardInterrupt:
            print('Interrupted by user')
            break
        except Exception as ex:
            print('Fatal error during training: {}\n{}'.format(ex, traceback.format_exc()))
            break
    return best_val_loss, best_val_metrics, best_model
def predict_with_model(model, dataset, device='cuda', batch_size=32, num_workers=0, return_labels=False):
    """Apply a trained model to every item of a dataset, batch by batch.

    :param model: torch.nn.Module - trained model
    :param dataset: torch.utils.data.Dataset - data to apply the model to
    :param device: device string accepted by torch.device ('cuda'/'cpu')
    :param batch_size: number of samples per forward pass
    :param num_workers: DataLoader worker process count
    :param return_labels: when True, also return the ground-truth labels
    :return: numpy.array of dimensionality len(dataset) x *; when
        return_labels is True, a (predictions, labels) pair instead
    """
    target_device = torch.device(device)
    model.to(target_device)
    model.eval()

    loader = DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)

    batch_outputs = []
    collected_labels = []
    with torch.no_grad():
        import tqdm
        progress = tqdm.tqdm_notebook(loader, total=len(dataset) / batch_size)
        for batch_x, batch_y in progress:
            batch_x = copy_data_to_device(batch_x, target_device)
            if return_labels:
                collected_labels.append(batch_y.numpy())
            # Move each batch result back to the CPU before stacking.
            batch_outputs.append(model(batch_x).detach().cpu().numpy())

    predictions = np.concatenate(batch_outputs, 0)
    if return_labels:
        return predictions, np.concatenate(collected_labels, 0)
    return predictions
| [
"torch.manual_seed",
"traceback.format_exc",
"torch.utils.data.DataLoader",
"os.path.join",
"random.seed",
"torch.is_tensor",
"datetime.datetime.now",
"numpy.random.seed",
"numpy.concatenate",
"copy.deepcopy",
"torch.no_grad",
"torch.cuda.manual_seed",
"inference.inference_utils.get_trange",... | [((280, 298), 'random.seed', 'random.seed', (['value'], {}), '(value)\n', (291, 298), False, 'import random\n'), ((303, 324), 'numpy.random.seed', 'np.random.seed', (['value'], {}), '(value)\n', (317, 324), True, 'import numpy as np\n'), ((329, 353), 'torch.manual_seed', 'torch.manual_seed', (['value'], {}), '(value)\n', (346, 353), False, 'import torch\n'), ((358, 387), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['value'], {}), '(value)\n', (380, 387), False, 'import torch\n'), ((482, 503), 'torch.is_tensor', 'torch.is_tensor', (['data'], {}), '(data)\n', (497, 503), False, 'import torch\n'), ((1943, 1963), 'torch.device', 'torch.device', (['device'], {}), '(device)\n', (1955, 1963), False, 'import torch\n'), ((2746, 2766), 'copy.deepcopy', 'copy.deepcopy', (['model'], {}), '(model)\n', (2759, 2766), False, 'import copy\n'), ((2787, 2821), 'inference.inference_utils.get_trange', 'get_trange', (['epoch_n'], {'desc': '"""Epochs"""'}), "(epoch_n, desc='Epochs')\n", (2797, 2821), False, 'from inference.inference_utils import get_trange, get_tqdm\n'), ((7909, 7929), 'torch.device', 'torch.device', (['device'], {}), '(device)\n', (7921, 7929), False, 'import torch\n'), ((7986, 8073), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': 'num_workers'}), '(dataset, batch_size=batch_size, shuffle=False, num_workers=\n num_workers)\n', (7996, 8073), False, 'from torch.utils.data import DataLoader\n'), ((8094, 8109), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8107, 8109), False, 'import torch\n'), ((8601, 8636), 'numpy.concatenate', 'np.concatenate', (['results_by_batch', '(0)'], {}), '(results_by_batch, 0)\n', (8615, 8636), True, 'import numpy as np\n'), ((2862, 2885), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2883, 2885), False, 'import datetime\n'), ((8513, 8548), 'numpy.concatenate', 'np.concatenate', 
(['results_by_batch', '(0)'], {}), '(results_by_batch, 0)\n', (8527, 8548), True, 'import numpy as np\n'), ((8550, 8575), 'numpy.concatenate', 'np.concatenate', (['labels', '(0)'], {}), '(labels, 0)\n', (8564, 8575), True, 'import numpy as np\n'), ((5146, 5161), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5159, 5161), False, 'import torch\n'), ((6669, 6689), 'copy.deepcopy', 'copy.deepcopy', (['model'], {}), '(model)\n', (6682, 6689), False, 'import copy\n'), ((6825, 6873), 'os.path.join', 'os.path.join', (['save_models_path', '"""best_model.pth"""'], {}), "(save_models_path, 'best_model.pth')\n", (6837, 6873), False, 'import os\n'), ((7421, 7443), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (7441, 7443), False, 'import traceback\n'), ((4685, 4708), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4706, 4708), False, 'import datetime\n')] |
import zmq
import time
import sys
import struct
import multiprocessing
from examples.sim_trace import generate_trace
# Port can be overridden by the first CLI argument; defaults to 5556.
port = "5556"
if len(sys.argv) > 1:
    port = sys.argv[1]
    int(port)  # validation only: raises ValueError if the argument is not an integer
# Loopback address the ROUTER worker sockets connect to.
socket_addr = "tcp://127.0.0.1:%s" % port
# Oversubscribe the CPUs: workers spend part of their time blocked on the poller.
worker_count = multiprocessing.cpu_count() * 2 + 1
# NOTE(review): this flag is only meaningful within a single process; worker
# child processes get a copy taken at fork time and never see later updates.
stop_signal = False
def worker(context=None, name="worker"):
    """Serve trace requests over a ROUTER socket until `stop_signal` is set.

    :param context: optional shared zmq context; the process-wide singleton
        is used when None (or otherwise falsy).
    :param name: label printed at startup, for log readability.
    """
    ctx = context or zmq.Context.instance()
    sock = ctx.socket(zmq.ROUTER)
    sock.connect(socket_addr)
    print(f"Starting thread: {name}")

    poller = zmq.Poller()
    poller.register(sock, zmq.POLLIN)

    # Poll with a 1s timeout so the stop flag is re-checked regularly.
    # NOTE(review): `stop_signal` only changes within this process; when run
    # via multiprocessing, the parent's updates are not visible here.
    while not stop_signal:
        events = dict(poller.poll(timeout=1000))
        if events.get(sock) == zmq.POLLIN:
            ident, message = sock.recv_multipart()
            # Compute the simulated trace and reply with it as a packed double.
            reply = struct.pack("d", generate_trace())
            sock.send_multipart([ident, reply])
if __name__ == "__main__":
    worker_names = [f"worker-{i}" for i in range(worker_count)]
    worker_processes = [
        multiprocessing.Process(target=worker, args=(None, n)) for n in worker_names
    ]
    for proc in worker_processes:
        proc.start()

    # Park the main process until the user interrupts with Ctrl-C.
    while True:
        try:
            time.sleep(1)
        except KeyboardInterrupt:
            print("Ctrl-c pressed!")
            # BUG FIX: setting the module-global `stop_signal = True` here only
            # changed the parent's copy of the flag -- the forked worker
            # processes never observed it, so the subsequent join() blocked
            # forever. Terminate the children explicitly instead.
            for proc in worker_processes:
                proc.terminate()
            for proc in worker_processes:
                proc.join()
            break
| [
"multiprocessing.Process",
"time.sleep",
"multiprocessing.cpu_count",
"zmq.Context.instance",
"zmq.Poller",
"examples.sim_trace.generate_trace"
] | [((522, 534), 'zmq.Poller', 'zmq.Poller', ([], {}), '()\n', (532, 534), False, 'import zmq\n'), ((252, 279), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (277, 279), False, 'import multiprocessing\n'), ((374, 396), 'zmq.Context.instance', 'zmq.Context.instance', ([], {}), '()\n', (394, 396), False, 'import zmq\n'), ((1035, 1089), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'worker', 'args': '(None, n)'}), '(target=worker, args=(None, n))\n', (1058, 1089), False, 'import multiprocessing\n'), ((1198, 1211), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1208, 1211), False, 'import time\n'), ((842, 858), 'examples.sim_trace.generate_trace', 'generate_trace', ([], {}), '()\n', (856, 858), False, 'from examples.sim_trace import generate_trace\n')] |
from django.test import TestCase
from django.urls import reverse
from django.utils import timezone
from model_bakery import baker
from app_covid19data.models import DataCovid19Item
from app_covid19data import views
class Covid19dataTest(TestCase):
    """Tests for the app_covid19data views and helper queries."""

    def setUp(self):
        """Create five DataCovid19Item rows for Spain before every single test."""
        self.datacovid19 = baker.make(DataCovid19Item, country='Spain', date=timezone.now().date(),
                                     dead_cases=1, confirmed_cases=1, recovered_cases=1,
                                     _quantity=5)

    # Views tests
    def test_covid19data_resume_view(self):
        """The resume view responds with HTTP 200."""
        url = reverse(views.resume_view)
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)

    def test_covid19data_get_resume_country(self):
        """get_resume_country returns the aggregate for the requested country."""
        # The data for this test is loaded by setUp above.
        queryset = views.get_resume_country('Spain')
        self.assertEqual(queryset['country'], 'Spain')

    def test_covid19data_get_detail_country(self):
        """get_detail_country returns at least one row for a seeded country."""
        # Fix: removed a leftover debug print(queryset) that polluted test output.
        queryset = views.get_detail_country('Spain')
        self.assertGreaterEqual(len(queryset), 1)
| [
"django.utils.timezone.now",
"app_covid19data.views.get_resume_country",
"app_covid19data.views.get_detail_country",
"django.urls.reverse"
] | [((728, 754), 'django.urls.reverse', 'reverse', (['views.resume_view'], {}), '(views.resume_view)\n', (735, 754), False, 'from django.urls import reverse\n'), ((1025, 1058), 'app_covid19data.views.get_resume_country', 'views.get_resume_country', (['"""Spain"""'], {}), "('Spain')\n", (1049, 1058), False, 'from app_covid19data import views\n'), ((1185, 1218), 'app_covid19data.views.get_detail_country', 'views.get_detail_country', (['"""Spain"""'], {}), "('Spain')\n", (1209, 1218), False, 'from app_covid19data import views\n'), ((487, 501), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (499, 501), False, 'from django.utils import timezone\n')] |
from typing import Optional, List
from aiogram import types, Dispatcher, filters
from aiogram.dispatcher import FSMContext
from aiogram.dispatcher.filters.state import StatesGroup, State
from aiogram.types import ReplyKeyboardMarkup
from handlers.common_actions_handlers import process_manual_enter, process_option_selection, \
process_complete_part_editing, claim_tmp_option_chosen, show_claim_tmp_example
from keyboards import emojis, get_common_start_kb, get_next_actions_kb, get_claim_parts_kb
from repository import Repository
from statistics import collect_statistic
CLAIM_PART: str = "essence"
class EssencePart(StatesGroup):
    """FSM states used while the user edits the 'essence' part of a claim."""
    waiting_for_user_action = State()  # main menu: manual text vs. option list
    waiting_for_option_chosen = State()  # inline-keyboard option selection
@collect_statistic(event_name="essence:start")
async def essence_start(message: types.Message, state: FSMContext):
    """Entry point for the 'essence' claim part.

    Refuses to start until the required 'story' part has been filled in,
    otherwise switches the FSM into the waiting-for-action state and shows
    the start keyboard.
    """
    user_id = message.from_user.id
    stored = Repository().get_claim_data(user_id)
    required_parts: List[str] = ["story"]

    parts_filled = stored.get("claim_data") if stored is not None else None
    has_required = parts_filled is not None and any(
        name in parts_filled for name in required_parts
    )
    if not has_required:
        # The story ("фабула") section is a prerequisite for this one.
        await message.reply("Пожалуйста, сперва заполните раздел 'фабула'.",
                            reply_markup=get_claim_parts_kb(user_id))
        return

    await EssencePart.waiting_for_user_action.set()
    start_kb: ReplyKeyboardMarkup = get_common_start_kb()
    await message.reply("Опишите суть нарушения. "
                        "Введите, почему вы считаете, что ваши права нарушают. "
                        "Или выберите одну из следующий опций.",
                        reply_markup=start_kb)
@collect_statistic(event_name="essence:show_example")
async def show_example(message: types.Message, state: FSMContext):
    # Delegate to the shared helper that sends a template example for this part.
    await show_claim_tmp_example(message, CLAIM_PART)
async def action_selected(message: types.Message, state: FSMContext):
    """Route the user's reply: option list, finish editing, or free-form text."""
    text: Optional[str] = message.text
    # str.endswith accepts a tuple: either phrase opens the options list.
    if text.endswith(("выбрать из списка", "добавить еще из списка")):
        await process_option_selection(message, CLAIM_PART, EssencePart)
    elif text.endswith("закончить заполнение"):
        await process_complete_part_editing(message, state, CLAIM_PART)
    else:
        # Anything else is treated as manually entered content.
        await process_manual_enter(message, state, EssencePart)
async def option_chosen(callback_query: types.CallbackQuery, state: FSMContext):
    # Persist the template option picked via the inline keyboard.
    await claim_tmp_option_chosen(callback_query, state, CLAIM_PART)
async def finish_option_choosing(callback_query: types.CallbackQuery):
    """Leave the option-selection state and prompt for the next action."""
    # Acknowledge the callback so the client stops showing a spinner.
    await callback_query.answer()
    await EssencePart.waiting_for_user_action.set()
    next_actions_kb: ReplyKeyboardMarkup = get_next_actions_kb()
    await callback_query.message.answer("Введите свой вариант самостоятельно. "
                                        "Или выберите дальнейшее действие с помощью клавиатуры",
                                        reply_markup=next_actions_kb)
def register_handlers(dp: Dispatcher):
    """Register all message/callback handlers for the 'essence' claim part."""
    # Entry point: triggered by the menu button text.
    dp.register_message_handler(essence_start, filters.Regexp(f"^{emojis.key} суть нарушения"))
    # The example is available from any state of this part.
    dp.register_message_handler(show_example,
                                filters.Regexp(f"^{emojis.red_question_mark} показать пример"),
                                state=EssencePart.states)
    dp.register_message_handler(action_selected, state=EssencePart.waiting_for_user_action)
    # Inline-keyboard option payloads start with "option".
    dp.register_callback_query_handler(
        option_chosen,
        filters.Text(startswith="option"),
        state=EssencePart.waiting_for_option_chosen
    )
    dp.register_callback_query_handler(finish_option_choosing,
                                       filters.Text(equals="complete options"),
                                       state=EssencePart.waiting_for_option_chosen)
| [
"aiogram.filters.Text",
"aiogram.filters.Regexp",
"statistics.collect_statistic",
"handlers.common_actions_handlers.process_manual_enter",
"aiogram.dispatcher.filters.state.State",
"keyboards.get_next_actions_kb",
"handlers.common_actions_handlers.process_option_selection",
"handlers.common_actions_ha... | [((723, 768), 'statistics.collect_statistic', 'collect_statistic', ([], {'event_name': '"""essence:start"""'}), "(event_name='essence:start')\n", (740, 768), False, 'from statistics import collect_statistic\n'), ((1736, 1788), 'statistics.collect_statistic', 'collect_statistic', ([], {'event_name': '"""essence:show_example"""'}), "(event_name='essence:show_example')\n", (1753, 1788), False, 'from statistics import collect_statistic\n'), ((672, 679), 'aiogram.dispatcher.filters.state.State', 'State', ([], {}), '()\n', (677, 679), False, 'from aiogram.dispatcher.filters.state import StatesGroup, State\n'), ((712, 719), 'aiogram.dispatcher.filters.state.State', 'State', ([], {}), '()\n', (717, 719), False, 'from aiogram.dispatcher.filters.state import StatesGroup, State\n'), ((866, 878), 'repository.Repository', 'Repository', ([], {}), '()\n', (876, 878), False, 'from repository import Repository\n'), ((1467, 1488), 'keyboards.get_common_start_kb', 'get_common_start_kb', ([], {}), '()\n', (1486, 1488), False, 'from keyboards import emojis, get_common_start_kb, get_next_actions_kb, get_claim_parts_kb\n'), ((2751, 2772), 'keyboards.get_next_actions_kb', 'get_next_actions_kb', ([], {}), '()\n', (2770, 2772), False, 'from keyboards import emojis, get_common_start_kb, get_next_actions_kb, get_claim_parts_kb\n'), ((1188, 1228), 'keyboards.get_claim_parts_kb', 'get_claim_parts_kb', (['message.from_user.id'], {}), '(message.from_user.id)\n', (1206, 1228), False, 'from keyboards import emojis, get_common_start_kb, get_next_actions_kb, get_claim_parts_kb\n'), ((1866, 1909), 'handlers.common_actions_handlers.show_claim_tmp_example', 'show_claim_tmp_example', (['message', 'CLAIM_PART'], {}), '(message, CLAIM_PART)\n', (1888, 1909), False, 'from handlers.common_actions_handlers import process_manual_enter, process_option_selection, process_complete_part_editing, claim_tmp_option_chosen, show_claim_tmp_example\n'), ((2347, 2396), 
'handlers.common_actions_handlers.process_manual_enter', 'process_manual_enter', (['message', 'state', 'EssencePart'], {}), '(message, state, EssencePart)\n', (2367, 2396), False, 'from handlers.common_actions_handlers import process_manual_enter, process_option_selection, process_complete_part_editing, claim_tmp_option_chosen, show_claim_tmp_example\n'), ((2490, 2548), 'handlers.common_actions_handlers.claim_tmp_option_chosen', 'claim_tmp_option_chosen', (['callback_query', 'state', 'CLAIM_PART'], {}), '(callback_query, state, CLAIM_PART)\n', (2513, 2548), False, 'from handlers.common_actions_handlers import process_manual_enter, process_option_selection, process_complete_part_editing, claim_tmp_option_chosen, show_claim_tmp_example\n'), ((3108, 3155), 'aiogram.filters.Regexp', 'filters.Regexp', (['f"""^{emojis.key} суть нарушения"""'], {}), "(f'^{emojis.key} суть нарушения')\n", (3122, 3155), False, 'from aiogram import types, Dispatcher, filters\n'), ((3235, 3297), 'aiogram.filters.Regexp', 'filters.Regexp', (['f"""^{emojis.red_question_mark} показать пример"""'], {}), "(f'^{emojis.red_question_mark} показать пример')\n", (3249, 3297), False, 'from aiogram import types, Dispatcher, filters\n'), ((3520, 3553), 'aiogram.filters.Text', 'filters.Text', ([], {'startswith': '"""option"""'}), "(startswith='option')\n", (3532, 3553), False, 'from aiogram import types, Dispatcher, filters\n'), ((3716, 3755), 'aiogram.filters.Text', 'filters.Text', ([], {'equals': '"""complete options"""'}), "(equals='complete options')\n", (3728, 3755), False, 'from aiogram import types, Dispatcher, filters\n'), ((2127, 2185), 'handlers.common_actions_handlers.process_option_selection', 'process_option_selection', (['message', 'CLAIM_PART', 'EssencePart'], {}), '(message, CLAIM_PART, EssencePart)\n', (2151, 2185), False, 'from handlers.common_actions_handlers import process_manual_enter, process_option_selection, process_complete_part_editing, claim_tmp_option_chosen, 
show_claim_tmp_example\n'), ((2263, 2320), 'handlers.common_actions_handlers.process_complete_part_editing', 'process_complete_part_editing', (['message', 'state', 'CLAIM_PART'], {}), '(message, state, CLAIM_PART)\n', (2292, 2320), False, 'from handlers.common_actions_handlers import process_manual_enter, process_option_selection, process_complete_part_editing, claim_tmp_option_chosen, show_claim_tmp_example\n')] |
"""Module related to processing of an outbound message"""
from typing import Dict, Optional
from utilities import integration_adaptors_logger as log
from builder.pystache_message_builder import MessageGenerationError
from message_handling.message_sender import MessageSender
import xml.etree.ElementTree as ET
# Module-level logger tagged with the MSG-HANDLER component name.
logger = log.IntegrationAdaptorsLogger('MSG-HANDLER')
class MessageSendingError(Exception):
    """Error raised during message sending.

    The message is kept on the ``msg`` attribute for existing callers and is
    also passed to ``Exception.__init__`` so ``args``, ``repr()`` and
    pickling behave correctly.
    """

    def __init__(self, msg):
        # Fix: Exception.__init__ was previously never called, leaving
        # `args` empty and breaking repr()/pickling of the exception.
        super().__init__(msg)
        self.msg = msg

    def __str__(self):
        return self.msg
class MessageForwarder(object):
    """Class to provide message forwarding functionality, in particular hl7 message population is performed."""
    def __init__(self, interactions: dict, message_sender: MessageSender):
        """
        Constructor for the message forwarder
        :param interactions: A dictionary mapping human readable interaction names to the object that is responsible
        for populating the associated message template
        :param message_sender: Dependency used to deliver the populated message to the MHS
        """
        self.interactions = interactions
        self.message_sender = message_sender
    async def forward_message_to_mhs(self, interaction_name: str,
                                     message_contents: Dict,
                                     message_id: Optional[str],
                                     correlation_id: Optional[str]):
        """
        Handles forwarding a given interaction to the MHS, including populating the appropriate message template
        :param interaction_name: The human readable name associated with a particular interaction
        :param message_contents: The dictionary parsed from the json body
        :param message_id: Optional message id forwarded to the MHS
        :param correlation_id: Optional correlation id forwarded to the MHS
        :return: The interaction-specific parse of the MHS response
        """
        templator = self._get_interaction_template_populator(interaction_name)
        populated_message = self._populate_message_template(templator, message_contents)
        response = await self._send_message_to_mhs(interaction_id=templator.interaction_id,
                                                   message=populated_message,
                                                   message_id=message_id,
                                                   correlation_id=correlation_id)
        return templator.parse_response(response)
    def _get_interaction_template_populator(self, interaction_name: str):
        """
        Retrieves the template populator object for the given interaction_name
        :param interaction_name: Human readable interaction id
        :return: A template populator
        :raises MessageGenerationError: when no populator is registered for the name
        """
        interaction_template_populator = self.interactions.get(interaction_name)
        if not interaction_template_populator:
            logger.error('001', 'Failed to find interaction templator for interaction name: {name}',
                         {'name': interaction_name})
            raise MessageGenerationError(f'Failed to find interaction with interaction name: {interaction_name}')
        return interaction_template_populator
    def _populate_message_template(self, template_populator, supplier_message_parameters: Dict) -> str:
        """
        Generates a hl7 message string from the parameters
        :param template_populator: The object responsible for filling the message template
        :param supplier_message_parameters: The parameters to be populated into the message template
        :return: hl7 message string with the populated values
        :raises MessageGenerationError: when template population fails for any reason
        """
        try:
            return template_populator.populate_template(supplier_message_parameters)
        except Exception as e:
            logger.error('002', 'Message generation failed {exception}', {'exception': e})
            raise MessageGenerationError(str(e))
    async def _send_message_to_mhs(self, interaction_id: str,
                                   message: str,
                                   message_id: Optional[str],
                                   correlation_id: Optional[str]):
        """
        Using the message sender dependency, the generated message is forwarded to the mhs
        :param interaction_id: The interaction id used as part of the header
        :param message: hl7 message body
        :param message_id: Optional message id header value
        :param correlation_id: Optional correlation id header value
        :return: The response from the mhs of sending the message
        :raises MessageSendingError: when the underlying send raises any exception
        """
        try:
            return await self.message_sender.send_message_to_mhs(interaction_id, message, message_id, correlation_id)
        except Exception as e:
            logger.error('003', 'Exception raised during message sending: {exception}', {'exception': e})
            raise MessageSendingError(str(e))
| [
"utilities.integration_adaptors_logger.IntegrationAdaptorsLogger",
"builder.pystache_message_builder.MessageGenerationError"
] | [((320, 364), 'utilities.integration_adaptors_logger.IntegrationAdaptorsLogger', 'log.IntegrationAdaptorsLogger', (['"""MSG-HANDLER"""'], {}), "('MSG-HANDLER')\n", (349, 364), True, 'from utilities import integration_adaptors_logger as log\n'), ((2880, 2980), 'builder.pystache_message_builder.MessageGenerationError', 'MessageGenerationError', (['f"""Failed to find interaction with interaction name: {interaction_name}"""'], {}), "(\n f'Failed to find interaction with interaction name: {interaction_name}')\n", (2902, 2980), False, 'from builder.pystache_message_builder import MessageGenerationError\n')] |
#!/usr/bin/env python
# Copyright 2018 Informatics Matters Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
from . import utils
# Files are normally located in sub-directories of the pipeline module path.
# For example a pipeline module 'pipeline_a.py' that expects to use a file
# or SDF picker would place its files in the directory
# 'pipelines/demo/pipeline_a'.
def pick(filename, directory=None):
    """Return the full path of *filename*, or None when it does not exist.

    :param filename: The file, whose path is required.
    :type filename: ``str``
    :param directory: An optional directory.
        If not provided it is calculated automatically.
    :type directory: ``str``
    :return: The full path to the file, or None if it does not exist.
    :rtype: ``str``
    """
    if directory is None:
        # Resolved inline (not in a helper): get_undecorated_calling_module()
        # presumably inspects the call stack, so the call depth matters.
        directory = utils.get_undecorated_calling_module()
        # If the cwd is not '/output' (which indicates we're in a Container)
        # strip the cwd plus the anticipated '/' from the module path.
        cwd = os.getcwd()
        if cwd not in ['/output']:
            directory = directory[len(cwd) + 1:]
    candidate = os.path.join(directory, filename)
    if os.path.isfile(candidate):
        return candidate
    return None
def pick_sdf(filename, directory=None):
    """Return the full path of the chosen SDF file, or None if absent.

    *filename* must not carry an SDF extension; '.sdf.gz' and then '.sdf'
    are tried automatically and the first match is returned INCLUDING its
    extension. (Fix: the previous docstring wrongly claimed the extension
    was excluded from the returned path.)

    :param filename: The SDF file basename, whose path is required.
    :type filename: ``str``
    :param directory: An optional directory.
        If not provided it is calculated automatically.
    :type directory: ``str``
    :return: The full path to the file including its extension,
        or None if it does not exist.
    :rtype: ``str``
    """
    if directory is None:
        # Kept inline: get_undecorated_calling_module() presumably inspects
        # the call stack, so it must be invoked from this frame depth.
        directory = utils.get_undecorated_calling_module()
        # If the 'cwd' is not '/output' (which indicates we're in a Container)
        # then remove the CWD and the anticipated '/' from the module path.
        if os.getcwd() not in ['/output']:
            directory = directory[len(os.getcwd()) + 1:]
    file_path = os.path.join(directory, filename)
    # Prefer the compressed variant, falling back to the plain file.
    for extension in ('.sdf.gz', '.sdf'):
        if os.path.isfile(file_path + extension):
            return file_path + extension
    # Couldn't find a suitable SDF file
    return None
def pick_csv(filename, directory=None):
    """Return the full path of the chosen CSV file, or None if absent.

    *filename* must not carry a CSV extension; '.csv.gz' and then '.csv'
    are tried automatically and the first match is returned INCLUDING its
    extension. (Fix: the previous docstring wrongly claimed the extension
    was excluded from the returned path.)

    :param filename: The CSV file basename, whose path is required.
    :type filename: ``str``
    :param directory: An optional directory.
        If not provided it is calculated automatically.
    :type directory: ``str``
    :return: The full path to the file including its extension,
        or None if it does not exist.
    :rtype: ``str``
    """
    if directory is None:
        # Kept inline: get_undecorated_calling_module() presumably inspects
        # the call stack, so it must be invoked from this frame depth.
        directory = utils.get_undecorated_calling_module()
        # If the 'cwd' is not '/output' (which indicates we're in a Container)
        # then remove the CWD and the anticipated '/' from the module path.
        if os.getcwd() not in ['/output']:
            directory = directory[len(os.getcwd()) + 1:]
    file_path = os.path.join(directory, filename)
    # Prefer the compressed variant, falling back to the plain file.
    for extension in ('.csv.gz', '.csv'):
        if os.path.isfile(file_path + extension):
            return file_path + extension
    # Couldn't find a suitable CSV file
    return None
def pick_smi(filename, directory=None):
    """Return the full path of the chosen SMI file, or None if absent.

    *filename* must not carry an SMI extension; '.smi.gz' and then '.smi'
    are tried automatically and the first match is returned INCLUDING its
    extension. (Fix: the previous docstring wrongly claimed the extension
    was excluded from the returned path.)

    :param filename: The SMI file basename, whose path is required.
    :type filename: ``str``
    :param directory: An optional directory.
        If not provided it is calculated automatically.
    :type directory: ``str``
    :return: The full path to the file including its extension,
        or None if it does not exist.
    :rtype: ``str``
    """
    if directory is None:
        # Kept inline: get_undecorated_calling_module() presumably inspects
        # the call stack, so it must be invoked from this frame depth.
        directory = utils.get_undecorated_calling_module()
        # If the 'cwd' is not '/output' (which indicates we're in a Container)
        # then remove the CWD and the anticipated '/' from the module path.
        if os.getcwd() not in ['/output']:
            directory = directory[len(os.getcwd()) + 1:]
    file_path = os.path.join(directory, filename)
    # Prefer the compressed variant, falling back to the plain file.
    for extension in ('.smi.gz', '.smi'):
        if os.path.isfile(file_path + extension):
            return file_path + extension
    # Couldn't find a suitable SMI file
    return None
| [
"os.path.isfile",
"os.path.join",
"os.getcwd"
] | [((1893, 1926), 'os.path.join', 'os.path.join', (['directory', 'filename'], {}), '(directory, filename)\n', (1905, 1926), False, 'import os\n'), ((3088, 3121), 'os.path.join', 'os.path.join', (['directory', 'filename'], {}), '(directory, filename)\n', (3100, 3121), False, 'import os\n'), ((3129, 3166), 'os.path.isfile', 'os.path.isfile', (["(file_path + '.sdf.gz')"], {}), "(file_path + '.sdf.gz')\n", (3143, 3166), False, 'import os\n'), ((4441, 4474), 'os.path.join', 'os.path.join', (['directory', 'filename'], {}), '(directory, filename)\n', (4453, 4474), False, 'import os\n'), ((4482, 4519), 'os.path.isfile', 'os.path.isfile', (["(file_path + '.csv.gz')"], {}), "(file_path + '.csv.gz')\n", (4496, 4519), False, 'import os\n'), ((5794, 5827), 'os.path.join', 'os.path.join', (['directory', 'filename'], {}), '(directory, filename)\n', (5806, 5827), False, 'import os\n'), ((5835, 5872), 'os.path.isfile', 'os.path.isfile', (["(file_path + '.smi.gz')"], {}), "(file_path + '.smi.gz')\n", (5849, 5872), False, 'import os\n'), ((1951, 1976), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (1965, 1976), False, 'import os\n'), ((3214, 3248), 'os.path.isfile', 'os.path.isfile', (["(file_path + '.sdf')"], {}), "(file_path + '.sdf')\n", (3228, 3248), False, 'import os\n'), ((4567, 4601), 'os.path.isfile', 'os.path.isfile', (["(file_path + '.csv')"], {}), "(file_path + '.csv')\n", (4581, 4601), False, 'import os\n'), ((5920, 5954), 'os.path.isfile', 'os.path.isfile', (["(file_path + '.smi')"], {}), "(file_path + '.smi')\n", (5934, 5954), False, 'import os\n'), ((1787, 1798), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1796, 1798), False, 'import os\n'), ((2982, 2993), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2991, 2993), False, 'import os\n'), ((4335, 4346), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4344, 4346), False, 'import os\n'), ((5688, 5699), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5697, 5699), False, 'import os\n'), ((1857, 1868), 
'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1866, 1868), False, 'import os\n'), ((3052, 3063), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3061, 3063), False, 'import os\n'), ((4405, 4416), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4414, 4416), False, 'import os\n'), ((5758, 5769), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5767, 5769), False, 'import os\n')] |
import numpy as np, pandas as pd
import math
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.stattools import adfuller, kpss, acf
import matplotlib.pyplot as plt
# Global matplotlib defaults applied to every figure produced below.
plt.rcParams.update({'figure.figsize': (9, 7), 'figure.dpi': 120})
# Import data
def Read(name):
    """Fit an ARIMA(1,1,1) to the 'Volume' column of '<name>.csv', plot a
    train/test forecast, and run an ADF stationarity check on the series.

    :param name: CSV file basename (without the '.csv' extension); the file
        must contain a 'Volume' column.
    """
    df = pd.read_csv(name + '.csv')
    # Split the series roughly in half: the first half trains the model,
    # the remainder is held out for comparison against the forecast.
    row_count = len(df) - 1
    count = math.ceil(row_count / 2)
    train = df.Volume[:count]
    test = df.Volume[count:]

    # Build and fit the model on the training half only.
    # (Fix: a second, unused ARIMA fit on the full series was removed.)
    model = ARIMA(train, order=(1, 1, 1))
    fitted = model.fit(disp=-1)
    print(fitted.summary())

    # Forecast exactly as many steps as the hold-out contains.
    # (Fix: the horizon was hard-coded to `count`, which raised a length
    # mismatch in pd.Series whenever len(test) != count, e.g. for
    # odd-sized inputs.)
    fc, se, conf = fitted.forecast(len(test), alpha=0.05)  # 95% conf
    # Make as pandas series aligned with the hold-out index.
    fc_series = pd.Series(fc, index=test.index)
    lower_series = pd.Series(conf[:, 0], index=test.index)
    upper_series = pd.Series(conf[:, 1], index=test.index)

    # Plot the forecast against the actuals with the confidence band.
    plt.figure(figsize=(12, 5), dpi=100)
    plt.plot(train, label='training')
    plt.plot(test, label='actual')
    plt.plot(fc_series, label='forecast')
    plt.fill_between(lower_series.index, lower_series, upper_series,
                     color='k', alpha=.15)
    plt.title('Forecast vs Actuals')
    plt.legend(loc='upper left', fontsize=8)
    plt.show()

    # ADF test on the raw series: p > 0.05 means non-stationarity cannot
    # be rejected.
    # NOTE(review): labelling a non-stationary series as "fraud" is a domain
    # heuristic inherited from the original code -- confirm the intent.
    result = adfuller(df['Volume'], autolag='AIC')
    if result[1] > 0.05:
        print("fraud ")
    else:
        print("not fraud")


Read('foodico')
| [
"pandas.Series",
"math.ceil",
"pandas.read_csv",
"statsmodels.tsa.stattools.adfuller",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.fill_between",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"statsmodels.tsa.arima_model.ARIMA",
"matplotlib.pyplot.l... | [((188, 254), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'figure.figsize': (9, 7), 'figure.dpi': 120}"], {}), "({'figure.figsize': (9, 7), 'figure.dpi': 120})\n", (207, 254), True, 'import matplotlib.pyplot as plt\n'), ((301, 327), 'pandas.read_csv', 'pd.read_csv', (["(name + '.csv')"], {}), "(name + '.csv')\n", (312, 327), True, 'import numpy as np, pandas as pd\n'), ((518, 542), 'math.ceil', 'math.ceil', (['half_rowcount'], {}), '(half_rowcount)\n', (527, 542), False, 'import math\n'), ((678, 711), 'statsmodels.tsa.arima_model.ARIMA', 'ARIMA', (['df.Volume'], {'order': '(1, 1, 1)'}), '(df.Volume, order=(1, 1, 1))\n', (683, 711), False, 'from statsmodels.tsa.arima_model import ARIMA\n'), ((816, 845), 'statsmodels.tsa.arima_model.ARIMA', 'ARIMA', (['train'], {'order': '(1, 1, 1)'}), '(train, order=(1, 1, 1))\n', (821, 845), False, 'from statsmodels.tsa.arima_model import ARIMA\n'), ((1039, 1070), 'pandas.Series', 'pd.Series', (['fc'], {'index': 'test.index'}), '(fc, index=test.index)\n', (1048, 1070), True, 'import numpy as np, pandas as pd\n'), ((1091, 1130), 'pandas.Series', 'pd.Series', (['conf[:, 0]'], {'index': 'test.index'}), '(conf[:, 0], index=test.index)\n', (1100, 1130), True, 'import numpy as np, pandas as pd\n'), ((1151, 1190), 'pandas.Series', 'pd.Series', (['conf[:, 1]'], {'index': 'test.index'}), '(conf[:, 1], index=test.index)\n', (1160, 1190), True, 'import numpy as np, pandas as pd\n'), ((1210, 1246), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 5)', 'dpi': '(100)'}), '(figsize=(12, 5), dpi=100)\n', (1220, 1246), True, 'import matplotlib.pyplot as plt\n'), ((1252, 1285), 'matplotlib.pyplot.plot', 'plt.plot', (['train'], {'label': '"""training"""'}), "(train, label='training')\n", (1260, 1285), True, 'import matplotlib.pyplot as plt\n'), ((1291, 1321), 'matplotlib.pyplot.plot', 'plt.plot', (['test'], {'label': '"""actual"""'}), "(test, label='actual')\n", (1299, 1321), True, 'import 
matplotlib.pyplot as plt\n'), ((1327, 1364), 'matplotlib.pyplot.plot', 'plt.plot', (['fc_series'], {'label': '"""forecast"""'}), "(fc_series, label='forecast')\n", (1335, 1364), True, 'import matplotlib.pyplot as plt\n'), ((1370, 1461), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['lower_series.index', 'lower_series', 'upper_series'], {'color': '"""k"""', 'alpha': '(0.15)'}), "(lower_series.index, lower_series, upper_series, color='k',\n alpha=0.15)\n", (1386, 1461), True, 'import matplotlib.pyplot as plt\n'), ((1484, 1516), 'matplotlib.pyplot.title', 'plt.title', (['"""Forecast vs Actuals"""'], {}), "('Forecast vs Actuals')\n", (1493, 1516), True, 'import matplotlib.pyplot as plt\n'), ((1522, 1562), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""', 'fontsize': '(8)'}), "(loc='upper left', fontsize=8)\n", (1532, 1562), True, 'import matplotlib.pyplot as plt\n'), ((1568, 1578), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1576, 1578), True, 'import matplotlib.pyplot as plt\n'), ((1593, 1630), 'statsmodels.tsa.stattools.adfuller', 'adfuller', (["df['Volume']"], {'autolag': '"""AIC"""'}), "(df['Volume'], autolag='AIC')\n", (1601, 1630), False, 'from statsmodels.tsa.stattools import adfuller, kpss, acf\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 9 15:38:54 2020
@author: rayin
"""
# pic-sure api lib
import PicSureHpdsLib
import PicSureClient
# python_lib for pic-sure
# https://github.com/hms-dbmi/Access-to-Data-using-PIC-SURE-API/tree/master/NIH_Undiagnosed_Diseases_Network
from python_lib.HPDS_connection_manager import tokenManager
from python_lib.utils import get_multiIndex_variablesDict
# analysis
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from collections import Counter
# Working directory for all relative data paths below (machine-specific).
os.chdir("/Users/rayin/Google Drive/Harvard/5_data/UDN/work")
#loading raw input patient data extracted by PIC-SURE from UDN
raw_data_all = pd.read_csv("data/raw/raw_data_all.csv")
#inclusion criteria
#exclude the cases with missing values of candidate gene and variant interpretation
# NOTE(review): columns 21 and 26 are presumably the candidate-gene and
# variant-interpretation fields — confirm against the CSV header.
case_data_with_gene = []
case_data_without_gene = []
for i in range(0, len(raw_data_all)):
    if pd.isna(raw_data_all[raw_data_all.columns.values[21]].iloc[i]) or pd.isna(raw_data_all[raw_data_all.columns.values[26]].iloc[i]):
        case_data_without_gene.append(raw_data_all.iloc[i])
    else:
        case_data_with_gene.append(raw_data_all.iloc[i])
#reformat: rebuild DataFrames from the collected rows; reset_index() inserts
#the old index as column 0, which the iloc[:, 1:39] slice then discards.
case_data_with_gene = pd.DataFrame(case_data_with_gene).reset_index()
case_data_with_gene = case_data_with_gene.iloc[:, 1:39]
case_data_without_gene = pd.DataFrame(case_data_without_gene).reset_index()
case_data_without_gene = case_data_without_gene.iloc[:, 1:39]
#filter the samples by row, axis=0 delete row and by column, axis = 1 delete column
def data_filter(df):
    """Drop mostly-empty rows, then mostly-empty columns, from a DataFrame.

    A row is dropped when more than two thirds of its values are NaN;
    after the row filter, a column is dropped when more than half of the
    remaining rows are NaN in that column.

    Args:
        df (pd.DataFrame): table to clean.

    Returns:
        pd.DataFrame: filtered copy with the row index reset.
    """
    # --- row filter ---
    column_count = df.shape[1]
    row_list = []
    for i in range(0, df.shape[0]):
        # BUG FIX: the original threshold was `column_count / (2/3)`, i.e.
        # 1.5x the number of columns.  A row can contain at most
        # `column_count` NaNs, so the condition was never true and no row
        # was ever dropped.  The intended threshold is 2/3 of the columns.
        if df.iloc[i].isna().sum() > column_count * 2 / 3:
            print(i)  # keep the original debug trace of dropped row indices
            row_list.append(i)
    df_delete_row = df.drop(labels=row_list, axis=0)
    df_delete_row.reset_index(drop=True, inplace=True)
    # --- column filter: more than half of the remaining rows missing ---
    row_count = df_delete_row.shape[0]
    column_list = []
    for j in range(0, df_delete_row.shape[1]):
        if df_delete_row[df_delete_row.columns.values[j]].isna().sum() > row_count / 2:
            column_list.append(j)
    drop_column = [df_delete_row.columns.values[j] for j in column_list]
    df_filter = df_delete_row.drop(labels=drop_column, axis=1)
    return df_filter
# Apply the row/column sparsity filter to the gene-positive cohort.
case_data_with_gene_filter = data_filter(case_data_with_gene)
#statistics and visualization
column_name = list(case_data_with_gene_filter.columns.values)
# NOTE(review): the describe() result below is discarded — presumably meant
# for interactive inspection in a console session.
case_data_with_gene_filter[column_name[2]].describe()
#Variant interpretation. Remove the rejected and under investigation cases.
# NOTE(review): Counter() result is also discarded (interactive inspection).
Counter(case_data_with_gene_filter[column_name[20]])
case_gene_filter_labeled = case_data_with_gene_filter[case_data_with_gene_filter['\\11_Candidate genes\\Status\\'] != 'rejected']
case_gene_filter_labeled = case_gene_filter_labeled[case_gene_filter_labeled['\\12_Candidate variants\\03 Interpretation\\'] != 'investigation_n']
#define 'benign', 'likely benign' and 'uncertain' as 'less pathogenic', 'likely pathogenic' and 'pathogenic' as pathogenic'.
# Collapse the fine-grained interpretation labels into a binary label.
case_gene_filter_labeled = case_gene_filter_labeled.replace('benign', 'less_pathogenic')
case_gene_filter_labeled = case_gene_filter_labeled.replace('likely_benign', 'less_pathogenic')
case_gene_filter_labeled = case_gene_filter_labeled.replace('variant_u_s', 'less_pathogenic')
#case_gene_filter_labeled = case_gene_filter_labeled.replace('investigation_n', 'less_pathogenic')
case_gene_filter_labeled = case_gene_filter_labeled.replace('likely_pathogenic', 'pathogenic')
case_gene_filter_labeled.to_csv("data/processed/case_gene_filter_labeled.csv")  #521 cases
#Manually remove the cases with unknown or incorrect gene names ('Exon-level microarray', '22q11.2 FISH', '20p13 duplication', etc.) and
#6 cases are excluded (index (after index_reset): 4, 55, 334, 408, 422, 496)
#Loading cases after manual curation from file case_gene_update.csv'
case_gene_update = pd.read_csv('data/processed/case_gene_update.csv', index_col=0)  #515 cases
column_name = list(case_gene_update.columns.values)
protein_var = case_gene_update['\\12_Candidate variants\\09 Protein\\']
#Manual curation to remove cases with missing candidate variants or complex variants (e.g., long deletion and duplication)
#Export a clean version named 'variant_clean.csv'
"pandas.DataFrame",
"pandas.read_csv",
"os.chdir",
"collections.Counter",
"pandas.isna"
] | [((551, 612), 'os.chdir', 'os.chdir', (['"""/Users/rayin/Google Drive/Harvard/5_data/UDN/work"""'], {}), "('/Users/rayin/Google Drive/Harvard/5_data/UDN/work')\n", (559, 612), False, 'import os\n'), ((693, 733), 'pandas.read_csv', 'pd.read_csv', (['"""data/raw/raw_data_all.csv"""'], {}), "('data/raw/raw_data_all.csv')\n", (704, 733), True, 'import pandas as pd\n'), ((2661, 2713), 'collections.Counter', 'Counter', (['case_data_with_gene_filter[column_name[20]]'], {}), '(case_data_with_gene_filter[column_name[20]])\n', (2668, 2713), False, 'from collections import Counter\n'), ((3985, 4048), 'pandas.read_csv', 'pd.read_csv', (['"""data/processed/case_gene_update.csv"""'], {'index_col': '(0)'}), "('data/processed/case_gene_update.csv', index_col=0)\n", (3996, 4048), True, 'import pandas as pd\n'), ((944, 1006), 'pandas.isna', 'pd.isna', (['raw_data_all[raw_data_all.columns.values[21]].iloc[i]'], {}), '(raw_data_all[raw_data_all.columns.values[21]].iloc[i])\n', (951, 1006), True, 'import pandas as pd\n'), ((1010, 1072), 'pandas.isna', 'pd.isna', (['raw_data_all[raw_data_all.columns.values[26]].iloc[i]'], {}), '(raw_data_all[raw_data_all.columns.values[26]].iloc[i])\n', (1017, 1072), True, 'import pandas as pd\n'), ((1242, 1275), 'pandas.DataFrame', 'pd.DataFrame', (['case_data_with_gene'], {}), '(case_data_with_gene)\n', (1254, 1275), True, 'import pandas as pd\n'), ((1371, 1407), 'pandas.DataFrame', 'pd.DataFrame', (['case_data_without_gene'], {}), '(case_data_without_gene)\n', (1383, 1407), True, 'import pandas as pd\n')] |
#!/usr/bin/env python3
import numpy as np
import h5py
import matplotlib.pyplot as plt
# import plotly.graph_objects as go
#========= Configuration ===========
DIR ="../data"
file_name = "particle"#"rhoNeutral" #"P"
h5 = h5py.File('../data/'+file_name+'.hdf5','r')
Lx = h5.attrs["Lx"]
Ly = h5.attrs["Ly"]
Lz = h5.attrs["Lz"]
N = h5.attrs["N"]
dp = h5.attrs["dp"]
Nt = h5.attrs["Nt"]
data_num = np.arange(start=0, stop=Nt, step=1, dtype=int)
time = data_num*dp
energy = h5["/energy"]
energy = 3*(np.array(energy[:-1]))/N
fig,ax = plt.subplots(figsize=(6, 6))
plt.plot(time[1:],energy[1:])
ax.set_xlabel("$timestep$")
ax.set_ylabel("$Energy$")
plt.show()
| [
"matplotlib.pyplot.plot",
"h5py.File",
"numpy.array",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((225, 273), 'h5py.File', 'h5py.File', (["('../data/' + file_name + '.hdf5')", '"""r"""'], {}), "('../data/' + file_name + '.hdf5', 'r')\n", (234, 273), False, 'import h5py\n'), ((407, 453), 'numpy.arange', 'np.arange', ([], {'start': '(0)', 'stop': 'Nt', 'step': '(1)', 'dtype': 'int'}), '(start=0, stop=Nt, step=1, dtype=int)\n', (416, 453), True, 'import numpy as np\n'), ((544, 572), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (556, 572), True, 'import matplotlib.pyplot as plt\n'), ((573, 603), 'matplotlib.pyplot.plot', 'plt.plot', (['time[1:]', 'energy[1:]'], {}), '(time[1:], energy[1:])\n', (581, 603), True, 'import matplotlib.pyplot as plt\n'), ((661, 671), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (669, 671), True, 'import matplotlib.pyplot as plt\n'), ((509, 530), 'numpy.array', 'np.array', (['energy[:-1]'], {}), '(energy[:-1])\n', (517, 530), True, 'import numpy as np\n')] |
import copy
import sys
from . import compat
from .compat import urlencode, parse_qs
class Request(compat.Request):
    """HTTP request that URL-encodes ``parameters`` into the request body.

    On Python 3 the encoded query string is UTF-8 encoded to bytes; on
    Python 2 the keys and values are UTF-8 encoded before urlencoding.
    """

    def __init__(self, url, parameters=None, headers=None):
        """Build the request.

        Args:
            url: target URL.
            parameters: optional mapping of form fields; when ``None`` the
                request has no body.
            headers: optional mapping of HTTP headers.
        """
        self.parameters = parameters
        if parameters is None:
            data = None
        else:
            if sys.version_info >= (3, 0):
                data = urlencode(parameters).encode('utf-8')
            else:
                byte_parameters = dict(
                    (k.encode('utf-8'), v.encode('utf-8'))
                    for k, v in parameters.items())
                data = urlencode(byte_parameters)
            # BUG FIX: this assertion previously ran unconditionally, so
            # constructing a Request with no parameters (data is None)
            # always raised AssertionError.  It now only checks the
            # encoded body when one exists.
            assert isinstance(data, bytes)
        if headers is None:
            headers = {}
        compat.Request.__init__(self, url, data, headers)

    def copy(self):
        """Return a shallow copy of this request."""
        return copy.copy(self)

    @property
    def url(self):
        """Full URL of the request, including scheme and query string."""
        return self.get_full_url()
| [
"copy.copy"
] | [((797, 812), 'copy.copy', 'copy.copy', (['self'], {}), '(self)\n', (806, 812), False, 'import copy\n')] |
# -*- coding: utf-8 -*-
"""
Iris classification example, pratice on using high-level API
Algorithms: Neutral Network
Reference: https://www.tensorflow.org/get_started/tflearn
Date: Jun 14, 2017
@author: <NAME>
@Library: tensorflow - high-level API with tf.contrib.learn
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
# import urllib # only python 2
import urllib.request # python 3
import tensorflow as tf
import numpy as np
# Local cache paths and download URLs for the Iris CSVs.
IRIS_TRAINING = "./iris_dataset/iris_training.csv"
IRIS_TRAINING_URL = "http://download.tensorflow.org/data/iris_training.csv"
IRIS_TEST = "./iris_dataset/iris_test.csv"
IRIS_TEST_URL = "http://download.tensorflow.org/data/iris_test.csv"
def main():
    """Download the Iris CSVs if needed, train a tf.contrib.learn DNN
    classifier, report its test accuracy, and classify two new samples."""

    def _fetch_if_missing(path, url):
        # Download the CSV once and cache it locally.
        if not os.path.exists(path):
            payload = urllib.request.urlopen(url).read()
            with open(path, 'wb') as out:
                out.write(payload)

    _fetch_if_missing(IRIS_TRAINING, IRIS_TRAINING_URL)
    _fetch_if_missing(IRIS_TEST, IRIS_TEST_URL)

    # Parse both CSVs into (data, target) datasets.
    load_csv = tf.contrib.learn.datasets.base.load_csv_with_header
    training_set = load_csv(
        filename=IRIS_TRAINING, target_dtype=np.int, features_dtype=np.float32)
    test_set = load_csv(
        filename=IRIS_TEST, target_dtype=np.int, features_dtype=np.float32)

    # All four features are plain real-valued columns.
    feature_columns = [tf.contrib.layers.real_valued_column("", dimension=4)]

    # Three hidden layers of 10, 20 and 10 units; three output classes.
    classifier = tf.contrib.learn.DNNClassifier(
        feature_columns=feature_columns,
        hidden_units=[10, 20, 10],
        n_classes=3,
        model_dir="./tmp/iris_models")

    def train_input_fn():
        return tf.constant(training_set.data), tf.constant(training_set.target)

    # Fit model
    classifier.fit(input_fn=train_input_fn, steps=2000)

    def test_input_fn():
        return tf.constant(test_set.data), tf.constant(test_set.target)

    # Evaluate accuracy on the held-out test set.
    accuracy_score = classifier.evaluate(input_fn=test_input_fn, steps=1)["accuracy"]
    print("\nTest Accuracy: {0:f}\n".format(accuracy_score))

    def new_samples():
        # Two unlabeled flowers to classify.
        return np.array(
            [[6.4, 3.2, 4.5, 1.5],
             [5.8, 3.1, 5.0, 1.7]], dtype=np.float32)

    predictions = list(classifier.predict(input_fn=new_samples))
    print("New samples, Class predictions: {}\n".format(predictions))


if __name__ == "__main__":
    main()
"os.path.exists",
"tensorflow.contrib.learn.DNNClassifier",
"tensorflow.contrib.layers.real_valued_column",
"tensorflow.contrib.learn.datasets.base.load_csv_with_header",
"numpy.array",
"tensorflow.constant"
] | [((1176, 1303), 'tensorflow.contrib.learn.datasets.base.load_csv_with_header', 'tf.contrib.learn.datasets.base.load_csv_with_header', ([], {'filename': 'IRIS_TRAINING', 'target_dtype': 'np.int', 'features_dtype': 'np.float32'}), '(filename=IRIS_TRAINING,\n target_dtype=np.int, features_dtype=np.float32)\n', (1227, 1303), True, 'import tensorflow as tf\n'), ((1332, 1455), 'tensorflow.contrib.learn.datasets.base.load_csv_with_header', 'tf.contrib.learn.datasets.base.load_csv_with_header', ([], {'filename': 'IRIS_TEST', 'target_dtype': 'np.int', 'features_dtype': 'np.float32'}), '(filename=IRIS_TEST,\n target_dtype=np.int, features_dtype=np.float32)\n', (1383, 1455), True, 'import tensorflow as tf\n'), ((1674, 1812), 'tensorflow.contrib.learn.DNNClassifier', 'tf.contrib.learn.DNNClassifier', ([], {'feature_columns': 'feature_columns', 'hidden_units': '[10, 20, 10]', 'n_classes': '(3)', 'model_dir': '"""./tmp/iris_models"""'}), "(feature_columns=feature_columns,\n hidden_units=[10, 20, 10], n_classes=3, model_dir='./tmp/iris_models')\n", (1704, 1812), True, 'import tensorflow as tf\n'), ((841, 870), 'os.path.exists', 'os.path.exists', (['IRIS_TRAINING'], {}), '(IRIS_TRAINING)\n', (855, 870), False, 'import os\n'), ((1001, 1026), 'os.path.exists', 'os.path.exists', (['IRIS_TEST'], {}), '(IRIS_TEST)\n', (1015, 1026), False, 'import os\n'), ((1544, 1597), 'tensorflow.contrib.layers.real_valued_column', 'tf.contrib.layers.real_valued_column', (['""""""'], {'dimension': '(4)'}), "('', dimension=4)\n", (1580, 1597), True, 'import tensorflow as tf\n'), ((1988, 2018), 'tensorflow.constant', 'tf.constant', (['training_set.data'], {}), '(training_set.data)\n', (1999, 2018), True, 'import tensorflow as tf\n'), ((2027, 2059), 'tensorflow.constant', 'tf.constant', (['training_set.target'], {}), '(training_set.target)\n', (2038, 2059), True, 'import tensorflow as tf\n'), ((2378, 2404), 'tensorflow.constant', 'tf.constant', (['test_set.data'], {}), '(test_set.data)\n', (2389, 
2404), True, 'import tensorflow as tf\n'), ((2413, 2441), 'tensorflow.constant', 'tf.constant', (['test_set.target'], {}), '(test_set.target)\n', (2424, 2441), True, 'import tensorflow as tf\n'), ((2692, 2764), 'numpy.array', 'np.array', (['[[6.4, 3.2, 4.5, 1.5], [5.8, 3.1, 5.0, 1.7]]'], {'dtype': 'np.float32'}), '([[6.4, 3.2, 4.5, 1.5], [5.8, 3.1, 5.0, 1.7]], dtype=np.float32)\n', (2700, 2764), True, 'import numpy as np\n')] |
import discord
import os
import openpyxl
from deep_translator import GoogleTranslator
# Discord client; the bot token is read from the environment.
client = discord.Client()
TOKEN = os.getenv('TOKEN')
@client.event
async def on_ready():
    # Log once the gateway connection is established.
    print('We have logged in as {0.user}'.format(client))
@client.event
async def on_message(message):
    """Reply with usage help for '$help'; otherwise translate the message.

    Expected message layout:
        line 1: "<source_lang> <target_lang>"  (e.g. "en fr")
        line 2+: the text to translate
    """
    # Never respond to the bot's own messages (avoids reply loops).
    if message.author == client.user:
        return
    if message.content.startswith('$help'):
        text="In the first line, you have to write the language that is your input word and the language that you want to be your output like this: \n en fr \n In the second line, you must write the sentence you want to translate like this: \n hi world"
        await message.channel.send(text)
        return
    content = message.content
    # ROBUSTNESS FIX: a single-line message previously raised IndexError
    # inside the event handler (split('\n', 1)[1]); now the user is told
    # how to format the request instead.
    if '\n' not in content:
        await message.channel.send("Please send two lines (see $help).")
        return
    first_line, text_to_translate = content.split('\n', 1)
    # IDIOM FIX: the original extracted the language codes with two
    # hand-rolled character loops that amounted to "first token" and
    # "last token" of the first line.
    tokens = first_line.split()
    if not tokens:
        await message.channel.send("Please send two lines (see $help).")
        return
    firstlang = tokens[0]    # source language code
    secondlang = tokens[-1]  # target language code
    translated = GoogleTranslator(source=firstlang, target=secondlang).translate(text_to_translate)
    await message.channel.send(translated)
# Start the bot; this call blocks until the process is stopped.
client.run(TOKEN)
# print(translated)
| [
"discord.Client",
"deep_translator.GoogleTranslator",
"os.getenv"
] | [((102, 118), 'discord.Client', 'discord.Client', ([], {}), '()\n', (116, 118), False, 'import discord\n'), ((128, 146), 'os.getenv', 'os.getenv', (['"""TOKEN"""'], {}), "('TOKEN')\n", (137, 146), False, 'import os\n'), ((1508, 1561), 'deep_translator.GoogleTranslator', 'GoogleTranslator', ([], {'source': 'firstlang', 'target': 'secondlang'}), '(source=firstlang, target=secondlang)\n', (1524, 1561), False, 'from deep_translator import GoogleTranslator\n')] |
import unittest
from finetune.util.input_utils import validation_settings
class TestValidationSettings(unittest.TestCase):

    def test_validation_settings(self):
        """
        Ensure LM only training does not error out
        """
        # (dataset_size, batch_size, val_size, expected_size, expected_interval)
        # expected_interval of None means the interval is not asserted.
        cases = [
            (30, 4, 0, 0, None),
            (80, 4, 0.05, 4, 4),
            (80, 2, 0.05, 4, 8),
            (400, 4, 0.05, 20, 20),
        ]
        for dataset_size, batch_size, val_size, want_size, want_interval in cases:
            got_size, got_interval = validation_settings(
                dataset_size=dataset_size,
                batch_size=batch_size,
                val_size=val_size,
                val_interval=None,
                keep_best_model=False,
            )
            self.assertEqual(got_size, want_size)
            if want_interval is not None:
                self.assertEqual(got_interval, want_interval)
| [
"finetune.util.input_utils.validation_settings"
] | [((274, 383), 'finetune.util.input_utils.validation_settings', 'validation_settings', ([], {'dataset_size': '(30)', 'batch_size': '(4)', 'val_size': '(0)', 'val_interval': 'None', 'keep_best_model': '(False)'}), '(dataset_size=30, batch_size=4, val_size=0, val_interval\n =None, keep_best_model=False)\n', (293, 383), False, 'from finetune.util.input_utils import validation_settings\n'), ((451, 562), 'finetune.util.input_utils.validation_settings', 'validation_settings', ([], {'dataset_size': '(80)', 'batch_size': '(4)', 'val_size': '(0.05)', 'val_interval': 'None', 'keep_best_model': '(False)'}), '(dataset_size=80, batch_size=4, val_size=0.05,\n val_interval=None, keep_best_model=False)\n', (470, 562), False, 'from finetune.util.input_utils import validation_settings\n'), ((673, 784), 'finetune.util.input_utils.validation_settings', 'validation_settings', ([], {'dataset_size': '(80)', 'batch_size': '(2)', 'val_size': '(0.05)', 'val_interval': 'None', 'keep_best_model': '(False)'}), '(dataset_size=80, batch_size=2, val_size=0.05,\n val_interval=None, keep_best_model=False)\n', (692, 784), False, 'from finetune.util.input_utils import validation_settings\n'), ((895, 1007), 'finetune.util.input_utils.validation_settings', 'validation_settings', ([], {'dataset_size': '(400)', 'batch_size': '(4)', 'val_size': '(0.05)', 'val_interval': 'None', 'keep_best_model': '(False)'}), '(dataset_size=400, batch_size=4, val_size=0.05,\n val_interval=None, keep_best_model=False)\n', (914, 1007), False, 'from finetune.util.input_utils import validation_settings\n')] |
# Generated by Django 2.0.3 on 2018-03-16 00:17
from django.conf import settings
import django.contrib.gis.db.models.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # First migration of this app: creates the MapEntry model.
    initial = True

    dependencies = [
        # Depends on whatever model AUTH_USER_MODEL points at.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='MapEntry',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # GeoDjango point (WGS 84 lat/lon, SRID 4326).
                ('location', django.contrib.gis.db.models.fields.PointField(srid=4326)),
                ('name', models.CharField(blank=True, help_text="Leave blank if it's yourself", max_length=256, null=True, verbose_name='Name of place')),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Each owner can use a given place name at most once.
        migrations.AlterUniqueTogether(
            name='mapentry',
            unique_together={('owner', 'name')},
        ),
    ]
| [
"django.db.migrations.AlterUniqueTogether",
"django.db.models.ForeignKey",
"django.db.models.AutoField",
"django.db.migrations.swappable_dependency",
"django.db.models.CharField"
] | [((290, 347), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (321, 347), False, 'from django.db import migrations, models\n'), ((970, 1058), 'django.db.migrations.AlterUniqueTogether', 'migrations.AlterUniqueTogether', ([], {'name': '"""mapentry"""', 'unique_together': "{('owner', 'name')}"}), "(name='mapentry', unique_together={('owner',\n 'name')})\n", (1000, 1058), False, 'from django.db import migrations, models\n'), ((480, 573), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (496, 573), False, 'from django.db import migrations, models\n'), ((686, 817), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Leave blank if it\'s yourself"""', 'max_length': '(256)', 'null': '(True)', 'verbose_name': '"""Name of place"""'}), '(blank=True, help_text="Leave blank if it\'s yourself",\n max_length=256, null=True, verbose_name=\'Name of place\')\n', (702, 817), False, 'from django.db import migrations, models\n'), ((842, 938), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL)\n', (859, 938), False, 'from django.db import migrations, models\n')] |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add ``created``/``last_updated`` audit columns to several extras models."""

    dependencies = [
        ('extras', '0060_customlink_button_class'),
    ]

    # Every listed model gains the same two fields, in the same order as the
    # hand-written version: created first, then last_updated.
    operations = [
        op
        for target in ('customfield', 'customlink', 'exporttemplate', 'webhook')
        for op in (
            migrations.AddField(
                model_name=target,
                name='created',
                field=models.DateField(auto_now_add=True, null=True),
            ),
            migrations.AddField(
                model_name=target,
                name='last_updated',
                field=models.DateTimeField(auto_now=True, null=True),
            ),
        )
    ]
| [
"django.db.models.DateTimeField",
"django.db.models.DateField"
] | [((295, 341), 'django.db.models.DateField', 'models.DateField', ([], {'auto_now_add': '(True)', 'null': '(True)'}), '(auto_now_add=True, null=True)\n', (311, 341), False, 'from django.db import migrations, models\n'), ((472, 518), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)', 'null': '(True)'}), '(auto_now=True, null=True)\n', (492, 518), False, 'from django.db import migrations, models\n'), ((643, 689), 'django.db.models.DateField', 'models.DateField', ([], {'auto_now_add': '(True)', 'null': '(True)'}), '(auto_now_add=True, null=True)\n', (659, 689), False, 'from django.db import migrations, models\n'), ((819, 865), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)', 'null': '(True)'}), '(auto_now=True, null=True)\n', (839, 865), False, 'from django.db import migrations, models\n'), ((994, 1040), 'django.db.models.DateField', 'models.DateField', ([], {'auto_now_add': '(True)', 'null': '(True)'}), '(auto_now_add=True, null=True)\n', (1010, 1040), False, 'from django.db import migrations, models\n'), ((1174, 1220), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)', 'null': '(True)'}), '(auto_now=True, null=True)\n', (1194, 1220), False, 'from django.db import migrations, models\n'), ((1342, 1388), 'django.db.models.DateField', 'models.DateField', ([], {'auto_now_add': '(True)', 'null': '(True)'}), '(auto_now_add=True, null=True)\n', (1358, 1388), False, 'from django.db import migrations, models\n'), ((1515, 1561), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)', 'null': '(True)'}), '(auto_now=True, null=True)\n', (1535, 1561), False, 'from django.db import migrations, models\n')] |
from memsql.common import database
import sys
from datetime import datetime
# MemSQL connection settings.
# NOTE(review): credentials are hard-coded in source; consider loading them
# from environment variables or a secrets store instead.
DATABASE = 'PREPDB'
HOST = '10.1.100.12'
PORT = '3306'
USER = 'root'
PASSWORD = '<PASSWORD>'
def get_connection(db=DATABASE):
    """Open and return a new MemSQL connection.

    Args:
        db: database (schema) name to connect to; defaults to ``DATABASE``.

    Returns:
        A live connection object from ``memsql.common.database`` (usable as
        a context manager).
    """
    connection = database.connect(
        host=HOST,
        port=PORT,
        user=USER,
        password=PASSWORD,
        database=db,
    )
    return connection
def run_temp_scheduler():
    """Invoke the ``tvf_temp_scheduler`` stored procedure and log each row.

    Every result row is timestamped and printed.  If any row contains the
    word 'ERROR', the process aborts immediately with a non-zero status.
    """
    with get_connection() as conn:
        rows = conn.query("call tvf_temp_scheduler()")
        for row in rows:
            values = list(row.values())
            if 'ERROR' in str(values):
                print(datetime.now(), ': ', values)
                # BUG FIX: sys.exit() exits with status 0, which signals
                # success to any calling script/scheduler; report failure
                # with an explicit non-zero exit code instead.
                sys.exit(1)
            else:
                print(datetime.now(), ': ', values)
    print(datetime.now(), ": TEMP TABLES PERSISTED SUCCESSFULLY")
# Run the scheduler immediately when this module is executed.
run_temp_scheduler()
"datetime.datetime.now",
"memsql.common.database.connect",
"sys.exit"
] | [((270, 355), 'memsql.common.database.connect', 'database.connect', ([], {'host': 'HOST', 'port': 'PORT', 'user': 'USER', 'password': 'PASSWORD', 'database': 'db'}), '(host=HOST, port=PORT, user=USER, password=PASSWORD,\n database=db)\n', (286, 355), False, 'from memsql.common import database\n'), ((613, 623), 'sys.exit', 'sys.exit', ([], {}), '()\n', (621, 623), False, 'import sys\n'), ((557, 571), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (569, 571), False, 'from datetime import datetime\n'), ((664, 678), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (676, 678), False, 'from datetime import datetime\n'), ((726, 740), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (738, 740), False, 'from datetime import datetime\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from documents.tests.utils import generate_random_documents
from categories.models import Category
class Command(BaseCommand):
    """Management command that creates random test documents.

    Uses the legacy positional-argument style (``args``/``handle(*args)``)
    of older Django versions rather than ``add_arguments``.
    """
    args = '<number_of_documents> <category_id>'
    help = 'Creates a given number of random documents'
    def handle(self, *args, **options):
        # Positional CLI arguments: count first, then target category pk.
        # Raises IndexError/ValueError on missing or non-numeric arguments.
        nb_of_docs = int(args[0])
        category_id = int(args[1])
        category = Category.objects.get(pk=category_id)
        generate_random_documents(nb_of_docs, category)
        # stdout.write here expects bytes, hence the .encode().
        self.stdout.write(
            'Successfully generated {nb_of_docs} documents'.format(
                nb_of_docs=nb_of_docs,
            ).encode()
        )
| [
"categories.models.Category.objects.get",
"documents.tests.utils.generate_random_documents"
] | [((460, 496), 'categories.models.Category.objects.get', 'Category.objects.get', ([], {'pk': 'category_id'}), '(pk=category_id)\n', (480, 496), False, 'from categories.models import Category\n'), ((506, 553), 'documents.tests.utils.generate_random_documents', 'generate_random_documents', (['nb_of_docs', 'category'], {}), '(nb_of_docs, category)\n', (531, 553), False, 'from documents.tests.utils import generate_random_documents\n')] |
import re
from typing import Any, Dict
from checkov.common.models.consts import DOCKER_IMAGE_REGEX
from checkov.common.models.enums import CheckResult
from checkov.kubernetes.checks.resource.base_container_check import BaseK8sContainerCheck
class ImagePullPolicyAlways(BaseK8sContainerCheck):
    def __init__(self) -> None:
        """
        Image pull policy should be set to always to ensure you get the correct image and imagePullSecrets are correct
        Default is 'IfNotPresent' unless image tag is omitted or :latest
        https://kubernetes.io/docs/concepts/configuration/overview/#container-images
        An admission controller could be used to enforce imagePullPolicy
        """
        name = "Image Pull Policy should be Always"
        id = "CKV_K8S_15"
        # Location: container .imagePullPolicy
        super().__init__(name=name, id=id)

    def scan_container_conf(self, metadata: Dict[str, Any], conf: Dict[str, Any]) -> CheckResult:
        # Evaluate a single container spec: PASSED only when the image pull
        # policy is (explicitly or by Kubernetes default) "Always".
        self.evaluated_container_keys = ["image", "imagePullPolicy"]
        if conf.get("image"):
            # Remove the digest, if present
            image_val = conf["image"]
            if not isinstance(image_val, str) or image_val.strip() == "":
                # Malformed/templated image values cannot be evaluated.
                return CheckResult.UNKNOWN
            if "@" in image_val:
                image_val = image_val[0 : image_val.index("@")]
            # NOTE(review): assumes the image string always matches
            # DOCKER_IMAGE_REGEX; an unmatched value would raise IndexError
            # here — confirm the regex covers all accepted image forms.
            (image, tag) = re.findall(DOCKER_IMAGE_REGEX, image_val)[0]
            if "imagePullPolicy" not in conf:
                if tag == "latest" or tag == "":
                    # Default imagePullPolicy = Always
                    return CheckResult.PASSED
                else:
                    # Default imagePullPolicy = IfNotPresent
                    return CheckResult.FAILED
            else:
                if conf["imagePullPolicy"] != "Always":
                    return CheckResult.FAILED
        else:
            # No image at all: cannot guarantee the pull policy.
            return CheckResult.FAILED
        return CheckResult.PASSED

# Module-level instance registers the check with checkov's runner.
check = ImagePullPolicyAlways()
| [
"re.findall"
] | [((1393, 1434), 're.findall', 're.findall', (['DOCKER_IMAGE_REGEX', 'image_val'], {}), '(DOCKER_IMAGE_REGEX, image_val)\n', (1403, 1434), False, 'import re\n')] |
import os
import glob
import sys
import argparse
import re
from collections import defaultdict
from celescope.__init__ import __CONDA__
from celescope.fusion.__init__ import __STEPS__, __ASSAY__
from celescope.tools.utils import merge_report, generate_sjm
from celescope.tools.utils import parse_map_col4, multi_opts, link_data
def main():
    """Assemble the celescope fusion pipeline for every sample in the mapfile.

    For each sample this builds the shell commands for the steps
    sample -> barcode -> cutadapt -> STAR_fusion -> count_fusion and emits
    them either as an SJM job graph (mod == 'sjm') or as per-sample shell
    scripts (mod == 'shell').
    """
    # init: assay-wide constants from the package.
    assay = __ASSAY__
    steps = __STEPS__
    conda = __CONDA__
    app = 'celescope'
    # parser: shared multi-sample options plus fusion-specific arguments.
    parser = multi_opts(assay)
    parser.add_argument('--starMem', help='starMem', default=10)
    parser.add_argument('--thread', help='thread', default=6)
    parser.add_argument('--genomeDir', help='fusion genomeDir', required=True)
    parser.add_argument(
        "--fusion_pos",
        help="first base position of the second gene(0-start),tsv file",
        required=True)
    parser.add_argument("--UMI_min", default=1)
    args = parser.parse_args()
    # read args
    outdir = args.outdir
    chemistry = args.chemistry
    pattern = args.pattern
    whitelist = args.whitelist
    linker = args.linker
    lowQual = args.lowQual
    lowNum = args.lowNum
    mod = args.mod
    rm_files = args.rm_files
    # parse mapfile: sample -> fastq pair, sample -> matched scRNA dir.
    fq_dict, match_dict = parse_map_col4(args.mapfile, None)
    # link raw fastqs into the output directory.
    link_data(outdir, fq_dict)
    # custom args
    thread = args.thread
    genomeDir = args.genomeDir
    starMem = args.starMem
    fusion_pos = args.fusion_pos
    UMI_min = args.UMI_min
    # mk log dir
    logdir = outdir + '/log'
    os.system('mkdir -p %s' % (logdir))
    # script init: sjm_cmd accumulates job definitions, sjm_order the
    # dependency edges, shell_dict the per-sample shell script bodies.
    sjm_cmd = 'log_dir %s\n' % (logdir)
    sjm_order = ''
    shell_dict = defaultdict(str)
    # outdir dict: one numbered sub-directory per step, per sample.
    for sample in fq_dict:
        outdir_dic = {}
        index = 0
        for step in steps:
            step_outdir = f"{outdir}/{sample}/{index:02d}.{step}"
            outdir_dic.update({step: step_outdir})
            index += 1
        # sample: report sample metadata.
        step = "sample"
        cmd = (
            f'{app} {assay} {step} '
            f'--chemistry {chemistry} '
            f'--sample {sample} --outdir {outdir_dic[step]} --assay {assay} '
        )
        sjm_cmd += generate_sjm(cmd, f'{step}_{sample}', conda)
        shell_dict[sample] += cmd + '\n'
        last_step = step
        # barcode: demultiplex and tag reads with cell barcodes/UMIs.
        arr = fq_dict[sample]
        step = "barcode"
        cmd = (
            f'{app} {assay} {step} '
            f'--fq1 {arr[0]} --fq2 {arr[1]} --chemistry {chemistry} '
            f'--pattern {pattern} --whitelist {whitelist} --linker {linker} '
            f'--sample {sample} --lowQual {lowQual} --thread {thread} '
            f'--lowNum {lowNum} --outdir {outdir_dic[step]} --assay {assay} '
        )
        sjm_cmd += generate_sjm(cmd, f'{step}_{sample}', conda, m=5, x=thread)
        sjm_order += f'order {step}_{sample} after {last_step}_{sample}\n'
        shell_dict[sample] += cmd + '\n'
        last_step = step
        # adapt: trim adapters from the barcode-tagged reads.
        step = "cutadapt"
        fq = f'{outdir_dic["barcode"]}/{sample}_2.fq.gz'
        cmd = (
            f'{app} {assay} {step} '
            f'--fq {fq} --sample {sample} --outdir '
            f'{outdir_dic[step]} --assay {assay} '
        )
        sjm_cmd += generate_sjm(cmd, f'{step}_{sample}', conda, m=5, x=1)
        sjm_order += f'order {step}_{sample} after {last_step}_{sample}\n'
        shell_dict[sample] += cmd + '\n'
        last_step = step
        # STAR_fusion: align the clean reads against the fusion reference.
        step = 'STAR_fusion'
        input_read = f'{outdir_dic["cutadapt"]}/{sample}_clean_2.fq.gz'
        cmd = (
            f'{app} {assay} {step} '
            f'--outdir {outdir_dic[step]} --assay {assay} --sample {sample} '
            f'--thread {thread} '
            f'--input_read {input_read} '
            f'--genomeDir {genomeDir} '
        )
        sjm_cmd += generate_sjm(cmd, f'{step}_{sample}', conda, m=starMem, x=thread)
        sjm_order += f'order {step}_{sample} after {last_step}_{sample}\n'
        shell_dict[sample] += cmd + '\n'
        last_step = step
        # count_fusion: quantify fusion-supporting UMIs per cell.
        step = 'count_fusion'
        bam = f'{outdir_dic["STAR_fusion"]}/{sample}_Aligned.sortedByCoord.out.bam'
        cmd = (
            f'{app} {assay} {step} '
            f'--outdir {outdir_dic[step]} --assay {assay} --sample {sample} '
            f'--bam {bam} '
            f'--UMI_min {UMI_min} '
            f'--match_dir {match_dict[sample]} '
            f'--fusion_pos {fusion_pos} '
        )
        # NOTE(review): unlike the earlier steps, cmd is NOT appended to
        # shell_dict here, so 'shell' mode scripts omit count_fusion —
        # confirm whether that is intentional.
        sjm_cmd += generate_sjm(cmd, f'{step}_{sample}', conda, m=20, x=thread)
        sjm_order += f'order {step}_{sample} after {last_step}_{sample}\n'
        last_step = step
    # merged report
    if mod == 'sjm':
        step = 'merge_report'
        merge_report(
            fq_dict,
            steps,
            last_step,
            sjm_cmd,
            sjm_order,
            logdir,
            conda,
            outdir,
            rm_files,
        )
    if mod == 'shell':
        os.system('mkdir -p ./shell/')
        for sample in shell_dict:
            with open(f'./shell/{sample}.sh', 'w') as f:
                f.write(shell_dict[sample])
if __name__ == '__main__':
    main()
"celescope.tools.utils.parse_map_col4",
"celescope.tools.utils.link_data",
"collections.defaultdict",
"celescope.tools.utils.generate_sjm",
"celescope.tools.utils.merge_report",
"celescope.tools.utils.multi_opts",
"os.system"
] | [((469, 486), 'celescope.tools.utils.multi_opts', 'multi_opts', (['assay'], {}), '(assay)\n', (479, 486), False, 'from celescope.tools.utils import parse_map_col4, multi_opts, link_data\n'), ((1220, 1254), 'celescope.tools.utils.parse_map_col4', 'parse_map_col4', (['args.mapfile', 'None'], {}), '(args.mapfile, None)\n', (1234, 1254), False, 'from celescope.tools.utils import parse_map_col4, multi_opts, link_data\n'), ((1271, 1297), 'celescope.tools.utils.link_data', 'link_data', (['outdir', 'fq_dict'], {}), '(outdir, fq_dict)\n', (1280, 1297), False, 'from celescope.tools.utils import parse_map_col4, multi_opts, link_data\n'), ((1511, 1544), 'os.system', 'os.system', (["('mkdir -p %s' % logdir)"], {}), "('mkdir -p %s' % logdir)\n", (1520, 1544), False, 'import os\n'), ((1642, 1658), 'collections.defaultdict', 'defaultdict', (['str'], {}), '(str)\n', (1653, 1658), False, 'from collections import defaultdict\n'), ((2156, 2200), 'celescope.tools.utils.generate_sjm', 'generate_sjm', (['cmd', 'f"""{step}_{sample}"""', 'conda'], {}), "(cmd, f'{step}_{sample}', conda)\n", (2168, 2200), False, 'from celescope.tools.utils import merge_report, generate_sjm\n'), ((2722, 2781), 'celescope.tools.utils.generate_sjm', 'generate_sjm', (['cmd', 'f"""{step}_{sample}"""', 'conda'], {'m': '(5)', 'x': 'thread'}), "(cmd, f'{step}_{sample}', conda, m=5, x=thread)\n", (2734, 2781), False, 'from celescope.tools.utils import merge_report, generate_sjm\n'), ((3209, 3263), 'celescope.tools.utils.generate_sjm', 'generate_sjm', (['cmd', 'f"""{step}_{sample}"""', 'conda'], {'m': '(5)', 'x': '(1)'}), "(cmd, f'{step}_{sample}', conda, m=5, x=1)\n", (3221, 3263), False, 'from celescope.tools.utils import merge_report, generate_sjm\n'), ((3805, 3870), 'celescope.tools.utils.generate_sjm', 'generate_sjm', (['cmd', 'f"""{step}_{sample}"""', 'conda'], {'m': 'starMem', 'x': 'thread'}), "(cmd, f'{step}_{sample}', conda, m=starMem, x=thread)\n", (3817, 3870), False, 'from celescope.tools.utils import 
merge_report, generate_sjm\n'), ((4465, 4525), 'celescope.tools.utils.generate_sjm', 'generate_sjm', (['cmd', 'f"""{step}_{sample}"""', 'conda'], {'m': '(20)', 'x': 'thread'}), "(cmd, f'{step}_{sample}', conda, m=20, x=thread)\n", (4477, 4525), False, 'from celescope.tools.utils import merge_report, generate_sjm\n'), ((4706, 4802), 'celescope.tools.utils.merge_report', 'merge_report', (['fq_dict', 'steps', 'last_step', 'sjm_cmd', 'sjm_order', 'logdir', 'conda', 'outdir', 'rm_files'], {}), '(fq_dict, steps, last_step, sjm_cmd, sjm_order, logdir, conda,\n outdir, rm_files)\n', (4718, 4802), False, 'from celescope.tools.utils import merge_report, generate_sjm\n'), ((4949, 4979), 'os.system', 'os.system', (['"""mkdir -p ./shell/"""'], {}), "('mkdir -p ./shell/')\n", (4958, 4979), False, 'import os\n')] |
# Generated by Django 2.2 on 2019-12-20 06:56
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the user linkage fields from the ``university`` model.

    Removes ``user``, ``userEmail`` and ``userPhone`` (schema change
    only; no data migration is performed).
    """

    dependencies = [
        ('University', '0011_auto_20191219_1913'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='university',
            name='user',
        ),
        migrations.RemoveField(
            model_name='university',
            name='userEmail',
        ),
        migrations.RemoveField(
            model_name='university',
            name='userPhone',
        ),
    ]
| [
"django.db.migrations.RemoveField"
] | [((228, 288), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""university"""', 'name': '"""user"""'}), "(model_name='university', name='user')\n", (250, 288), False, 'from django.db import migrations\n'), ((333, 398), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""university"""', 'name': '"""userEmail"""'}), "(model_name='university', name='userEmail')\n", (355, 398), False, 'from django.db import migrations\n'), ((443, 508), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""university"""', 'name': '"""userPhone"""'}), "(model_name='university', name='userPhone')\n", (465, 508), False, 'from django.db import migrations\n')] |
# Demonstration of missing-value handling in pandas.
import pandas as pd
import numpy as np
# Build a small frame with NaNs in both a numeric and a string column.
df = pd.DataFrame({"Name":['Kunal' , 'Mohit' , 'Rohit' ] ,"age":[np.nan , 23, 45] , "sex":['M' , np.nan , 'M']})
# Count null values per column and summarize the numeric columns.
print(df.isnull().sum())
print(df.describe())
# Drop rows containing NaN (dropna returns a copy; df is unchanged).
print(len(df.dropna()) , df.dropna())
# Drop the *columns* containing NaN instead of the rows.
print(len(df.dropna(axis=1)) , df.dropna(axis=1))
print(len(df) , df)
# df itself was never modified, so the null counts are unchanged.
print(df.isnull().sum())
| [
"pandas.DataFrame"
] | [((72, 179), 'pandas.DataFrame', 'pd.DataFrame', (["{'Name': ['Kunal', 'Mohit', 'Rohit'], 'age': [np.nan, 23, 45], 'sex': ['M',\n np.nan, 'M']}"], {}), "({'Name': ['Kunal', 'Mohit', 'Rohit'], 'age': [np.nan, 23, 45],\n 'sex': ['M', np.nan, 'M']})\n", (84, 179), True, 'import pandas as pd\n')] |
import datetime
from .consola import Consola
from .uiscreen import UIScreen
from ..core.reloj import Reloj
class AjustarReloj(UIScreen):
    """Menu screen for adjusting the application clock.

    Lets the user set the date, set the time, advance a number of days,
    or run the time-dependent processes, then optionally return to the
    home screen.
    """

    def __init__(self, unMain, unUsuario):
        super().__init__(unMain)
        self.usuario = unUsuario

    def run(self):
        """Draw the menu once and handle a single user selection."""
        self.consola.prnt("")
        self.consola.prnt(" Ahora: %s" % self.main.getReloj().getFechaYTiempo().strftime("%d/%m/%Y %H:%M:%S"))
        self.consola.prnt("===========================================================")
        self.consola.prnt("1. Ajustar fecha")
        self.consola.prnt("2. Ajustar tiempo")
        self.consola.prnt("3. Avanzar una cantidad de dias")
        self.consola.prnt("4. Correr procesos dependientes del tiempo")
        self.consola.prnt("-----------------------------------------------------------")
        self.consola.prnt("9. Volver a la pantalla anterior")
        self.consola.prnt("")
        opt = self.consola.askInput("Ingrese el número de la opcion que le interesa: ")
        if opt == "1":
            self.consola.prnt("-----------------------------------------------------------")
            self.consola.prnt("")
            inputDate = self.consola.askInput("Ingrese la fecha en formato DD/MM/YYYY: ")
            try:
                date = datetime.datetime.strptime(inputDate, "%d/%m/%Y")
                self.consola.clear()
                self.main.getReloj().resetFechaYTiempo(datetime.datetime.combine(date, self.main.getReloj().getTiempo()))
            except (ValueError, TypeError):
                # Narrowed from a bare ``except`` so that programming errors
                # and KeyboardInterrupt are no longer silently swallowed.
                self.consola.clear()
                self.consola.prnt("[ERROR] La fecha ingresada es inválida")
        elif opt == "2":
            self.consola.prnt("-----------------------------------------------------------")
            self.consola.prnt("")
            inputTime = self.consola.askInput("Ingrese el tiempo en formato HH:MM:SS: ")
            try:
                time = datetime.datetime.strptime(inputTime, "%H:%M:%S")
                self.consola.clear()
                self.main.getReloj().resetFechaYTiempo(datetime.datetime.combine(self.main.getReloj().getFecha(), time.time()))
            except (ValueError, TypeError):
                # Narrowed from a bare ``except`` (see option "1").
                self.consola.clear()
                self.consola.prnt("[ERROR] El tiempo ingresado es inválido")
        elif opt == "3":
            self.consola.prnt("-----------------------------------------------------------")
            self.consola.prnt("")
            inputDays = self.consola.askInput("Ingrese la cantidad de dias: ")
            try:
                days = datetime.timedelta(days=int(inputDays))
            except (ValueError, OverflowError):
                # int() raises ValueError on non-numeric input; timedelta can
                # raise OverflowError for absurdly large values.
                days = None
                self.consola.clear()
                self.consola.prnt("[ERROR] La cantidad ingresada es inválida")
            if days is not None:
                self.consola.clear()
                self.main.getReloj().resetFechaYTiempo(self.main.getReloj().getFechaYTiempo() + days)
        elif opt == "4":
            self.consola.clear()
            # Fire the observers that depend on the (possibly changed) time.
            self.main.getReloj().notificar()
            self.consola.prnt("[MSG] Procesos ejecutados")
        elif opt == "9":
            # Imported lazily to avoid a circular import with homeScreen.
            from .homeScreen import HomeScreen
            self.consola.clear()
            self.main.setScreen(HomeScreen(self.main, self.usuario))
        else:
            # Unknown option: just clear and let the caller redraw the menu.
            self.consola.clear()
| [
"datetime.datetime.strptime"
] | [((1150, 1199), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['inputDate', '"""%d/%m/%Y"""'], {}), "(inputDate, '%d/%m/%Y')\n", (1176, 1199), False, 'import datetime\n'), ((1663, 1712), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['inputTime', '"""%H:%M:%S"""'], {}), "(inputTime, '%H:%M:%S')\n", (1689, 1712), False, 'import datetime\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import traceback
from selenium.webdriver import ChromeOptions
from signin.chrome import find_chrome_driver_path, JdSession
from signin.jd_job import jobs_all
from lib.log import logger
from lib.settings import PC_UA
from lib.settings import MOBILE_UA
class JDUser:
    """Holds the credentials and runtime preferences for one JD account."""

    def __init__(self, username, password, jobs_skip=None):
        # Account credentials.
        self.username = username
        self.password = password
        # Jobs the user wants excluded from the run (None means none).
        self.jobs_skip = jobs_skip if jobs_skip else []
        # Module-level collaborators and defaults.
        self.logger = logger
        self.ua_pc = PC_UA
        self.ua = MOBILE_UA
        self.headless = True
class JD:
    """Drives every scheduled sign-in job for a single JD account."""

    def __init__(self, username, password):
        self.user = JDUser(username, password)
        self.session = self.make_session()
        # Keep every known job whose class name the user did not opt out of.
        self.job_list = [cls for cls in jobs_all if cls.__name__ not in self.user.jobs_skip]

    def sign(self):
        """Run each job once, print a summary, and return overall success."""
        failed_names = []
        for job_cls in self.job_list:
            job = job_cls(self)
            # (Per-job User-Agent switching used to happen here but is
            # disabled; jobs run with the session's current headers.)
            try:
                job.run()
            except Exception as e:
                logger.error('# 任务运行出错: ' + repr(e))
                traceback.print_exc()
            if not job.job_success:
                failed_names.append(job.job_name)
        print('=================================')
        print('= 任务数: {}; 失败数: {}'.format(len(self.job_list), len(failed_names)))
        if failed_names:
            print('= 失败的任务: {}'.format(failed_names))
        else:
            print('= 全部成功 ~')
        print('=================================')
        return not failed_names

    def make_session(self) -> JdSession:
        """Build a Chrome-backed session honoring the user's headless flag."""
        driver_path = find_chrome_driver_path()
        session = JdSession(webdriver_path=str(driver_path),
                            browser='chrome',
                            webdriver_options=ChromeOptions())
        opts = session.webdriver_options
        opts.add_argument('lang=zh_CN.UTF-8')
        if self.user.headless:
            opts.add_argument('headless')
        return session
if __name__ == '__main__':
    # No standalone behavior: this module is imported and driven elsewhere.
    pass
| [
"signin.chrome.find_chrome_driver_path",
"selenium.webdriver.ChromeOptions",
"traceback.print_exc"
] | [((1941, 1966), 'signin.chrome.find_chrome_driver_path', 'find_chrome_driver_path', ([], {}), '()\n', (1964, 1966), False, 'from signin.chrome import find_chrome_driver_path, JdSession\n'), ((2120, 2135), 'selenium.webdriver.ChromeOptions', 'ChromeOptions', ([], {}), '()\n', (2133, 2135), False, 'from selenium.webdriver import ChromeOptions\n'), ((1427, 1448), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (1446, 1448), False, 'import traceback\n')] |
from unittest import TestCase
from unittest.mock import Mock
from tests.test_types_generator import athena_task
class TestAwsAthenaTask(TestCase):
    """The abstract Athena task must refuse to run without an implementation."""

    def test_run_task(self) -> None:
        dummy_arg = Mock()
        with self.assertRaises(NotImplementedError):
            athena_task()._run_task(dummy_arg)
| [
"unittest.mock.Mock",
"tests.test_types_generator.athena_task"
] | [((276, 282), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (280, 282), False, 'from unittest.mock import Mock\n'), ((252, 265), 'tests.test_types_generator.athena_task', 'athena_task', ([], {}), '()\n', (263, 265), False, 'from tests.test_types_generator import athena_task\n')] |
from . import db
from flask import Flask, current_app
from . import create_app
import os
from . import db
app = create_app()

with app.app_context():
    # First-run setup: write a template API config file if none exists.
    # The keys are intentionally left blank for the operator to fill in.
    if os.path.exists("clearsky/config.json"):
        pass
    else:
        with open('clearsky/config.json', 'w') as configuration:
            print("Opened config file")
            configuration.write("""
    {
        "OpenWeather-url": "https://api.openweathermap.org/data/2.5/onecall?lat={}&lon={}&exclude=minutely,daily,alerts&appid={}&units=imperial",
        "OpenWeather-key": "",
        "Radar.io-url": "https://api.radar.io/v1/geocode/forward?query={}",
        "Radar.io-key": ""
    }
    """)
    # Initialize the SQLite database on first use only.
    if not os.path.exists(current_app.instance_path + "/" + ('clearsky.sqlite')):
        print("Initializing database for first-time use")
        db.init_db()
| [
"os.path.exists"
] | [((159, 197), 'os.path.exists', 'os.path.exists', (['"""clearsky/config.json"""'], {}), "('clearsky/config.json')\n", (173, 197), False, 'import os\n'), ((660, 727), 'os.path.exists', 'os.path.exists', (["(current_app.instance_path + '/' + 'clearsky.sqlite')"], {}), "(current_app.instance_path + '/' + 'clearsky.sqlite')\n", (674, 727), False, 'import os\n')] |
import logging
from abc import ABC, abstractmethod
from file_read_backwards import FileReadBackwards
import threading
import os
class Logger(ABC):
    """Abstract base class for the application's file-backed loggers.

    Responsibilities shared by all concrete loggers:
      * create the ``Logs`` directory and the per-logger log file,
      * maintain a thread-safe, monotonically increasing ``log_id`` that
        resumes from the last entry persisted on disk,
      * track the last index acknowledged by the ground station and expose
        the not-yet-sent lines,
      * provide the ``write_*`` convenience methods.
    """

    def __init__(self, filename):
        # Protects ``log_id``, which may be incremented from several threads.
        self.lock = threading.Lock()
        self.dir = "Logs"
        if not os.path.isdir(self.dir):
            os.mkdir(self.dir)
        # BUGFIX: the template previously contained no ``{filename}``
        # placeholder (the keyword argument was silently ignored by
        # str.format), so every logger ended up writing to the same file.
        self.file_name = "{dir}/{filename}".format(dir=self.dir, filename=filename)
        last_index = self.get_last_index()
        self.last_sended_index = str(last_index)
        # Resume numbering after the last persisted entry (0 for a new file).
        self.log_id = 0 if last_index == 1 else last_index

    def set_up_logger(self, formatter, name):
        """Attach a file handler with the given format to the named logger.

        Arguments:
            formatter {string} -- The message format string
            name {string} -- The name of the logger
        """
        self.formatter = logging.Formatter(formatter)
        self.handler = logging.FileHandler(self.file_name)
        self.handler.setFormatter(self.formatter)
        self.logger = logging.getLogger(name)
        # INFO level: records emitted via write_debug are filtered out.
        self.logger.setLevel(logging.INFO)
        self.logger.addHandler(self.handler)

    @abstractmethod
    def get_instance(self):
        """Return the singleton instance of the logger.

        Every concrete logger must override this.
        """
        pass

    def set_last_sended_index(self, index):
        """Record the last index the ground station acknowledged.

        Arguments:
            index {int} -- The last index that the ground station received
        """
        self.last_sended_index = index

    def get_last_index(self):
        """Return the index of the last valid (parseable) log row.

        Returns:
            [int] -- The index from the last parseable row, or 1 if the
            file is missing or contains no parseable row.
        """
        if self.isSafeToRead():
            with FileReadBackwards(self.file_name, encoding="utf-8") as log_file:
                for line in log_file:
                    try:
                        return int(line.split(',')[0])
                    except ValueError:
                        # Skip malformed lines and keep scanning backwards.
                        continue
        return 1

    def isSafeToRead(self):
        """Check whether the log file can be read.

        Returns:
            [boolean] -- True if both the directory and the file exist,
            False otherwise.
        """
        return os.path.isdir(self.dir) and os.path.exists(self.file_name)

    def get_unsend_data(self):
        """Collect the log lines not yet sent to the ground station.

        Returns:
            [list] -- The unsent log lines, oldest first
            [int] -- The number of unsent log lines
        """
        unsend_logs = []
        total_logs = 0
        with FileReadBackwards(self.file_name, encoding="utf-8") as log_file:
            for line in log_file:
                line_id = line.split(',')[0]
                if line_id == self.last_sended_index:
                    # NOTE(review): ``line_id`` is a str, so ``line_id == 1``
                    # can never be true. If the intent was to include the very
                    # first line, the comparison should be against '1' —
                    # confirm before changing behavior.
                    if line_id == 1:
                        total_logs += 1
                        unsend_logs.insert(0, line)
                    break
                total_logs += 1
                unsend_logs.insert(0, line)
        return unsend_logs, total_logs

    def inc_log_id(self):
        """Thread-safely increase the running log id."""
        with self.lock:
            self.log_id += 1

    def write_info(self, message):
        """Log ``message`` at INFO level.

        Arguments:
            message {string} -- the log message
        """
        self.inc_log_id()
        self.logger.info(message,
                         extra={'log_id': self.log_id})

    def write_error(self, message):
        """Log ``message`` at ERROR level.

        Arguments:
            message {string} -- the log message
        """
        self.inc_log_id()
        self.logger.error(message,
                          extra={'log_id': self.log_id})

    def write_warning(self, message):
        """Log ``message`` at WARNING level.

        Arguments:
            message {string} -- the log message
        """
        self.inc_log_id()
        self.logger.warning(message,
                            extra={'log_id': self.log_id})

    def write_debug(self, message):
        """Log ``message`` at DEBUG level (filtered out at the INFO level set
        in ``set_up_logger``).

        Arguments:
            message {string} -- the log message
        """
        self.inc_log_id()
        self.logger.debug(message,
                          extra={'log_id': self.log_id})

    def write_critical(self, message):
        """Log ``message`` at CRITICAL level.

        Arguments:
            message {string} -- the log message
        """
        self.inc_log_id()
        self.logger.critical(message,
                             extra={'log_id': self.log_id})

    def write_exception(self, message):
        """Log ``message`` together with the current exception traceback.

        Arguments:
            message {string} -- the log message
        """
        self.inc_log_id()
        self.logger.exception(message,
                              extra={'log_id': self.log_id})
"""
Class for Logging ADCS action so you can
recover your system.
"""
class AdcsLogger(Logger):
    """Singleton logger that records raw ADCS actions for later recovery."""

    __instance = None

    def __init__(self, filename='adcs.log'):
        if AdcsLogger.__instance is not None:
            raise Exception("This class is a singleton!")
        super(AdcsLogger, self).__init__(filename)
        # Bare messages only — no id, timestamp or level prefix.
        self.set_up_logger('%(message)s', 'adcs_logger')
        AdcsLogger.__instance = self

    def get_instance(self):
        # Lazily create the singleton on first access.
        if AdcsLogger.__instance is None:
            AdcsLogger()
        return AdcsLogger.__instance
class InfoLogger(Logger):
    """Singleton logger for timestamped, levelled application messages."""

    __instance = None

    def __init__(self, filename='info.log'):
        if InfoLogger.__instance is not None:
            raise Exception("This class is a singleton!")
        super(InfoLogger, self).__init__(filename)
        fmt = '%(log_id)s,%(asctime)s %(levelname)s %(message)s'
        self.set_up_logger(fmt, 'info_logger')
        InfoLogger.__instance = self

    def get_instance(self):
        # Lazily create the singleton on first access.
        if InfoLogger.__instance is None:
            InfoLogger()
        return InfoLogger.__instance
class DataLogger(Logger):
    """Singleton logger for data records, formatted as '<log_id>,<message>'."""
    # Shared singleton instance (name-mangled to _DataLogger__instance).
    __instance = None

    def __init__(self, filename = 'data.log'):
        # Enforce the singleton: a second direct instantiation is an error.
        if DataLogger.__instance != None:
            raise Exception("This class is a singleton!")
        else:
            super(DataLogger, self).__init__(filename)
            formatter = '%(log_id)s,%(message)s'
            self.set_up_logger(formatter,'data_logger')
            DataLogger.__instance = self

    # NOTE(review): unlike the sibling singletons, this is declared without
    # ``self``, so it only works when called as ``DataLogger.get_instance()``;
    # calling it on an instance raises TypeError. Confirm call sites before
    # normalizing the signature.
    def get_instance():
        if DataLogger.__instance == None:
            DataLogger()
        return DataLogger.__instance
class GroundLogger(Logger):
    """Logger for traffic exchanged with the ground station (not a singleton)."""

    def __init__(self, filename='elink.info.log'):
        super(GroundLogger, self).__init__(filename)
        formatter = '%(message)s'
        # BUGFIX: the logger-name template had no ``{filename}`` placeholder,
        # so the ``filename=filename`` argument was silently ignored and every
        # GroundLogger shared one logger; interpolate it so each file gets a
        # distinct logger name.
        self.set_up_logger(formatter, 'logger_{filename}'.format(filename=filename))

    def get_instance(self):
        # Required by the abstract base; GroundLogger is not a singleton.
        pass
| [
"logging.getLogger",
"os.path.exists",
"threading.Lock",
"logging.Formatter",
"os.path.isdir",
"logging.FileHandler",
"os.mkdir",
"file_read_backwards.FileReadBackwards"
] | [((204, 220), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (218, 220), False, 'import threading\n'), ((830, 858), 'logging.Formatter', 'logging.Formatter', (['formatter'], {}), '(formatter)\n', (847, 858), False, 'import logging\n'), ((882, 917), 'logging.FileHandler', 'logging.FileHandler', (['self.file_name'], {}), '(self.file_name)\n', (901, 917), False, 'import logging\n'), ((990, 1013), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (1007, 1013), False, 'import logging\n'), ((262, 285), 'os.path.isdir', 'os.path.isdir', (['self.dir'], {}), '(self.dir)\n', (275, 285), False, 'import os\n'), ((300, 318), 'os.mkdir', 'os.mkdir', (['self.dir'], {}), '(self.dir)\n', (308, 318), False, 'import os\n'), ((2297, 2320), 'os.path.isdir', 'os.path.isdir', (['self.dir'], {}), '(self.dir)\n', (2310, 2320), False, 'import os\n'), ((2325, 2355), 'os.path.exists', 'os.path.exists', (['self.file_name'], {}), '(self.file_name)\n', (2339, 2355), False, 'import os\n'), ((2667, 2718), 'file_read_backwards.FileReadBackwards', 'FileReadBackwards', (['self.file_name'], {'encoding': '"""utf-8"""'}), "(self.file_name, encoding='utf-8')\n", (2684, 2718), False, 'from file_read_backwards import FileReadBackwards\n'), ((1777, 1828), 'file_read_backwards.FileReadBackwards', 'FileReadBackwards', (['self.file_name'], {'encoding': '"""utf-8"""'}), "(self.file_name, encoding='utf-8')\n", (1794, 1828), False, 'from file_read_backwards import FileReadBackwards\n')] |
from path import path_code_dir
import sys
sys.path.insert(0, path_code_dir)
from amftrack.pipeline.functions.image_processing.extract_width_fun import *
from amftrack.pipeline.functions.image_processing.experiment_class_surf import Experiment, save_graphs, load_graphs
from amftrack.util import get_dates_datetime, get_dirname
import pickle
import networkx as nx
import pandas as pd
from amftrack.pipeline.paths.directory import directory_scratch
from path import path_code_dir
import os
import json
from datetime import datetime
from pymatreader import read_mat
import cv2
import matplotlib.pyplot as plt
from IPython.display import clear_output
from amftrack.plotutil import plot_t_tp1
from amftrack.notebooks.analysis.util import directory_scratch
import imageio
# --- Command-line arguments (supplied by the pipeline scheduler) ---
directory = str(sys.argv[1])
# NOTE(review): eval() on a CLI argument is dangerous; it appears to parse a
# bool literal ("True"/"False") — confirm and replace with an explicit parse.
overwrite = eval(sys.argv[2])
i = int(sys.argv[-1])  # index of the analysis folder to process
op_id = int(sys.argv[-2])  # id of the temp files describing this run
# Load the run description the scheduler wrote into the scratch area.
run_info = pd.read_json(f'{directory_scratch}temp/{op_id}.json')
list_f,list_args = pickle.load(open(f'{directory_scratch}temp/{op_id}.pick', "rb"))
# Select the run_info row matching the i-th analysis folder.
folder_list = list(run_info['folder_analysis'])
directory_name = folder_list[i]
select = run_info.loc[run_info['folder_analysis'] == directory_name]
row = [row for index, row in select.iterrows()][0]
plate_num = row['Plate']
# Load the pickled Experiment, persist its graphs separately, then
# re-pickle the experiment without the (large) in-memory graphs.
path_exp = f'{directory}{row["path_exp"]}'
exp = pickle.load(open(path_exp, "rb"))
exp.dates.sort()
save_graphs(exp)
exp.nx_graph = None
dirName = exp.save_location
exp.pickle_save(f"{dirName}/")
| [
"amftrack.pipeline.functions.image_processing.experiment_class_surf.save_graphs",
"sys.path.insert",
"pandas.read_json"
] | [((46, 79), 'sys.path.insert', 'sys.path.insert', (['(0)', 'path_code_dir'], {}), '(0, path_code_dir)\n', (61, 79), False, 'import sys\n'), ((914, 967), 'pandas.read_json', 'pd.read_json', (['f"""{directory_scratch}temp/{op_id}.json"""'], {}), "(f'{directory_scratch}temp/{op_id}.json')\n", (926, 967), True, 'import pandas as pd\n'), ((1387, 1403), 'amftrack.pipeline.functions.image_processing.experiment_class_surf.save_graphs', 'save_graphs', (['exp'], {}), '(exp)\n', (1398, 1403), False, 'from amftrack.pipeline.functions.image_processing.experiment_class_surf import Experiment, save_graphs, load_graphs\n')] |
# Copyright 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Based on the explanation in the book:
# "Dive into deep learning", <NAME>, <NAME>, <NAME>, <NAME>
import os
import time
import mxnet as mx
import cv2 as cv
import numpy
from mxnet import gluon
from mxnet import autograd
from mxnet import image
from mxnet import init
from mxnet import np, npx
from mxnet.gluon import nn
import glob
import matplotlib.pyplot as plt
from datetime import timedelta
from cartonifier import Cartonifier
# %%
# -- Settings: switch MXNet into NumPy-compatible mode so operators accept
# and return mxnet.numpy ndarrays (required by the np/npx API used below).
npx.set_np()
def find_root_folder(project_folder):
    """Return the absolute path of *project_folder*.

    The current working directory is split into components; everything up to
    (but not including) the first component named *project_folder* is kept and
    *project_folder* is appended.  When the cwd contains no such component the
    result is simply ``cwd + "/" + project_folder``.
    """
    components = os.getcwd().split(sep="/")
    try:
        kept = components[:components.index(project_folder)]
    except ValueError:
        # project_folder is not part of the cwd; keep every component.
        kept = components[:]
    kept.append(project_folder)
    return "/" + os.path.join(*kept)
class GeneratedImage(nn.Block):
    """A trainable image: its only parameter is the pixel tensor itself.

    Style transfer optimizes this block's weight directly; ``forward``
    simply returns the current pixels.
    """
    def __init__(self, img_shape, **kwargs):
        super(GeneratedImage, self).__init__(**kwargs)
        # 'weight' holds the image being synthesized, with shape img_shape.
        self.weight = self.params.get('weight', shape=img_shape)

    def forward(self):
        # No inputs: the "output" is the image parameter itself.
        return self.weight.data()
class StyleTransferGF:
    """Neural style transfer (Gatys et al.) on a pretrained VGG-19.

    The generated image is optimized directly (see ``GeneratedImage``) to
    minimize a weighted sum of content, style (Gram-matrix) and
    total-variation losses. Runs on GPU 0.
    """

    def __init__(self, content_image, style_image, image_size, content_weight=1.0, style_weight=1.0e4, tv_weight=10.0,
                 lr=0.1, out_image_filepath=None):
        super(StyleTransferGF, self).__init__()
        self.IMAGE_SIZE = image_size
        self.N_EPOCHS = 600
        # ImageNet channel statistics expected by the pretrained VGG-19.
        self.RGB_MEAN = np.array([0.485, 0.456, 0.406])
        self.RGB_STD = np.array([0.229, 0.224, 0.225])
        # VGG-19 feature indices used for style and content representations.
        self.style_layers = [0, 5, 10, 19, 28]
        self.content_layers = [25]
        self.LR = lr
        self.LR_DECAY_EPOCH = 300
        self.CONTENT_WEIGHT = content_weight
        self.STYLE_WEIGHT = style_weight
        self.TV_WEIGHT = tv_weight
        self.mx_ctx = mx.gpu(0)
        self.out_image_filepath = out_image_filepath

        # Both images may be given as an in-memory RGB array or a file path.
        if isinstance(content_image, numpy.ndarray):
            self.content_image = self.as_nd_np(content_image)
        elif isinstance(content_image, str):
            self.content_image = image.imread(content_image)
        else:
            raise TypeError("Only numpy array or str are supported.")
        if isinstance(style_image, numpy.ndarray):
            self.style_image = self.as_nd_np(style_image)
        elif isinstance(style_image, str):
            self.style_image = image.imread(style_image)
        else:
            raise TypeError("Only numpy array or str are supported.")

        # Keep only the VGG-19 layers up to the deepest one we actually read.
        pretrained_net = gluon.model_zoo.vision.vgg19(pretrained=True)
        self.net = nn.Sequential()
        for i in range(max(self.content_layers + self.style_layers) + 1):
            self.net.add(pretrained_net.features[i])

    def smooth(self, src: mx.numpy.ndarray, d: int, sigma_color: int, sigma_space: int):
        """Resize ``src`` to IMAGE_SIZE and apply an edge-preserving bilateral filter."""
        img = image.imresize(src, *self.IMAGE_SIZE)
        dst = cv.bilateralFilter(img.asnumpy(), d, sigma_color, sigma_space)
        dst = self.as_nd_np(dst)
        return dst

    def as_nd_np(self, img):
        """Convert a host array to an MXNet np ndarray with int32 pixels."""
        return mx.nd.array(img, dtype=np.int32).as_np_ndarray()

    def preprocess(self, img):
        """Resize, normalize with the VGG statistics, and add a batch axis (1, C, H, W)."""
        img = image.imresize(img, *self.IMAGE_SIZE)
        img = (img.astype('float32') / 255 - self.RGB_MEAN) / self.RGB_STD
        return np.expand_dims(img.transpose(2, 0, 1), axis=0)

    def postprocess(self, img):
        """Undo ``preprocess``: drop the batch axis, de-normalize, clamp to [0, 1] HWC."""
        img = img[0].as_in_ctx(self.RGB_STD.ctx)
        return (img.transpose(1, 2, 0) * self.RGB_STD + self.RGB_MEAN).clip(0, 1)

    def extract_features(self, x):
        """Run ``x`` through the truncated VGG, collecting content and style activations."""
        contents = []
        styles = []
        for i in range(len(self.net)):
            x = self.net[i](x)
            if i in self.style_layers:
                styles.append(x)
            if i in self.content_layers:
                contents.append(x)
        return contents, styles

    def get_contents(self):
        """Return the preprocessed content image and its content features."""
        content_x = self.preprocess(self.content_image).copyto(self.mx_ctx)
        contents_y, _ = self.extract_features(content_x)
        return content_x, contents_y

    def get_styles(self):
        """Return the preprocessed style image and its style features."""
        style_x = self.preprocess(self.style_image).copyto(self.mx_ctx)
        _, styles_y = self.extract_features(style_x)
        return style_x, styles_y

    def get_inits(self, x, styles_y):
        """Initialize the trainable image from ``x``; also return its Adam
        trainer and the Gram matrices of the style features."""
        gen_img = GeneratedImage(x.shape)
        gen_img.initialize(init.Constant(x), ctx=self.mx_ctx, force_reinit=True)
        trainer = gluon.Trainer(gen_img.collect_params(), 'adam', {'learning_rate': self.LR})
        styles_y_gram = [self.gram(y) for y in styles_y]
        return gen_img(), styles_y_gram, trainer

    @staticmethod
    def content_loss(y_hat, y):
        """Mean squared error between generated and target content features."""
        # BUGFIX: this previously read np.square(y_hat, y), which squares
        # y_hat with ``y`` as the out-argument instead of penalizing the
        # difference between the two feature maps.
        return np.square(y_hat - y).mean()

    @staticmethod
    def gram(x):
        """Channel Gram matrix normalized by (num_channels * spatial size)."""
        num_channels = x.shape[1]
        n = x.size // x.shape[1]
        x = x.reshape(num_channels, n)
        return np.dot(x, x.T) / (num_channels * n)

    @staticmethod
    def style_loss(y_hat, gram_y):
        """Mean squared difference between Gram matrices."""
        return np.square(StyleTransferGF.gram(y_hat) - gram_y).mean()

    @staticmethod
    def tv_loss(y_hat):
        """Total variation: penalizes differences between neighboring pixels."""
        return 0.5 * (np.abs(y_hat[:, :, 1:, :] - y_hat[:, :, :-1, :]).mean() +
                      np.abs(y_hat[:, :, :, 1:] - y_hat[:, :, :, :-1]).mean())

    def compute_loss(self, x, contents_y_hat, styles_y_hat, contents_y, styles_y_gram):
        """Return the weighted content/style/TV loss terms and their sum."""
        contents_l = [StyleTransferGF.content_loss(y_hat, y) * self.CONTENT_WEIGHT for y_hat, y in
                      zip(contents_y_hat, contents_y)]
        styles_l = [StyleTransferGF.style_loss(y_hat, y) * self.STYLE_WEIGHT for y_hat, y in
                    zip(styles_y_hat, styles_y_gram)]
        tv_l = StyleTransferGF.tv_loss(x) * self.TV_WEIGHT
        l = sum(styles_l + contents_l + [tv_l])
        return contents_l, styles_l, tv_l, l

    def train(self):
        """Optimize the generated image and return it as a uint8 RGB array.

        Also writes the result to ``out_image_filepath`` when one was given.
        """
        self.net.collect_params().reset_ctx(self.mx_ctx)
        content_x, contents_y = self.get_contents()
        _, styles_y = self.get_styles()
        x, styles_y_gram, trainer = self.get_inits(content_x, styles_y)
        # Same values as returned by get_inits; kept for clarity.
        styles_y_gram = [StyleTransferGF.gram(Y) for Y in styles_y]
        for epoch in range(self.N_EPOCHS):
            with autograd.record():
                contents_y_hat, styles_y_hat = self.extract_features(x)
                contents_l, styles_l, tv_l, l = self.compute_loss(x, contents_y_hat, styles_y_hat, contents_y,
                                                                  styles_y_gram)
            l.backward()
            trainer.step(1)
            npx.waitall()
            # Step-decay the learning rate every LR_DECAY_EPOCH epochs.
            if epoch % self.LR_DECAY_EPOCH == 0:
                trainer.set_learning_rate(trainer.learning_rate * 0.3)
            if epoch % 100 == 0:
                msg = [
                    f"Size: {self.IMAGE_SIZE}",
                    f"Epoch: {epoch}",
                    f"contents_l: {float(sum(contents_l)):0.3f}",
                    f"style_l: {float(sum(styles_l)):0.3f}",
                    f"tv_l: {float(tv_l):0.3f}",
                    f"total_l: {float(l):0.3f}"
                ]
                msg = ", ".join(msg)
                print(msg)
        out = self.postprocess(x).asnumpy()
        out = (out * 255).astype(numpy.uint8)
        if self.out_image_filepath is not None:
            cv.imwrite(self.out_image_filepath, cv.cvtColor(out, cv.COLOR_RGB2BGR))
        return out
# %%
# -- Train (continued)
def get_output_filepath(content_image_filepath, style_image_filepath, cw, sw, tw, output_folder):
    """Build the output path ``<content>_<style>_<cw>_<sw>_<tw>.png``.

    The basenames of the content and style images (without extensions) are
    combined with the three loss weights and joined onto *output_folder*.
    """
    content_stem, _ = os.path.splitext(os.path.basename(content_image_filepath))
    style_stem, _ = os.path.splitext(os.path.basename(style_image_filepath))
    filename = "{}_{}_{}_{}_{}.png".format(content_stem, style_stem, cw, sw, tw)
    return os.path.join(output_folder, filename)
def process_image(content_image_filepath, style_image_filepath, content_weight, style_weight, tv_weight, output_folder,
                  timestamp):
    """Run coarse-to-fine style transfer for one content/style pair.

    Transfer is repeated over increasing ``scales``; at each new scale the
    previous result is median-blurred and blended with the resized original
    content before being refined again. The final image is written under
    ``output_folder``.

    NOTE(review): ``timestamp`` is currently unused — confirm whether it was
    meant to be part of the output filename.
    """
    print(f"[ ] Processing {os.path.basename(content_image_filepath)} with settings: {content_weight} {style_weight} {tv_weight}")
    # Blend factor: how much of the previous stylized result to keep when
    # moving to the next (finer) scale.
    alpha = 0.90
    scales = ((200, 150), (283, 212), (400, 300), (566, 424), (800, 600))
    # NOTE(review): lr_list is indexed below but ``lr`` is never passed to
    # StyleTransferGF — confirm whether ``lr=lr`` was intended.
    lr_list = (0.7, 0.6, 0.5, 0.5, 0.5)
    # Prepare content image. Portrait inputs are rotated to landscape so the
    # fixed landscape scales fit, and rotated back before saving.
    # NOTE(review): cv.INTER_CUBIC is passed positionally here, which lands in
    # cv2.resize's ``dst`` slot (later calls use interpolation= correctly).
    original_image = cv.cvtColor(cv.imread(content_image_filepath), cv.COLOR_BGR2RGB)
    shape = original_image.shape
    ratio = shape[1] / shape[0]
    if ratio < 1:
        original_image = cv.rotate(original_image, cv.ROTATE_90_CLOCKWISE)
        is_rotated = True
    else:
        is_rotated = False
    content_image = cv.resize(original_image, scales[0], cv.INTER_CUBIC)
    # Prepare style image (same portrait handling; never rotated back since
    # only the stylized content is saved).
    original_style_image = cv.cvtColor(cv.imread(style_image_filepath), cv.COLOR_BGR2RGB)
    shape = original_style_image.shape
    ratio = shape[1] / shape[0]
    if ratio < 1:
        original_style_image = cv.rotate(original_style_image, cv.ROTATE_90_CLOCKWISE)
    style_image = cv.resize(original_style_image, scales[0], cv.INTER_CUBIC)
    index = 0
    for index, scale in enumerate(scales):
        if index > 0:
            # Upscale both the original and the previous result, denoise the
            # result, then blend them to seed the next refinement pass.
            src1 = cv.resize(original_image, dsize=scale, interpolation=cv.INTER_CUBIC)
            src2 = cv.resize(content_image, dsize=scale, interpolation=cv.INTER_CUBIC)
            src2 = cv.medianBlur(src2, ksize=3)
            src3 = cv.addWeighted(src2, alpha, src1, 1.0 - alpha, 0)
            content_image = src3
        style_image = cv.resize(original_style_image, dsize=scale, interpolation=cv.INTER_CUBIC)
        output_filepath = None
        lr = lr_list[index]
        style_transfer_gf = StyleTransferGF(content_image, style_image, scale, content_weight=content_weight,
                                            style_weight=style_weight, tv_weight=tv_weight,
                                            out_image_filepath=output_filepath)
        content_image = style_transfer_gf.train()
        # Free GPU state between scales.
        del style_transfer_gf
        time.sleep(3)
    if is_rotated:
        content_image = cv.rotate(content_image, cv.ROTATE_90_COUNTERCLOCKWISE)
    output_filepath = get_output_filepath(content_image_filepath, style_image_filepath, content_weight, style_weight, tv_weight, output_folder)
    cv.imwrite(output_filepath, cv.cvtColor(content_image, cv.COLOR_RGB2BGR))
def main():
    """Grid-search driver: style-transfer every (content, style, weights) combination."""
    root_folder = find_root_folder("mxnet-cookbook")
    output_folder = os.path.join(root_folder, "data", "output")
    os.makedirs(output_folder, exist_ok=True)
    timestamp = str(int(time.time()))
    # Hyper-parameter grid (single values for now; kept as lists so sweeps
    # only require editing these literals).
    content_weight_list = [1.0]
    style_weight_list = [1e4]
    tv_weight_list = [10]
    content_image_filepath_list = sorted(glob.glob(os.path.join(root_folder, "data", "input", "IMG_*")))
    # Only the first content image is processed for now.
    content_image_filepath_list = [content_image_filepath_list[0]]
    style_image_filepath_list = sorted(glob.glob(os.path.join(root_folder, "data", "style_transfer", "*.jpeg")))
    for style_weight in style_weight_list:
        for content_weight in content_weight_list:
            for tv_weight in tv_weight_list:
                for content_image_filename in content_image_filepath_list:
                    for style_image_filename in style_image_filepath_list:
                        tic = time.time()
                        if not os.path.exists(style_image_filename):
                            raise FileNotFoundError(f"Cannot find {style_image_filename}")
                        if not os.path.exists(content_image_filename):
                            raise FileNotFoundError(f"Cannot find {content_image_filename}")
                        process_image(content_image_filename, style_image_filename, content_weight, style_weight,
                                      tv_weight, output_folder, timestamp)
                        toc = time.time()
                        # BUGFIX: removed a stray 'f' that was printed
                        # literally before the elapsed time.
                        print(f"Elapsed time: {timedelta(seconds=(toc - tic))}")


if __name__ == '__main__':
    main()
| [
"mxnet.autograd.record",
"time.sleep",
"mxnet.gpu",
"mxnet.init.Constant",
"datetime.timedelta",
"mxnet.gluon.nn.Sequential",
"os.path.exists",
"mxnet.np.dot",
"cv2.medianBlur",
"mxnet.image.imread",
"cv2.addWeighted",
"mxnet.npx.set_np",
"mxnet.nd.array",
"mxnet.npx.waitall",
"mxnet.np.... | [((1056, 1068), 'mxnet.npx.set_np', 'npx.set_np', ([], {}), '()\n', (1066, 1068), False, 'from mxnet import np, npx\n'), ((8332, 8364), 'os.path.join', 'os.path.join', (['output_folder', 'out'], {}), '(output_folder, out)\n', (8344, 8364), False, 'import os\n'), ((9150, 9202), 'cv2.resize', 'cv.resize', (['original_image', 'scales[0]', 'cv.INTER_CUBIC'], {}), '(original_image, scales[0], cv.INTER_CUBIC)\n', (9159, 9202), True, 'import cv2 as cv\n'), ((9514, 9572), 'cv2.resize', 'cv.resize', (['original_style_image', 'scales[0]', 'cv.INTER_CUBIC'], {}), '(original_style_image, scales[0], cv.INTER_CUBIC)\n', (9523, 9572), True, 'import cv2 as cv\n'), ((10931, 10974), 'os.path.join', 'os.path.join', (['root_folder', '"""data"""', '"""output"""'], {}), "(root_folder, 'data', 'output')\n", (10943, 10974), False, 'import os\n'), ((10979, 11020), 'os.makedirs', 'os.makedirs', (['output_folder'], {'exist_ok': '(True)'}), '(output_folder, exist_ok=True)\n', (10990, 11020), False, 'import os\n'), ((1385, 1416), 'os.path.join', 'os.path.join', (['*root_folder_list'], {}), '(*root_folder_list)\n', (1397, 1416), False, 'import os\n'), ((2006, 2037), 'mxnet.np.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (2014, 2037), False, 'from mxnet import np, npx\n'), ((2061, 2092), 'mxnet.np.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (2069, 2092), False, 'from mxnet import np, npx\n'), ((2373, 2382), 'mxnet.gpu', 'mx.gpu', (['(0)'], {}), '(0)\n', (2379, 2382), True, 'import mxnet as mx\n'), ((3140, 3185), 'mxnet.gluon.model_zoo.vision.vgg19', 'gluon.model_zoo.vision.vgg19', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (3168, 3185), False, 'from mxnet import gluon\n'), ((3205, 3220), 'mxnet.gluon.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (3218, 3220), False, 'from mxnet.gluon import nn\n'), ((3452, 3489), 'mxnet.image.imresize', 'image.imresize', (['src', '*self.IMAGE_SIZE'], {}), 
'(src, *self.IMAGE_SIZE)\n', (3466, 3489), False, 'from mxnet import image\n'), ((3759, 3796), 'mxnet.image.imresize', 'image.imresize', (['img', '*self.IMAGE_SIZE'], {}), '(img, *self.IMAGE_SIZE)\n', (3773, 3796), False, 'from mxnet import image\n'), ((8856, 8889), 'cv2.imread', 'cv.imread', (['content_image_filepath'], {}), '(content_image_filepath)\n', (8865, 8889), True, 'import cv2 as cv\n'), ((9017, 9066), 'cv2.rotate', 'cv.rotate', (['original_image', 'cv.ROTATE_90_CLOCKWISE'], {}), '(original_image, cv.ROTATE_90_CLOCKWISE)\n', (9026, 9066), True, 'import cv2 as cv\n'), ((9269, 9300), 'cv2.imread', 'cv.imread', (['style_image_filepath'], {}), '(style_image_filepath)\n', (9278, 9300), True, 'import cv2 as cv\n'), ((9440, 9495), 'cv2.rotate', 'cv.rotate', (['original_style_image', 'cv.ROTATE_90_CLOCKWISE'], {}), '(original_style_image, cv.ROTATE_90_CLOCKWISE)\n', (9449, 9495), True, 'import cv2 as cv\n'), ((10509, 10522), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (10519, 10522), False, 'import time\n'), ((10566, 10621), 'cv2.rotate', 'cv.rotate', (['content_image', 'cv.ROTATE_90_COUNTERCLOCKWISE'], {}), '(content_image, cv.ROTATE_90_COUNTERCLOCKWISE)\n', (10575, 10621), True, 'import cv2 as cv\n'), ((10798, 10842), 'cv2.cvtColor', 'cv.cvtColor', (['content_image', 'cv.COLOR_RGB2BGR'], {}), '(content_image, cv.COLOR_RGB2BGR)\n', (10809, 10842), True, 'import cv2 as cv\n'), ((1127, 1138), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1136, 1138), False, 'import os\n'), ((4918, 4934), 'mxnet.init.Constant', 'init.Constant', (['x'], {}), '(x)\n', (4931, 4934), False, 'from mxnet import init\n'), ((5422, 5436), 'mxnet.np.dot', 'np.dot', (['x', 'x.T'], {}), '(x, x.T)\n', (5428, 5436), False, 'from mxnet import np, npx\n'), ((7045, 7058), 'mxnet.npx.waitall', 'npx.waitall', ([], {}), '()\n', (7056, 7058), False, 'from mxnet import np, npx\n'), ((8127, 8167), 'os.path.basename', 'os.path.basename', (['content_image_filepath'], {}), 
'(content_image_filepath)\n', (8143, 8167), False, 'import os\n'), ((8211, 8249), 'os.path.basename', 'os.path.basename', (['style_image_filepath'], {}), '(style_image_filepath)\n', (8227, 8249), False, 'import os\n'), ((9673, 9741), 'cv2.resize', 'cv.resize', (['original_image'], {'dsize': 'scale', 'interpolation': 'cv.INTER_CUBIC'}), '(original_image, dsize=scale, interpolation=cv.INTER_CUBIC)\n', (9682, 9741), True, 'import cv2 as cv\n'), ((9761, 9828), 'cv2.resize', 'cv.resize', (['content_image'], {'dsize': 'scale', 'interpolation': 'cv.INTER_CUBIC'}), '(content_image, dsize=scale, interpolation=cv.INTER_CUBIC)\n', (9770, 9828), True, 'import cv2 as cv\n'), ((9848, 9876), 'cv2.medianBlur', 'cv.medianBlur', (['src2'], {'ksize': '(3)'}), '(src2, ksize=3)\n', (9861, 9876), True, 'import cv2 as cv\n'), ((9896, 9945), 'cv2.addWeighted', 'cv.addWeighted', (['src2', 'alpha', 'src1', '(1.0 - alpha)', '(0)'], {}), '(src2, alpha, src1, 1.0 - alpha, 0)\n', (9910, 9945), True, 'import cv2 as cv\n'), ((10005, 10079), 'cv2.resize', 'cv.resize', (['original_style_image'], {'dsize': 'scale', 'interpolation': 'cv.INTER_CUBIC'}), '(original_style_image, dsize=scale, interpolation=cv.INTER_CUBIC)\n', (10014, 10079), True, 'import cv2 as cv\n'), ((11045, 11056), 'time.time', 'time.time', ([], {}), '()\n', (11054, 11056), False, 'import time\n'), ((11200, 11251), 'os.path.join', 'os.path.join', (['root_folder', '"""data"""', '"""input"""', '"""IMG_*"""'], {}), "(root_folder, 'data', 'input', 'IMG_*')\n", (11212, 11251), False, 'import os\n'), ((11371, 11432), 'os.path.join', 'os.path.join', (['root_folder', '"""data"""', '"""style_transfer"""', '"""*.jpeg"""'], {}), "(root_folder, 'data', 'style_transfer', '*.jpeg')\n", (11383, 11432), False, 'import os\n'), ((2664, 2691), 'mxnet.image.imread', 'image.imread', (['content_image'], {}), '(content_image)\n', (2676, 2691), False, 'from mxnet import image\n'), ((2959, 2984), 'mxnet.image.imread', 'image.imread', (['style_image'], {}), 
'(style_image)\n', (2971, 2984), False, 'from mxnet import image\n'), ((3664, 3696), 'mxnet.nd.array', 'mx.nd.array', (['img'], {'dtype': 'np.int32'}), '(img, dtype=np.int32)\n', (3675, 3696), True, 'import mxnet as mx\n'), ((5238, 5257), 'mxnet.np.square', 'np.square', (['y_hat', 'y'], {}), '(y_hat, y)\n', (5247, 5257), False, 'from mxnet import np, npx\n'), ((6697, 6714), 'mxnet.autograd.record', 'autograd.record', ([], {}), '()\n', (6712, 6714), False, 'from mxnet import autograd\n'), ((7904, 7938), 'cv2.cvtColor', 'cv.cvtColor', (['out', 'cv.COLOR_RGB2BGR'], {}), '(out, cv.COLOR_RGB2BGR)\n', (7915, 7938), True, 'import cv2 as cv\n'), ((8560, 8600), 'os.path.basename', 'os.path.basename', (['content_image_filepath'], {}), '(content_image_filepath)\n', (8576, 8600), False, 'import os\n'), ((5647, 5695), 'mxnet.np.abs', 'np.abs', (['(y_hat[:, :, 1:, :] - y_hat[:, :, :-1, :])'], {}), '(y_hat[:, :, 1:, :] - y_hat[:, :, :-1, :])\n', (5653, 5695), False, 'from mxnet import np, npx\n'), ((5727, 5775), 'mxnet.np.abs', 'np.abs', (['(y_hat[:, :, :, 1:] - y_hat[:, :, :, :-1])'], {}), '(y_hat[:, :, :, 1:] - y_hat[:, :, :, :-1])\n', (5733, 5775), False, 'from mxnet import np, npx\n'), ((11756, 11767), 'time.time', 'time.time', ([], {}), '()\n', (11765, 11767), False, 'import time\n'), ((12311, 12322), 'time.time', 'time.time', ([], {}), '()\n', (12320, 12322), False, 'import time\n'), ((11799, 11835), 'os.path.exists', 'os.path.exists', (['style_image_filename'], {}), '(style_image_filename)\n', (11813, 11835), False, 'import os\n'), ((11959, 11997), 'os.path.exists', 'os.path.exists', (['content_image_filename'], {}), '(content_image_filename)\n', (11973, 11997), False, 'import os\n'), ((12371, 12399), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(toc - tic)'}), '(seconds=toc - tic)\n', (12380, 12399), False, 'from datetime import timedelta\n')] |
''' Common Verify functions for IOX / app-hosting '''
import logging
import time
log = logging.getLogger(__name__)
# Import parser
from genie.utils.timeout import Timeout
from genie.metaparser.util.exceptions import SchemaEmptyParserError
def verify_app_requested_state(device, app_list=None, requested_state='RUNNING', max_time=120, interval=10):
    '''
    verify_app_requested_state
    Check show app-hosting list and confirm the requested state of the passed in list of appids
    Args:
        device ('obj') : Device object
        app_list ('list') : list of appids
        requested_state ('str') : requested state of appid
        max_time ('int') : max time to wait
        interval ('int') : interval timer
    Returns:
        True
        False
    Raises:
        None
    '''
    all_apps_achieved_requested_state = False
    if app_list is None:
        app_list = []
    timeout = Timeout(max_time=max_time, interval=interval)
    while timeout.iterate():
        # 'show app-hosting list' can be slow; raise the execute timeout for
        # just this command and always restore the caller's original value
        # (the old code leaked timeout=120 when the parser raised).
        old_timeout = device.execute.timeout
        device.execute.timeout = 120
        try:
            output = device.parse('show app-hosting list')
        except SchemaEmptyParserError:
            timeout.sleep()
            continue
        finally:
            device.execute.timeout = old_timeout
        # Every app must be in the requested state.  (The old code's flag only
        # reflected the LAST app checked, so an earlier failing app could be
        # masked by a later passing one.)
        pending = [app for app in app_list
                   if output['app_id'][app]['state'] != requested_state]
        if not pending:
            all_apps_achieved_requested_state = True
            break
        for app in pending:
            log.info("App name %s not in the requested state %s yet, wait" % (app, requested_state))
        all_apps_achieved_requested_state = False
        timeout.sleep()
    if all_apps_achieved_requested_state:
        log.info("All Apps achieved the requested state!")
    else:
        log.error("Not all apps achieved the requested state!")
    return all_apps_achieved_requested_state
def verify_iox_enabled(device, max_time=600, interval=10):
    '''
    verify_iox_enabled
    Check show iox and confirm all services are up and running
    Args:
        device ('obj') : Device object
        max_time ('int') : max time to wait
        interval ('int') : interval timer
    Returns:
        True
        False
    Raises:
        None
    '''
    # Every one of these services must report 'running' for IOX to count
    # as enabled.
    monitored_services = ('caf_service', 'ha_service', 'ioxman_service',
                          'libvirtd', 'dockerd')
    timeout = Timeout(max_time=max_time, interval=interval)
    while timeout.iterate():
        try:
            output = device.parse("show iox")
        except SchemaEmptyParserError:
            timeout.sleep()
            continue
        if all(output.get(service, '').strip().lower() == 'running'
               for service in monitored_services):
            log.info("IOX is enabled")
            return True
        timeout.sleep()
    log.info("IOX was not enabled!")
    return False
def verify_iox_disabled(device, max_time=600, interval=10, redundancy=False):
    '''
    verify_iox_disabled
    Check show iox and confirm all services are not running
    Args:
        device ('obj') : Device object
        max_time ('int') : max time to wait
        interval ('int') : interval timer
    Returns:
        True
        False
    Raises:
        None
    '''
    # Services that must all report 'not running'.  NOTE: unlike
    # verify_iox_enabled, libvirtd is intentionally not checked here.
    monitored_services = ('caf_service', 'ha_service', 'ioxman_service',
                          'dockerd')
    timeout = Timeout(max_time=max_time, interval=interval)
    while timeout.iterate():
        try:
            output = device.parse("show iox")
        except SchemaEmptyParserError:
            timeout.sleep()
            continue
        if all(output.get(service, '').strip().lower() == 'not running'
               for service in monitored_services):
            log.info("IOX is disabled")
            return True
        timeout.sleep()
    log.info("IOX was not disabled!")
    return False
| [
"logging.getLogger",
"genie.utils.timeout.Timeout"
] | [((94, 121), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (111, 121), False, 'import logging\n'), ((955, 1000), 'genie.utils.timeout.Timeout', 'Timeout', ([], {'max_time': 'max_time', 'interval': 'interval'}), '(max_time=max_time, interval=interval)\n', (962, 1000), False, 'from genie.utils.timeout import Timeout\n'), ((2452, 2497), 'genie.utils.timeout.Timeout', 'Timeout', ([], {'max_time': 'max_time', 'interval': 'interval'}), '(max_time=max_time, interval=interval)\n', (2459, 2497), False, 'from genie.utils.timeout import Timeout\n'), ((3666, 3711), 'genie.utils.timeout.Timeout', 'Timeout', ([], {'max_time': 'max_time', 'interval': 'interval'}), '(max_time=max_time, interval=interval)\n', (3673, 3711), False, 'from genie.utils.timeout import Timeout\n')] |
# import scipy.signal
from gym.spaces import Box, Discrete
import numpy as np
import torch
from torch import nn
import IPython
# from torch.nn import Parameter
import torch.nn.functional as F
from torch.distributions import Independent, OneHotCategorical, Categorical
from torch.distributions.normal import Normal
# # from torch.distributions.categorical import Categorical
def mlp(sizes, activation, output_activation=nn.Identity):
    """Build a fully-connected network from a list of layer sizes.

    Every Linear layer is followed by an instance of ``activation``,
    except the last one, which is followed by ``output_activation``.
    """
    layers = []
    n_layers = len(sizes) - 1
    for idx in range(n_layers):
        is_last = idx == n_layers - 1
        act = output_activation if is_last else activation
        layers.extend([nn.Linear(sizes[idx], sizes[idx + 1]), act()])
    return nn.Sequential(*layers)
class Actor(nn.Module):
    """Abstract policy base class.

    Subclasses implement the distribution construction and the
    log-probability computation for their action space.
    """

    def _distribution(self, obs):
        raise NotImplementedError

    def _log_prob_from_distribution(self, pi, act):
        raise NotImplementedError

    def forward(self, obs, act=None):
        """Return the action distribution for ``obs`` and, when ``act`` is
        supplied, the log-likelihood of ``act`` under that distribution."""
        pi = self._distribution(obs)
        if act is None:
            return pi, None
        return pi, self._log_prob_from_distribution(pi, act)
class MLPCategoricalActor(Actor):
    """Categorical policy head for discrete action spaces."""

    def __init__(self, obs_dim, act_dim, hidden_sizes, activation):
        super().__init__()
        # Logits network: obs -> one logit per discrete action.
        self.logits_net = mlp([obs_dim, *hidden_sizes, act_dim], activation)

    def _distribution(self, obs):
        return Categorical(logits=self.logits_net(obs))

    def _log_prob_from_distribution(self, pi, act):
        return pi.log_prob(act)
class MLPGaussianActor(Actor):
    """Diagonal-Gaussian policy head for continuous action spaces.

    The log standard deviation is a learnable, state-independent
    parameter initialised to -0.5 for every action dimension.
    """

    def __init__(self, obs_dim, act_dim, hidden_sizes, activation):
        super().__init__()
        init_log_std = np.full(act_dim, -0.5, dtype=np.float32)
        self.log_std = nn.Parameter(torch.as_tensor(init_log_std))
        self.mu_net = mlp([obs_dim, *hidden_sizes, act_dim], activation)

    def _distribution(self, obs):
        return Normal(self.mu_net(obs), torch.exp(self.log_std))

    def _log_prob_from_distribution(self, pi, act):
        # Torch's Normal is elementwise, so sum over the action axis.
        return pi.log_prob(act).sum(axis=-1)
class MLPCritic(nn.Module):
    """State-value network V(s)."""

    def __init__(self, obs_dim, hidden_sizes, activation):
        super().__init__()
        self.v_net = mlp([obs_dim, *hidden_sizes, 1], activation)

    def forward(self, obs):
        # Drop the trailing unit dimension so values have shape (B,),
        # not (B, 1).
        return torch.squeeze(self.v_net(obs), -1)
class MLPActorCritic(nn.Module):
    """Policy plus two critics: a reward value head and a cost value head."""

    def __init__(self, observation_space, action_space,
                 hidden_sizes=(64, 64), activation=nn.Tanh):
        super().__init__()
        obs_dim = observation_space.shape[0]

        # Choose the policy head matching the action space type.
        if isinstance(action_space, Box):
            self.pi = MLPGaussianActor(obs_dim, action_space.shape[0],
                                      hidden_sizes, activation)
        elif isinstance(action_space, Discrete):
            self.pi = MLPCategoricalActor(obs_dim, action_space.n,
                                         hidden_sizes, activation)

        # Reward critic (v) and cost critic (vc).
        self.v = MLPCritic(obs_dim, hidden_sizes, activation)
        self.vc = MLPCritic(obs_dim, hidden_sizes, activation)

    def step(self, obs):
        """Sample an action; return it with both value estimates and its
        log-probability, all as numpy arrays."""
        with torch.no_grad():
            pi = self.pi._distribution(obs)
            a = pi.sample()
            logp_a = self.pi._log_prob_from_distribution(pi, a)
            v = self.v(obs)
            vc = self.vc(obs)
        return a.numpy(), v.numpy(), vc.numpy(), logp_a.numpy()

    def act(self, obs):
        """Convenience wrapper returning only the sampled action."""
        return self.step(obs)[0]
class MEMOActor(nn.Module):
    """Gaussian action decoder used by MEMO.

    Mean comes from an MLP over the (state, context) input; the log std
    is a learnable, state-independent parameter initialised to -0.5.
    """

    def __init__(self, state_dim, hidden_size, action_dim, activation=nn.Tanh):
        super().__init__()
        init_log_std = np.full(action_dim, -0.5, dtype=np.float32)
        self.log_std = nn.Parameter(torch.as_tensor(init_log_std))
        self.mu_net = mlp([state_dim] + hidden_size + [action_dim], activation)

    def _gaussian(self, obs):
        # Shared helper: network mean with the shared learnable std.
        return Normal(self.mu_net(obs), torch.exp(self.log_std))

    def _distribution(self, obs):
        return self._gaussian(obs)

    def forward(self, obs):
        return self._gaussian(obs)
# the critic is error here would be: reward + gamma*V(s_t+1)-V(s_t)
# http://incompleteideas.net/book/first/ebook/node66.html
class MEMO(nn.Module):
    """Multiple Experts, Multiple Objectives;

    VQ-VAE-style model: a state delta is encoded, vector-quantized against
    a learned codebook (one code per discrete latent context), decoded back
    for reconstruction, and the quantization result conditions a Gaussian
    action decoder.
    """
    def __init__(self, obs_dim, out_dim, encoder_hidden, decoder_hidden, actor_hidden, latent_modes):
        '''
        :param obs_dim: observation dimensionality (also used as the
            codebook embedding width)
        :param out_dim: action dimensionality
        :param encoder_hidden: hidden sizes; only the last entry is used,
            as the post-quantization projection width
        :param decoder_hidden: hidden sizes of the VQ decoder MLP
        :param actor_hidden: hidden sizes of the action decoder MLP
        :param latent_modes: number of discrete latent contexts
            (codebook size)
        '''
        super(MEMO, self).__init__()
        # Contexts (codebook indices) observed so far; replaced by a set in
        # compute_quantized_loss despite being initialised as a list.
        self.found_contexts = []
        self.latent_modes = latent_modes

        self.num_embeddings = self.latent_modes
        self.embedding_dim = obs_dim

        self.vq_encoder = VQEncoder(obs_dim, self.embedding_dim) # original
        self.prenet = nn.Linear(self.embedding_dim, self.embedding_dim)
        self.vector_quantizer = VectorQuantizer(self.num_embeddings, self.embedding_dim)
        self.postnet = nn.Linear(self.embedding_dim, encoder_hidden[-1])
        self.vq_decoder = VQDecoder(encoder_hidden[-1], decoder_hidden, obs_dim)
        # Actor input is the state concatenated with a latent_modes-wide
        # context vector.
        self.action_decoder = MEMOActor(state_dim=obs_dim + self.latent_modes, hidden_size=actor_hidden, action_dim=out_dim)
        # self.action_gaussian = GaussianActor(obs_dim=obs_dim + self.latent_modes, act_dim=out_dim,
        #                                   hidden_sizes=[128]*4, activation=nn.LeakyReLU)
        self.action_vq_dist = None

    def compute_quantized_loss(self, state, delta_state, actions):
        '''
        Run the VQ pipeline on a state delta and condition the actor.

        :param state: raw state tensor
        :param delta_state: state-difference tensor fed to the encoder
        :param actions: action tensor (currently unused in this method)
        :return: (encoder_output, quantized, reconstruction,
                  categorical_proposal, action_vq_dist)
        '''
        delta_state_enc = self.vq_encoder(delta_state)  # In: [B, OBS_DIM]; Out: # [B, OBS_DIM]
        encoder_output = self.prenet(delta_state_enc)   # In: [B, OBS_DIM]; Out: # [B, OBS_DIM]
        quantized, categorical_proposal, categorical_proposal_prob = self.vector_quantizer(encoder_output)

        # update the set of known contexts
        self.found_contexts = set([t.data.item() for t in categorical_proposal])

        # Straight Through Estimator: forward pass uses the quantized code,
        # backward pass routes gradients through encoder_output.
        st_quantized = encoder_output + (quantized - encoder_output).detach()
        post_quantized = self.postnet(st_quantized)
        # print("Post Quantized: ", post_quantized)
        reconstruction = self.vq_decoder(post_quantized)
        # print("Reconstruction: ", reconstruction)

        # One-hot form of the proposals is built but the actor below is fed
        # the raw distance/probability matrix instead.
        categorical_proposal_reshape = torch.reshape(categorical_proposal, (-1, 1))
        categorical_proposal_onehot = F.one_hot(categorical_proposal_reshape, self.latent_modes).squeeze().float()
        # total_max = torch.tensor(0.)
        # print("distances max: ", max(total_max, torch.max(categorical_proposal_prob)))

        # concat_state_vq = torch.cat([state, categorical_proposal_onehot], dim=-1)
        concat_state_vq = torch.cat([state, categorical_proposal_prob], dim=-1)
        action_vq_dist = self.action_decoder(concat_state_vq)

        return encoder_output, quantized, reconstruction, categorical_proposal, action_vq_dist
        # return encoder_output, quantized, reconstruction, categorical_proposal, action_mse

    def act(self, state, context_label):
        # NOTE(review): here the state is concatenated with a single scalar
        # label, while compute_quantized_loss concatenates a latent_modes-wide
        # vector; confirm the actor input width matches for both call paths.
        concat_state_vq = torch.cat([state, torch.reshape(torch.as_tensor(context_label), (-1,))], dim=-1)
        action_vq_dist = self.action_decoder(concat_state_vq)
        action = action_vq_dist.sample()
        return action

    def forward(self, X, Delta_X, A, kl_beta=1., recon_gamma=1.):
        """
        Given input tensor, forward propagate, compute the loss, and backward propagate.
        Represents the lifecycle of a single iteration

        :param X: Raw state tensor
        :param Delta_X: State difference tensor
        :param A: Action tensor
        :param kl_beta: KL divergence temperance factor
        :param recon_gamma: State weights (currently unused in the loss)
        : Important to note that both recon and context loss cannot be negative.
        """
        encoder_output, quantized, reconstruction, vq_latent_labels, action_vq_dist =\
            self.compute_quantized_loss(X, Delta_X, A)

        vq_criterion = VQCriterion(beta=kl_beta)
        vq_total_loss, recons_loss, vq_loss, commitment_loss = vq_criterion(Delta_X, encoder_output, quantized, reconstruction)

        # original formula
        # Policy term: 1 / (p(A) + 0.1), summed over the action axis, then
        # scaled by the total VQ loss.
        loss_pi = (torch.tensor(1.)/(torch.exp(action_vq_dist.log_prob(A)) + torch.tensor(0.1))).sum(axis=-1)
        loss = loss_pi * vq_total_loss
        return loss, loss_pi, X, vq_latent_labels, vq_total_loss
class VQEncoder(nn.Module):
    """Two-layer Tanh MLP mapping an input into embedding space.

    The hidden width is half the output width; the final Tanh bounds
    every output coordinate in (-1, 1).
    """

    def __init__(self, in_dim, out_dim):
        super(VQEncoder, self).__init__()
        hidden = out_dim // 2
        self.net = nn.Sequential(
            nn.Linear(in_dim, hidden),
            nn.Tanh(),
            nn.Linear(hidden, out_dim),
            nn.Tanh(),
        )

    def forward(self, input):
        return self.net(input)
class Clamper(nn.Module):
    """Module wrapper around torch.clamp with fixed bounds."""

    def __init__(self, min=None, max=None):
        super().__init__()
        self.min = min
        self.max = max

    def forward(self, input):
        # Elementwise clip to [self.min, self.max].
        return input.clamp(self.min, self.max)
class VQDecoder(nn.Module):
    """MLP decoder reconstructing an observation from a (projected) code.

    The input is passed through an initial Tanh before the MLP.
    """

    def __init__(self, obs_dim, hidden_dim, out_dim, activation=nn.Tanh):
        super().__init__()
        self.initial_act = nn.Tanh()
        self.net = mlp([obs_dim] + hidden_dim + [out_dim], activation)

    def forward(self, input):
        activated = self.initial_act(input)
        return self.net(activated)
class VectorQuantizer(nn.Module):
    """Nearest-neighbour lookup over a learned codebook of embeddings."""

    def __init__(self, num_embeddings, embedding_dim):
        super().__init__()
        self.num_embeddings = num_embeddings  # E_N: codebook size
        self.embedding_dim = embedding_dim    # E_D: code width
        self.embeddings = nn.Embedding(num_embeddings, embedding_dim)
        # Uniform init range shrinks as the codebook grows.
        self.scale = 1. / self.num_embeddings
        print("Quantizer Scale: ", self.scale)
        nn.init.uniform_(self.embeddings.weight, -self.scale, self.scale)

    def proposal_distribution(self, input):
        # Flatten all but the last (embedding) dimension: [B, E_D]
        flat = input.flatten(end_dim=-2).contiguous()
        # Squared Euclidean distances via ||x||^2 + ||w||^2 - 2 x.w : [B, E_N]
        sq_input = (flat ** 2).sum(dim=1, keepdim=True)
        sq_codes = (self.embeddings.weight ** 2).sum(dim=1)
        cross = flat @ self.embeddings.weight.t()
        distances = sq_input + sq_codes - 2 * cross
        nearest = torch.argmin(distances, dim=-1)  # [B]
        # The raw distance matrix doubles as the "probability" output.
        return nearest, distances

    def forward(self, input):
        proposal, proposal_prob = self.proposal_distribution(input)  # [B]
        quantized = self.embeddings(proposal).contiguous()  # [B, E_D]
        return quantized, proposal, proposal_prob
class VQCriterion(nn.Module):
    """VQ-VAE loss: reconstruction + codebook (vq) + beta * commitment.

    vq_loss: \| \text{sg}[I(x, e)] * e - \text{sg}[z_e(x)] \|_2^2
    """

    def __init__(self, beta):
        super().__init__()
        self.beta = beta

    def forward(self, input, encoder_output, quantized, reconstruction):
        enc_flat = encoder_output.flatten(end_dim=-2)
        code_flat = quantized.flatten(end_dim=-2)

        reconstruction_loss = F.mse_loss(input, reconstruction)
        # Codebook loss: move embeddings toward (frozen) encoder outputs.
        vq_loss = F.mse_loss(enc_flat.detach(), code_flat)
        # Commitment loss: keep encoder outputs near (frozen) embeddings.
        commitment_loss = F.mse_loss(enc_flat, code_flat.detach())

        total_loss = reconstruction_loss + vq_loss + self.beta * commitment_loss
        return total_loss, reconstruction_loss, vq_loss, commitment_loss
class VDB(nn.Module):
    """Variational Discriminator Bottleneck: encode to a latent Gaussian,
    reparameterize, then score with a sigmoid discriminator head.

    ``args`` must provide ``hidden_size`` and ``z_size`` attributes.
    """

    def __init__(self, num_inputs, args):
        super().__init__()
        hidden, latent = args.hidden_size, args.z_size
        self.fc1 = nn.Linear(num_inputs, hidden)
        self.fc2 = nn.Linear(hidden, latent)   # latent mean head
        self.fc3 = nn.Linear(hidden, latent)   # latent log-variance head
        self.fc4 = nn.Linear(latent, hidden)
        self.fc5 = nn.Linear(hidden, 1)
        # Shrink the output head so initial scores start near 0.5.
        self.fc5.weight.data.mul_(0.1)
        self.fc5.bias.data.mul_(0.0)

    def encoder(self, x):
        h = torch.tanh(self.fc1(x))
        return self.fc2(h), self.fc3(h)

    def reparameterize(self, mu, logvar):
        # z = mu + sigma * eps with eps ~ N(0, I)
        std = torch.exp(logvar / 2)
        return mu + std * torch.randn_like(std)

    def discriminator(self, z):
        return torch.sigmoid(self.fc5(torch.tanh(self.fc4(z))))

    def forward(self, x):
        mu, logvar = self.encoder(x)
        z = self.reparameterize(mu, logvar)
        return self.discriminator(z), mu, logvar
###########################################################################3
from torch.autograd import Variable
from torch.distributions import Distribution, Normal
class TanhNormal(torch.distributions.Distribution):
    """
    Distribution of X = tanh(Z) where Z ~ N(mean, std).

    Note: this is not very numerically stable.
    """

    def __init__(self, normal_mean, normal_std, epsilon=1e-6):
        """
        :param normal_mean: Mean of the underlying normal distribution
        :param normal_std: Std of the underlying normal distribution
        :param epsilon: Numerical stability epsilon when computing log-prob.
        """
        self.normal_mean = normal_mean
        self.normal_std = normal_std
        self.normal = Normal(normal_mean, normal_std)
        self.epsilon = epsilon

    def sample_n(self, n, return_pre_tanh_value=False):
        z = self.normal.sample_n(n)
        return (torch.tanh(z), z) if return_pre_tanh_value else torch.tanh(z)

    def log_prob(self, value, pre_tanh_value=None):
        """
        :param value: some value, x
        :param pre_tanh_value: arctanh(x); recomputed from ``value`` if omitted
        :return: log-density of ``value`` under this distribution
        """
        if pre_tanh_value is None:
            # arctanh(x) = 0.5 * log((1+x)/(1-x))
            pre_tanh_value = torch.log((1 + value) / (1 - value)) / 2
        # Change of variables: log p_X(x) = log p_Z(z) - log(1 - tanh(z)^2)
        jacobian = torch.log(1 - value * value + self.epsilon)
        return self.normal.log_prob(pre_tanh_value) - jacobian

    def sample(self, return_pretanh_value=False):
        z = self.normal.sample()
        return (torch.tanh(z), z) if return_pretanh_value else torch.tanh(z)

    def rsample(self, return_pretanh_value=False):
        # Reparameterization trick: z = mu + sigma * eps with eps ~ N(0, I).
        noise = Variable(Normal(
            np.zeros(self.normal_mean.size()),
            np.ones(self.normal_std.size())
        ).sample())
        z = self.normal_mean + self.normal_std * noise
        # z.requires_grad_()
        return (torch.tanh(z), z) if return_pretanh_value else torch.tanh(z)
| [
"torch.as_tensor",
"torch.distributions.Categorical",
"torch.nn.Tanh",
"torch.nn.Sequential",
"torch.exp",
"torch.tanh",
"torch.nn.init.uniform_",
"torch.nn.Embedding",
"torch.nn.functional.mse_loss",
"torch.distributions.Normal",
"numpy.ones",
"torch.randn_like",
"torch.nn.functional.one_ho... | [((628, 650), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (641, 650), False, 'from torch import nn\n'), ((1513, 1539), 'torch.distributions.Categorical', 'Categorical', ([], {'logits': 'logits'}), '(logits=logits)\n', (1524, 1539), False, 'from torch.distributions import Independent, OneHotCategorical, Categorical\n'), ((2037, 2060), 'torch.exp', 'torch.exp', (['self.log_std'], {}), '(self.log_std)\n', (2046, 2060), False, 'import torch\n'), ((2076, 2091), 'torch.distributions.Normal', 'Normal', (['mu', 'std'], {}), '(mu, std)\n', (2082, 2091), False, 'from torch.distributions import Distribution, Normal\n'), ((4125, 4148), 'torch.exp', 'torch.exp', (['self.log_std'], {}), '(self.log_std)\n', (4134, 4148), False, 'import torch\n'), ((4164, 4179), 'torch.distributions.Normal', 'Normal', (['mu', 'std'], {}), '(mu, std)\n', (4170, 4179), False, 'from torch.distributions import Distribution, Normal\n'), ((4253, 4276), 'torch.exp', 'torch.exp', (['self.log_std'], {}), '(self.log_std)\n', (4262, 4276), False, 'import torch\n'), ((4292, 4307), 'torch.distributions.Normal', 'Normal', (['mu', 'std'], {}), '(mu, std)\n', (4298, 4307), False, 'from torch.distributions import Distribution, Normal\n'), ((5074, 5123), 'torch.nn.Linear', 'nn.Linear', (['self.embedding_dim', 'self.embedding_dim'], {}), '(self.embedding_dim, self.embedding_dim)\n', (5083, 5123), False, 'from torch import nn\n'), ((5236, 5285), 'torch.nn.Linear', 'nn.Linear', (['self.embedding_dim', 'encoder_hidden[-1]'], {}), '(self.embedding_dim, encoder_hidden[-1])\n', (5245, 5285), False, 'from torch import nn\n'), ((6716, 6760), 'torch.reshape', 'torch.reshape', (['categorical_proposal', '(-1, 1)'], {}), '(categorical_proposal, (-1, 1))\n', (6729, 6760), False, 'import torch\n'), ((7115, 7168), 'torch.cat', 'torch.cat', (['[state, categorical_proposal_prob]'], {'dim': '(-1)'}), '([state, categorical_proposal_prob], dim=-1)\n', (7124, 7168), False, 
'import torch\n'), ((9586, 9624), 'torch.clamp', 'torch.clamp', (['input', 'self.min', 'self.max'], {}), '(input, self.min, self.max)\n', (9597, 9624), False, 'import torch\n'), ((9782, 9791), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (9789, 9791), False, 'from torch import nn\n'), ((10190, 10233), 'torch.nn.Embedding', 'nn.Embedding', (['num_embeddings', 'embedding_dim'], {}), '(num_embeddings, embedding_dim)\n', (10202, 10233), False, 'from torch import nn\n'), ((10347, 10412), 'torch.nn.init.uniform_', 'nn.init.uniform_', (['self.embeddings.weight', '(-self.scale)', 'self.scale'], {}), '(self.embeddings.weight, -self.scale, self.scale)\n', (10363, 10412), False, 'from torch import nn\n'), ((10859, 10890), 'torch.argmin', 'torch.argmin', (['distances'], {'dim': '(-1)'}), '(distances, dim=-1)\n', (10871, 10890), False, 'import torch\n'), ((11868, 11901), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['input', 'reconstruction'], {}), '(input, reconstruction)\n', (11878, 11901), True, 'import torch.nn.functional as F\n'), ((12387, 12426), 'torch.nn.Linear', 'nn.Linear', (['num_inputs', 'args.hidden_size'], {}), '(num_inputs, args.hidden_size)\n', (12396, 12426), False, 'from torch import nn\n'), ((12446, 12486), 'torch.nn.Linear', 'nn.Linear', (['args.hidden_size', 'args.z_size'], {}), '(args.hidden_size, args.z_size)\n', (12455, 12486), False, 'from torch import nn\n'), ((12506, 12546), 'torch.nn.Linear', 'nn.Linear', (['args.hidden_size', 'args.z_size'], {}), '(args.hidden_size, args.z_size)\n', (12515, 12546), False, 'from torch import nn\n'), ((12566, 12606), 'torch.nn.Linear', 'nn.Linear', (['args.z_size', 'args.hidden_size'], {}), '(args.z_size, args.hidden_size)\n', (12575, 12606), False, 'from torch import nn\n'), ((12626, 12656), 'torch.nn.Linear', 'nn.Linear', (['args.hidden_size', '(1)'], {}), '(args.hidden_size, 1)\n', (12635, 12656), False, 'from torch import nn\n'), ((12894, 12915), 'torch.exp', 'torch.exp', (['(logvar / 2)'], {}), '(logvar / 
2)\n', (12903, 12915), False, 'import torch\n'), ((12930, 12951), 'torch.randn_like', 'torch.randn_like', (['std'], {}), '(std)\n', (12946, 12951), False, 'import torch\n'), ((14018, 14049), 'torch.distributions.Normal', 'Normal', (['normal_mean', 'normal_std'], {}), '(normal_mean, normal_std)\n', (14024, 14049), False, 'from torch.distributions import Distribution, Normal\n'), ((575, 608), 'torch.nn.Linear', 'nn.Linear', (['sizes[j]', 'sizes[j + 1]'], {}), '(sizes[j], sizes[j + 1])\n', (584, 608), False, 'from torch import nn\n'), ((1779, 1813), 'numpy.ones', 'np.ones', (['act_dim'], {'dtype': 'np.float32'}), '(act_dim, dtype=np.float32)\n', (1786, 1813), True, 'import numpy as np\n'), ((1850, 1874), 'torch.as_tensor', 'torch.as_tensor', (['log_std'], {}), '(log_std)\n', (1865, 1874), False, 'import torch\n'), ((3316, 3331), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3329, 3331), False, 'import torch\n'), ((3866, 3903), 'numpy.ones', 'np.ones', (['action_dim'], {'dtype': 'np.float32'}), '(action_dim, dtype=np.float32)\n', (3873, 3903), True, 'import numpy as np\n'), ((3940, 3964), 'torch.as_tensor', 'torch.as_tensor', (['log_std'], {}), '(log_std)\n', (3955, 3964), False, 'import torch\n'), ((8948, 8979), 'torch.nn.Linear', 'nn.Linear', (['in_dim', '(out_dim // 2)'], {}), '(in_dim, out_dim // 2)\n', (8957, 8979), False, 'from torch import nn\n'), ((8993, 9002), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (9000, 9002), False, 'from torch import nn\n'), ((9016, 9048), 'torch.nn.Linear', 'nn.Linear', (['(out_dim // 2)', 'out_dim'], {}), '(out_dim // 2, out_dim)\n', (9025, 9048), False, 'from torch import nn\n'), ((9062, 9071), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (9069, 9071), False, 'from torch import nn\n'), ((14277, 14290), 'torch.tanh', 'torch.tanh', (['z'], {}), '(z)\n', (14287, 14290), False, 'import torch\n'), ((14648, 14691), 'torch.log', 'torch.log', (['(1 - value * value + self.epsilon)'], {}), '(1 - value * value + self.epsilon)\n', 
(14657, 14691), False, 'import torch\n'), ((14900, 14913), 'torch.tanh', 'torch.tanh', (['z'], {}), '(z)\n', (14910, 14913), False, 'import torch\n'), ((15334, 15347), 'torch.tanh', 'torch.tanh', (['z'], {}), '(z)\n', (15344, 15347), False, 'import torch\n'), ((14227, 14240), 'torch.tanh', 'torch.tanh', (['z'], {}), '(z)\n', (14237, 14240), False, 'import torch\n'), ((14527, 14563), 'torch.log', 'torch.log', (['((1 + value) / (1 - value))'], {}), '((1 + value) / (1 - value))\n', (14536, 14563), False, 'import torch\n'), ((14850, 14863), 'torch.tanh', 'torch.tanh', (['z'], {}), '(z)\n', (14860, 14863), False, 'import torch\n'), ((15284, 15297), 'torch.tanh', 'torch.tanh', (['z'], {}), '(z)\n', (15294, 15297), False, 'import torch\n'), ((7521, 7551), 'torch.as_tensor', 'torch.as_tensor', (['context_label'], {}), '(context_label)\n', (7536, 7551), False, 'import torch\n'), ((8591, 8608), 'torch.tensor', 'torch.tensor', (['(1.0)'], {}), '(1.0)\n', (8603, 8608), False, 'import torch\n'), ((6799, 6857), 'torch.nn.functional.one_hot', 'F.one_hot', (['categorical_proposal_reshape', 'self.latent_modes'], {}), '(categorical_proposal_reshape, self.latent_modes)\n', (6808, 6857), True, 'import torch.nn.functional as F\n'), ((8649, 8666), 'torch.tensor', 'torch.tensor', (['(0.1)'], {}), '(0.1)\n', (8661, 8666), False, 'import torch\n')] |
from django.shortcuts import render
from django.http import HttpResponse
import datetime as dt
from django.views import View
from photos.models import Image, category
# Create your views here.
def welcome(request):
    """Render the landing page."""
    template = 'welcome.html'
    return render(request, template)
def display_page(request):
    """Show every uploaded image on the gallery page."""
    context = {'image': Image.objects.all()}
    return render(request, 'all.html', context)
def viewDetails(request):
    """Render the details page for all images."""
    context = {'image': Image.objects.all()}
    return render(request, 'details.html', context)
def my_category(request):
    """List every image category."""
    # Template expects the (original) 'categorys' context key.
    return render(request, 'category.html', {'categorys': category.objects.all()})
def search_results(request):
    """Search categories by the ?category_name= query parameter."""
    search_term = request.GET.get("category_name")
    if search_term:
        matches = category.search_by_category(search_term)
        message = f"{search_term}"
        return render(request, 'search.html',
                      {"message": message, "category_name": matches})
    message = "You haven't searched for any term"
    return render(request, 'search.html', {"message": message})
| [
"django.shortcuts.render",
"photos.models.category.objects.all",
"photos.models.Image.objects.all",
"photos.models.category.search_by_category"
] | [((229, 260), 'django.shortcuts.render', 'render', (['request', '"""welcome.html"""'], {}), "(request, 'welcome.html')\n", (235, 260), False, 'from django.shortcuts import render\n'), ((301, 320), 'photos.models.Image.objects.all', 'Image.objects.all', ([], {}), '()\n', (318, 320), False, 'from photos.models import Image, category\n'), ((332, 377), 'django.shortcuts.render', 'render', (['request', '"""all.html"""', "{'image': image}"], {}), "(request, 'all.html', {'image': image})\n", (338, 377), False, 'from django.shortcuts import render\n'), ((417, 436), 'photos.models.Image.objects.all', 'Image.objects.all', ([], {}), '()\n', (434, 436), False, 'from photos.models import Image, category\n'), ((448, 497), 'django.shortcuts.render', 'render', (['request', '"""details.html"""', "{'image': image}"], {}), "(request, 'details.html', {'image': image})\n", (454, 497), False, 'from django.shortcuts import render\n'), ((541, 563), 'photos.models.category.objects.all', 'category.objects.all', ([], {}), '()\n', (561, 563), False, 'from photos.models import Image, category\n'), ((629, 670), 'django.shortcuts.render', 'render', (['request', '"""category.html"""', 'context'], {}), "(request, 'category.html', context)\n", (635, 670), False, 'from django.shortcuts import render\n'), ((868, 908), 'photos.models.category.search_by_category', 'category.search_by_category', (['search_term'], {}), '(search_term)\n', (895, 908), False, 'from photos.models import Image, category\n'), ((960, 1057), 'django.shortcuts.render', 'render', (['request', '"""search.html"""', "{'message': message, 'category_name': searched_category_name}"], {}), "(request, 'search.html', {'message': message, 'category_name':\n searched_category_name})\n", (966, 1057), False, 'from django.shortcuts import render\n'), ((1131, 1183), 'django.shortcuts.render', 'render', (['request', '"""search.html"""', "{'message': message}"], {}), "(request, 'search.html', {'message': message})\n", (1137, 1183), False, 
'from django.shortcuts import render\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" This module provides a command line interface to news_munger. """
import datetime
import random
import argparse
from munger import DocumentCatalog, Munger
# Minimal CLI: no custom options are defined yet; parse_args() still
# provides the automatic -h/--help behavior.
parser = argparse.ArgumentParser()
parser.parse_args()
## Classes ##
class MadLib(Munger):
    """Stub Munger variant; article generation is not implemented yet."""

    def build(self):
        """Do nothing -- placeholder until MadLib generation exists."""
        pass

    def __repr__(self):
        return f"<MadLib: {self.headline}>"
class ExquisiteCorpse(Munger):
    """
    A fake news article composed of sentence fragments gleaned from the day's
    headlines, in the style of surrealist party game 'Exquisite Corpse'.
    See: https://en.wikipedia.org/wiki/Exquisite_corpse
    """
    def __init__(self, documents):
        """Initialize super; and declare corpse list. """
        super().__init__(documents)
        # Accumulates one {"title": ..., "sentences": [...]} dict per build().
        self.corpses = []
    def build(self):
        """Munge news stories to create an esquisite cadavre. """
        text = ""
        # Pick one document at random to serve as the skeleton of the corpse.
        base_index = random.randrange(len(self._documents))
        base = self._documents[base_index]
        sentences = []
        for i, sent in enumerate(base.sents):
            # (document index, sentence index, root lemma, sentence span)
            stuple = (base_index, i, sent.root.lemma_, sent)
            # Dispatch on the sentence's root verb; the munge_* strategies are
            # presumably inherited from Munger -- not visible in this file.
            if stuple[2] == "say":
                sentence = self.munge_sayings(stuple)
            elif stuple[2] in ["be", "do", "have"]:
                sentence = self.munge_children(stuple)
            else:
                sentence = self.munge_on_roots(stuple)
            sentences.append(sentence)
        self.corpses.append({"title": base._.title, "sentences": sentences})
        # Each munged sentence tuple ends with a span; join their raw text.
        text += "\n".join([sent[-1].text_with_ws for sent in sentences])
        print(text)
    def save(self, cadavre=None):
        """ Write the cadavre(s) to a file. """
        # One output file per day; repeated runs append to the same file.
        filename = datetime.datetime.today().strftime("tmp/exq_%Y%m%d.txt")
        if cadavre:
            corpses = [cadavre]
        else:
            corpses = self.corpses
        with open(filename, "a+") as file:
            for corpse in corpses:
                file.write(f"{corpse['title']}\n\n")
                for sent in corpse["sentences"]:
                    file.write(sent[-1].text_with_ws)
                file.write("\n******\n\n")
    def __repr__(self):
        return "<ExquisiteCorpse: {}>".format(self.headline)
if __name__ == "__main__":
    # Build the day's DocumentCatalog when invoked from the command line.
    catalog = DocumentCatalog()
# Unit Tests #
| [
"datetime.datetime.today",
"munger.DocumentCatalog",
"argparse.ArgumentParser"
] | [((218, 243), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (241, 243), False, 'import argparse\n'), ((2345, 2362), 'munger.DocumentCatalog', 'DocumentCatalog', ([], {}), '()\n', (2360, 2362), False, 'from munger import DocumentCatalog, Munger\n'), ((1780, 1805), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (1803, 1805), False, 'import datetime\n')] |
"""This module contains the code related to the DAG and the scheduler."""
from pathlib import Path
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from matplotlib.colors import LinearSegmentedColormap
from mpl_toolkits.axes_grid1 import make_axes_locatable
from networkx.drawing import nx_pydot
from pipeline.shared import ensure_list
BLUE = "#547482"
YELLOW_TO_RED = ["#C8B05C", "#C89D64", "#F1B05D", "#EE8445", "#C87259", "#6C4A4D"]
class Scheduler:
    """Propose executable tasks from a DAG in dependency order.

    Mirrors the idea behind :func:`networkx.topological_sort`: a task may
    only be proposed once every task it depends on has finished.  Pending
    tasks and their unfinished prerequisites live in ``task_dict``; proposed
    tasks move to ``submitted_tasks`` and finished ones are purged from every
    prerequisite set.  When ``priority`` is truthy, tasks with the highest
    priorities are proposed first.
    """

    def __init__(self, dag, unfinished_tasks, priority):
        self.dag = dag
        self.task_dict = self._create_task_dependency_dict(unfinished_tasks)
        self.submitted_tasks = set()
        self.priority = priority

    def _create_task_dependency_dict(self, unfinished_tasks):
        """Map each unfinished task to the set of unfinished tasks it waits on."""
        dependencies = {}
        for task_id in unfinished_tasks:
            prerequisites = set()
            for dependency in ensure_list(
                self.dag.nodes[task_id].get("depends_on", [])
            ):
                # The producers of each dependency node are candidate blockers.
                for producer in self.dag.predecessors(dependency):
                    if producer in unfinished_tasks:
                        prerequisites.add(producer)
            dependencies[task_id] = prerequisites
        return dependencies

    def propose(self, n_proposals=1):
        """Return a set of task ids whose prerequisites are all satisfied.

        Parameters
        ----------
        n_proposals : int
            How many tasks to propose; ``-1`` means "all ready tasks".

        Returns
        -------
        set
            Task ids ready for execution.  They are removed from the internal
            bookkeeping and recorded as submitted.
        """
        ready = [task for task, blockers in self.task_dict.items() if not blockers]
        if self.priority:
            ready = sorted(
                ready,
                key=lambda task: self.dag.nodes[task]["priority"],
                reverse=True,
            )
        if n_proposals >= 0:
            chosen = set(ready[:n_proposals])
        elif n_proposals == -1:
            chosen = set(ready)
        else:
            raise NotImplementedError
        self.submitted_tasks = self.submitted_tasks.union(chosen)
        for task in chosen:
            del self.task_dict[task]
        return chosen

    def process_finished(self, finished_tasks):
        """Mark one id or a list of ids as finished.

        Removes the ids from the submitted set and discards them from the
        prerequisite set of every still-pending task.

        Parameters
        ----------
        finished_tasks : str or list
            An id or a list of ids of finished tasks.
        """
        for done in ensure_list(finished_tasks):
            self.submitted_tasks.remove(done)
            for waiting in self.task_dict:
                self.task_dict[waiting].discard(done)

    @property
    def are_tasks_left(self):
        return bool(self.task_dict) or bool(self.submitted_tasks)
def create_dag(tasks, config):
    """Create a directed acyclic graph (DAG) capturing dependencies between functions.

    Parameters
    ----------
    tasks : dict
        Dictionary containing tasks.
    config : dict
        Project configuration; consulted for priority settings and the
        directories used when drawing the DAG.

    Returns
    -------
    dag : nx.DiGraph
        The directed acyclic graph.
    """
    dag_dict = _create_dag_dict(tasks)
    # The dict maps node -> its dependencies, so raw edges point from a node
    # to what it depends on; reverse so edges run dependency -> dependent.
    dag = nx.DiGraph(dag_dict).reverse()
    dag = _insert_tasks_in_dag(dag, tasks)
    dag = _assign_priority_to_nodes(dag, config)
    # Side effect: renders the DAG to <hidden_build_directory>/.dag.png.
    _draw_dag(dag, config)
    return dag
def _create_dag_dict(tasks):
    """Build a mapping of node -> list of nodes it depends on."""
    dag_dict = {}
    for task_id, task_info in tasks.items():
        # A task depends on its declared inputs, any templates, and its config.
        dependencies = list(ensure_list(task_info.get("depends_on", [])))
        dependencies.extend(ensure_list(task_info.get("template", [])))
        dependencies.append(task_info["config"])
        dag_dict[task_id] = dependencies
        # Register every produced file as a node depending on the task itself.
        for product in ensure_list(task_info.get("produces", [])):
            dag_dict[product] = [task_id]
    return dag_dict
def _insert_tasks_in_dag(dag, tasks):
for id_ in dag.nodes:
if id_ in tasks:
dag.nodes[id_].update(**tasks[id_], _is_task=True)
else:
dag.nodes[id_].update(_is_task=False)
return dag
def _assign_priority_to_nodes(dag, config):
    """Assign a priority to a node.

    Task priorities trickle down from the last nodes in the DAG to the first nodes. The
    total priority of a task is its own priority plus the discounted sum of priorities
    of its targets.
    """
    discount_factor = config["priority_discount_factor"]
    reversed_dag = dag.reverse()
    # Topological order of the reversed DAG visits downstream tasks first,
    # so their totals are final before upstream tasks read them.
    for id_ in nx.topological_sort(reversed_dag):
        if reversed_dag.nodes[id_]["_is_task"] and config["priority_scheduling"]:
            sum_priorities = 0
            # Downstream *tasks* are two hops away: task -> product -> consumer.
            for pre in reversed_dag.predecessors(id_):
                for pre_task in reversed_dag.predecessors(pre):
                    sum_priorities += dag.nodes[pre_task].get("priority", 0)
            dag.nodes[id_]["priority"] = (
                dag.nodes[id_].get("priority", 0) + discount_factor * sum_priorities
            )
        else:
            pass
    return dag
def _draw_dag(dag, config):
    """Render the DAG to <hidden_build_directory>/.dag.png.

    Uses a Graphviz 'dot' layout via pydot.  When priority scheduling is
    enabled, task nodes are sized and colored by priority; all other nodes
    are drawn in plain blue.
    """
    fig, ax = plt.subplots(figsize=(16, 12))
    fig.suptitle("Task Graph", fontsize=24)
    # Relabel absolute paths to path names.
    project_directory = Path(config["project_directory"])
    mapping = {
        node: Path(node).relative_to(project_directory)
        for node in dag.nodes
        if Path(node).is_absolute()
    }
    dag = nx.relabel_nodes(dag, mapping)
    layout = nx_pydot.pydot_layout(dag, prog="dot")
    nx.draw_networkx_edges(dag, pos=layout, ax=ax)
    nx.draw_networkx_labels(dag, pos=layout, ax=ax)
    # Draw non-task nodes.
    non_task_nodes = [node for node in dag.nodes if not dag.nodes[node]["_is_task"]]
    nx.draw_networkx_nodes(
        dag, pos=layout, nodelist=non_task_nodes, node_color=BLUE, ax=ax
    )
    task_nodes = [node for node in dag.nodes if dag.nodes[node]["_is_task"]]
    if config["priority_scheduling"]:
        # Min-max normalize priorities, then scale node sizes into [300, 1300].
        node_size = np.array([dag.nodes[node]["priority"] for node in task_nodes])
        node_size_demeaned = node_size - node_size.min()
        node_size_relative = node_size_demeaned / node_size_demeaned.max()
        node_size = node_size_relative * 1_000 + 300
        cmap = LinearSegmentedColormap.from_list("cmap", YELLOW_TO_RED)
        priority_kwargs = {
            "node_size": node_size,
            "node_color": node_size_relative,
            "cmap": cmap,
        }
    else:
        priority_kwargs = {"node_color": BLUE}
    im = nx.draw_networkx_nodes(
        dag, pos=layout, nodelist=task_nodes, **priority_kwargs, ax=ax
    )
    if config["priority_scheduling"]:
        # Add a colorbar legend mapping node color to task priority.
        divider = make_axes_locatable(ax)
        cax = divider.append_axes("right", size="3%", pad=0.1)
        fig.colorbar(im, cax=cax, orientation="vertical")
        cax.set_title("Priority")
    path = Path(config["hidden_build_directory"], ".dag.png")
    path.parent.mkdir(parents=True, exist_ok=True)
    plt.savefig(path)
    plt.close()
| [
"networkx.relabel_nodes",
"matplotlib.pyplot.savefig",
"networkx.topological_sort",
"pathlib.Path",
"networkx.drawing.nx_pydot.pydot_layout",
"networkx.DiGraph",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"networkx.draw_networkx_nodes",
"matplotlib.pyplot.close",
"numpy.array",
"netw... | [((5769, 5802), 'networkx.topological_sort', 'nx.topological_sort', (['reversed_dag'], {}), '(reversed_dag)\n', (5788, 5802), True, 'import networkx as nx\n'), ((6347, 6377), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(16, 12)'}), '(figsize=(16, 12))\n', (6359, 6377), True, 'import matplotlib.pyplot as plt\n'), ((6492, 6525), 'pathlib.Path', 'Path', (["config['project_directory']"], {}), "(config['project_directory'])\n", (6496, 6525), False, 'from pathlib import Path\n'), ((6680, 6710), 'networkx.relabel_nodes', 'nx.relabel_nodes', (['dag', 'mapping'], {}), '(dag, mapping)\n', (6696, 6710), True, 'import networkx as nx\n'), ((6725, 6763), 'networkx.drawing.nx_pydot.pydot_layout', 'nx_pydot.pydot_layout', (['dag'], {'prog': '"""dot"""'}), "(dag, prog='dot')\n", (6746, 6763), False, 'from networkx.drawing import nx_pydot\n'), ((6769, 6815), 'networkx.draw_networkx_edges', 'nx.draw_networkx_edges', (['dag'], {'pos': 'layout', 'ax': 'ax'}), '(dag, pos=layout, ax=ax)\n', (6791, 6815), True, 'import networkx as nx\n'), ((6820, 6867), 'networkx.draw_networkx_labels', 'nx.draw_networkx_labels', (['dag'], {'pos': 'layout', 'ax': 'ax'}), '(dag, pos=layout, ax=ax)\n', (6843, 6867), True, 'import networkx as nx\n'), ((6985, 7078), 'networkx.draw_networkx_nodes', 'nx.draw_networkx_nodes', (['dag'], {'pos': 'layout', 'nodelist': 'non_task_nodes', 'node_color': 'BLUE', 'ax': 'ax'}), '(dag, pos=layout, nodelist=non_task_nodes, node_color\n =BLUE, ax=ax)\n', (7007, 7078), True, 'import networkx as nx\n'), ((7757, 7848), 'networkx.draw_networkx_nodes', 'nx.draw_networkx_nodes', (['dag'], {'pos': 'layout', 'nodelist': 'task_nodes', 'ax': 'ax'}), '(dag, pos=layout, nodelist=task_nodes, **\n priority_kwargs, ax=ax)\n', (7779, 7848), True, 'import networkx as nx\n'), ((8106, 8156), 'pathlib.Path', 'Path', (["config['hidden_build_directory']", '""".dag.png"""'], {}), "(config['hidden_build_directory'], '.dag.png')\n", (8110, 8156), False, 'from pathlib 
import Path\n'), ((8212, 8229), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {}), '(path)\n', (8223, 8229), True, 'import matplotlib.pyplot as plt\n'), ((8234, 8245), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8243, 8245), True, 'import matplotlib.pyplot as plt\n'), ((3754, 3781), 'pipeline.shared.ensure_list', 'ensure_list', (['finished_tasks'], {}), '(finished_tasks)\n', (3765, 3781), False, 'from pipeline.shared import ensure_list\n'), ((7224, 7286), 'numpy.array', 'np.array', (["[dag.nodes[node]['priority'] for node in task_nodes]"], {}), "([dag.nodes[node]['priority'] for node in task_nodes])\n", (7232, 7286), True, 'import numpy as np\n'), ((7488, 7544), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'LinearSegmentedColormap.from_list', (['"""cmap"""', 'YELLOW_TO_RED'], {}), "('cmap', YELLOW_TO_RED)\n", (7521, 7544), False, 'from matplotlib.colors import LinearSegmentedColormap\n'), ((7915, 7938), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (7934, 7938), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((4416, 4436), 'networkx.DiGraph', 'nx.DiGraph', (['dag_dict'], {}), '(dag_dict)\n', (4426, 4436), True, 'import networkx as nx\n'), ((6556, 6566), 'pathlib.Path', 'Path', (['node'], {}), '(node)\n', (6560, 6566), False, 'from pathlib import Path\n'), ((6639, 6649), 'pathlib.Path', 'Path', (['node'], {}), '(node)\n', (6643, 6649), False, 'from pathlib import Path\n')] |
import os, os.path, urllib.request, sys, getopt
def main(argv):
    """Download every URL listed (one per line) in an input file.

    Command line:
        download-files.py -i <input-file> -d <download-directory>

    Falls back to ``urls.txt`` and ``<cwd>/downloads`` when the options are
    omitted.  Files that already exist locally are skipped; individual
    download failures are reported and skipped.
    """
    print(argv)
    input_file = ''
    download_dir = ''
    try:
        opts, _args = getopt.getopt(argv,
                                 "hi:d:",
                                 ["input-file=", "download-dir="])
    except getopt.GetoptError as err:
        print(err)
        print('download-files.py -i <input-file> -d <download-directory>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print('download-files.py -i <input-file> -d <download-directory>')
            sys.exit()
        elif opt in ("-i", "--input-file"):
            input_file = arg
        elif opt in ("-d", "--download-dir"):
            download_dir = arg
    if input_file != '':
        print("Opening input file...")
    else:
        print("No Input file specified, trying default...")
        input_file = "urls.txt"
    if download_dir == '':
        print("Download Directory not provided... using default.")
        # os.path.join instead of the original '\downloads' concat, which was
        # Windows-only and contained the invalid escape sequence '\d'.
        download_dir = os.path.join(os.getcwd(), 'downloads')
    # Make sure the target directory exists before the first download.
    os.makedirs(download_dir, exist_ok=True)
    # Context manager closes the file handle the original version leaked.
    with open(input_file, 'r') as links:
        for link in links:
            link = link.strip()
            name = link.rsplit('/', 1)[-1]
            filename = os.path.join(download_dir, name)
            if not os.path.isfile(filename):
                print('Downloading: ' + filename)
                try:
                    urllib.request.urlretrieve(link, filename)
                except Exception as inst:
                    print(inst)
                    print('Continuing...')
if __name__ == "__main__":
    # Skip argv[0] (the script name) and hand only real options to main().
    main(sys.argv[1:])
| [
"getopt.getopt",
"os.path.join",
"os.getcwd",
"os.path.isfile",
"sys.exit"
] | [((153, 215), 'getopt.getopt', 'getopt.getopt', (['argv', '"""hi:d:"""', "['input-file=', 'download-dir=']"], {}), "(argv, 'hi:d:', ['input-file=', 'download-dir='])\n", (166, 215), False, 'import os, os.path, urllib.request, sys, getopt\n'), ((1223, 1255), 'os.path.join', 'os.path.join', (['download_dir', 'name'], {}), '(download_dir, name)\n', (1235, 1255), False, 'import os, os.path, urllib.request, sys, getopt\n'), ((427, 438), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (435, 438), False, 'import os, os.path, urllib.request, sys, getopt\n'), ((581, 591), 'sys.exit', 'sys.exit', ([], {}), '()\n', (589, 591), False, 'import os, os.path, urllib.request, sys, getopt\n'), ((1272, 1296), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (1286, 1296), False, 'import os, os.path, urllib.request, sys, getopt\n'), ((1176, 1187), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1185, 1187), False, 'import os, os.path, urllib.request, sys, getopt\n')] |
from spotify_auth import auth
from urllib import parse
import json
def search(track_name, artist, type='track'):
    """Search the Spotify API for a track by name and artist.

    Parameters:
        track_name: title of the track to look up.
        artist: artist name used to narrow the search.
        type: Spotify search type (default 'track').  NOTE: shadows the
            builtin `type`; kept for backward compatibility with callers.

    Returns:
        The decoded JSON response as a dict.
    """
    # Bug fix: the original called quote_plus(query) before `query` was
    # assigned (guaranteed NameError) and then never used the encoded value.
    # Build the query first, URL-encode it, and send the encoded form.
    query = "artist:{} track:{}".format(artist, track_name)
    parsed = parse.quote_plus(query)
    response = auth.get(
        'https://api.spotify.com/v1/search?q={}&type={}'.format(parsed, type))
    response_object = json.loads(response.text)
    return response_object
| [
"json.loads",
"urllib.parse.quote_plus"
] | [((128, 151), 'urllib.parse.quote_plus', 'parse.quote_plus', (['query'], {}), '(query)\n', (144, 151), False, 'from urllib import parse\n'), ((339, 364), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (349, 364), False, 'import json\n')] |
#!/usr/bin/env python
# coding: utf-8
# ## E2E Xgboost MLFLOW
# In[45]:
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, pandas_udf,udf,lit
import azure.synapse.ml.predict as pcontext
import azure.synapse.ml.predict.utils._logger as synapse_predict_logger
import numpy as np
import pandas as pd
import xgboost as xgb
import mlflow
# In[46]:
spark.conf.set("spark.synapse.ml.predict.enabled","true")
# ## Train and Save Model
# ### Training
# In[47]:
data = np.random.rand(5, 10) # 5 entities, each contains 10 features
label = np.random.randint(1, size=5) # binary target
dtrain = xgb.DMatrix(data, label=label)
xgr = xgb.XGBRFRegressor(objective='reg:linear', n_estimators=10, seed=123)
xgr.fit(data, label)
# In[48]:
xgr.save_model('./model.json')
# In[49]:
mlflow.pyfunc.save_model(
data_path='./model.json',
path='./xgboost_pyfunc_model_path',
loader_module='mlflow.xgboost')
# In[50]:
MODEL_URI = './xgboost_pyfunc_model_path'
RETURN_TYPES = 'float'
# In[51]:
model = pcontext.bind_model(
return_types = RETURN_TYPES,
runtime = 'mlflow',
model_alias = 'xgb_model',
model_uri = MODEL_URI,).register()
# In[52]:
type(model)
# In[53]:
data = np.random.rand(5, 10)
df = spark.createDataFrame(pd.DataFrame(data))
df.createOrReplaceTempView("data")
df.show()
# In[54]:
predictions = spark.sql(
"""
SELECT PREDICT('xgb_model', *) AS predict FROM data
"""
).show()
| [
"mlflow.pyfunc.save_model",
"numpy.random.rand",
"azure.synapse.ml.predict.bind_model",
"numpy.random.randint",
"pandas.DataFrame",
"xgboost.DMatrix",
"xgboost.XGBRFRegressor"
] | [((500, 521), 'numpy.random.rand', 'np.random.rand', (['(5)', '(10)'], {}), '(5, 10)\n', (514, 521), True, 'import numpy as np\n'), ((571, 599), 'numpy.random.randint', 'np.random.randint', (['(1)'], {'size': '(5)'}), '(1, size=5)\n', (588, 599), True, 'import numpy as np\n'), ((626, 656), 'xgboost.DMatrix', 'xgb.DMatrix', (['data'], {'label': 'label'}), '(data, label=label)\n', (637, 656), True, 'import xgboost as xgb\n'), ((664, 733), 'xgboost.XGBRFRegressor', 'xgb.XGBRFRegressor', ([], {'objective': '"""reg:linear"""', 'n_estimators': '(10)', 'seed': '(123)'}), "(objective='reg:linear', n_estimators=10, seed=123)\n", (682, 733), True, 'import xgboost as xgb\n'), ((814, 937), 'mlflow.pyfunc.save_model', 'mlflow.pyfunc.save_model', ([], {'data_path': '"""./model.json"""', 'path': '"""./xgboost_pyfunc_model_path"""', 'loader_module': '"""mlflow.xgboost"""'}), "(data_path='./model.json', path=\n './xgboost_pyfunc_model_path', loader_module='mlflow.xgboost')\n", (838, 937), False, 'import mlflow\n'), ((1234, 1255), 'numpy.random.rand', 'np.random.rand', (['(5)', '(10)'], {}), '(5, 10)\n', (1248, 1255), True, 'import numpy as np\n'), ((1283, 1301), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (1295, 1301), True, 'import pandas as pd\n'), ((1047, 1161), 'azure.synapse.ml.predict.bind_model', 'pcontext.bind_model', ([], {'return_types': 'RETURN_TYPES', 'runtime': '"""mlflow"""', 'model_alias': '"""xgb_model"""', 'model_uri': 'MODEL_URI'}), "(return_types=RETURN_TYPES, runtime='mlflow',\n model_alias='xgb_model', model_uri=MODEL_URI)\n", (1066, 1161), True, 'import azure.synapse.ml.predict as pcontext\n')] |
from collections import defaultdict
from .common import IGraph
''' Remove edges to create even trees.
You are given a tree with an even number of nodes. Consider each connection between a parent and child node to be an "edge". You
would like to remove some of these edges, such that the disconnected subtrees that remain each have an even number of nodes.
For example, suppose your input is the following tree:
1
/ \
2 3
/ \
4 5
/ | \
6 7 8
In this case, if we remove the edge (3, 4), both resulting subtrees will be even.
Write a function that returns the maximum number of edges you can remove while still satisfying this requirement.
'''
def max_edges1(graph):
    """Return how many edges can be removed so every resulting subtree is even.

    For each non-root node we record its number of strict descendants; the
    edge above a node is removable exactly when that count is odd (i.e. the
    node's whole subtree holds an even number of nodes).
    """
    def count_subtree(graph: IGraph, node, sizes):
        # Return (size of the subtree rooted at `node`, sizes accumulator).
        total = 1
        for child in graph.neighbors(node):
            child_total, sizes = count_subtree(graph, child, sizes)
            sizes[child] += child_total - 1
            total += child_total
        return total, sizes

    sizes = defaultdict(int)
    _, descendant_counts = count_subtree(graph, graph.root(), sizes)
    return sum(1 for count in descendant_counts.values() if count % 2 == 1)
"collections.defaultdict"
] | [((1076, 1092), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (1087, 1092), False, 'from collections import defaultdict\n')] |
import sys
import cv2
from keras.models import load_model
from matplotlib import pyplot as plt
import time
model = load_model("models/model.h5")
def find_faces(image):
    """Detect faces in *image* with a Haar cascade.

    Parameters:
        image: grayscale image array (callers pass the gray image; the
            cascade file is re-loaded from disk on every call).

    Returns:
        Sequence of (x, y, w, h) bounding rectangles, one per detected face.
    """
    face_cascade = cv2.CascadeClassifier('data/haarcascade_frontalface_default.xml')
    # High minNeighbors (22) trades recall for fewer false positives.
    face_rects = face_cascade.detectMultiScale(
        image,
        scaleFactor = 1.1,
        minNeighbors = 22
    )
    return face_rects
def load_image(filepath):
    """Read *filepath* from disk; return (rgb_image, grayscale_image)."""
    bgr = cv2.imread(filepath)
    # OpenCV loads BGR; convert to RGB first, then derive grayscale from RGB.
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
    gray = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY)
    return rgb, gray
def predict(gray_image):
    """Yield one JSON-serializable result dict per face found in *gray_image*.

    Each dict holds the face's bounding box, the raw model outputs, and the
    winning emotion label.  Uses the module-level `model` loaded at import.
    """
    face_rects = find_faces(gray_image)
    for face_rect in face_rects:
        x, y, w, h = face_rect
        # Crop the face and reshape to the 48x48 single-channel batch input
        # the model expects.
        face = gray_image[y:y+h, x:x+w]
        face = cv2.resize(face, (48, 48)).reshape((1, 48, 48, 1))
        predicted_emotions = model.predict(face)[0]
        # NOTE(review): the winner uses index 1 for 'happiness', but the dict
        # below maps index 0 to 'happiness' -- one of the two orderings is
        # likely wrong; confirm against the model's class order.
        best_emotion = 'happiness' if predicted_emotions[1] > predicted_emotions[0] else 'neutral'
        # Create a json serializable result
        yield dict(
            border = dict(
                x = float(x),
                y = float(y),
                width = float(w),
                height = float(h),
            ),
            prediction = {'happiness': float(predicted_emotions[0]), 'neutral': float(predicted_emotions[1])},
            emotion = best_emotion
        )
def put_text(image, rect, text):
    """Draw *text* centered inside *rect* = (x, y, w, h) on *image* in green."""
    x, y, w, h = rect
    font = cv2.FONT_HERSHEY_SIMPLEX
    # Scale the font (and its stroke width) with the rectangle height.
    scale = h / 30.0
    thickness = int(round(scale * 1.5))
    (text_w, text_h), _ = cv2.getTextSize(text, font, scale, thickness)
    # cv2.putText anchors at the lower-left corner of the text, so offset
    # the rectangle center by half the rendered text size.
    origin_x = x + (w // 2) - (text_w // 2)
    origin_y = y + (h // 2) + (text_h // 2)
    cv2.putText(
        image, text,
        (origin_x, origin_y),
        font, scale, (0, 255, 0), thickness
    )
def draw_face_info(image, face_info):
    """Outline the face described by *face_info* in red and label its emotion."""
    border = face_info['border']
    x = int(border['x'])
    y = int(border['y'])
    w = int(border['width'])
    h = int(border['height'])
    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)
    # Write the emotion label across the top fifth of the bounding box.
    put_text(image, (x, y, w, h // 5), face_info['emotion'])
def show_image(image, title='Result'):
    """Display *image* in a matplotlib window (blocks until it is closed)."""
    plt.subplot(111)
    plt.imshow(image)
    plt.title(title)
    plt.show()
if __name__ == '__main__':
    # Annotate every detected face on the image given as argv[1], then
    # report the elapsed processing time (timer stops before the blocking
    # show_image() call).
    # start time
    start_time = time.time()
    image, gray_image = load_image(sys.argv[1])
    for face_info in predict(gray_image):
        print(face_info)
        draw_face_info(image, face_info)
    # end time
    end_time = time.time()
    show_image(image)
    response_time = end_time - start_time
    print(response_time)
| [
"cv2.rectangle",
"matplotlib.pyplot.imshow",
"keras.models.load_model",
"matplotlib.pyplot.title",
"cv2.resize",
"cv2.putText",
"matplotlib.pyplot.subplot",
"cv2.cvtColor",
"time.time",
"cv2.CascadeClassifier",
"cv2.getTextSize",
"cv2.imread",
"matplotlib.pyplot.show"
] | [((117, 146), 'keras.models.load_model', 'load_model', (['"""models/model.h5"""'], {}), "('models/model.h5')\n", (127, 146), False, 'from keras.models import load_model\n'), ((191, 256), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""data/haarcascade_frontalface_default.xml"""'], {}), "('data/haarcascade_frontalface_default.xml')\n", (212, 256), False, 'import cv2\n'), ((442, 462), 'cv2.imread', 'cv2.imread', (['filepath'], {}), '(filepath)\n', (452, 462), False, 'import cv2\n'), ((475, 513), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (487, 513), False, 'import cv2\n'), ((531, 570), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2GRAY'], {}), '(image, cv2.COLOR_RGB2GRAY)\n', (543, 570), False, 'import cv2\n'), ((1571, 1626), 'cv2.getTextSize', 'cv2.getTextSize', (['text', 'font', 'font_scale', 'font_thickness'], {}), '(text, font, font_scale, font_thickness)\n', (1586, 1626), False, 'import cv2\n'), ((1839, 1954), 'cv2.putText', 'cv2.putText', (['image', 'text', '(lower_left_text_x, lower_left_text_y)', 'font', 'font_scale', '(0, 255, 0)', 'font_thickness'], {}), '(image, text, (lower_left_text_x, lower_left_text_y), font,\n font_scale, (0, 255, 0), font_thickness)\n', (1850, 1954), False, 'import cv2\n'), ((2222, 2282), 'cv2.rectangle', 'cv2.rectangle', (['image', '(x, y)', '(x + w, y + h)', '(0, 0, 255)', '(2)'], {}), '(image, (x, y), (x + w, y + h), (0, 0, 255), 2)\n', (2235, 2282), False, 'import cv2\n'), ((2434, 2444), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2442, 2444), True, 'from matplotlib import pyplot as plt\n'), ((2514, 2525), 'time.time', 'time.time', ([], {}), '()\n', (2523, 2525), False, 'import time\n'), ((2713, 2724), 'time.time', 'time.time', ([], {}), '()\n', (2722, 2724), False, 'import time\n'), ((2376, 2392), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (2387, 2392), True, 'from matplotlib import pyplot as plt\n'), 
((2394, 2411), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (2404, 2411), True, 'from matplotlib import pyplot as plt\n'), ((2413, 2429), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2422, 2429), True, 'from matplotlib import pyplot as plt\n'), ((788, 814), 'cv2.resize', 'cv2.resize', (['face', '(48, 48)'], {}), '(face, (48, 48))\n', (798, 814), False, 'import cv2\n')] |
import logging
from os.path import isfile, join as pjoin
from os import environ
try:
from delphin import tsdb
except ImportError:
raise ImportError(
'Could not import pyDelphin module. Get it from here:\n'
' https://github.com/goodmami/pydelphin'
)
# ECC 2021-07-26: the lambda for i-comment assumes there will be a translation, but it's not always present,
# so this is a helper function for the lambda to use
def build_comment(igt):
    """Build the i-comment cell: gloss line plus " // " + translation if present.

    The items of the first glosses tier are joined with spaces; if a
    translations tier exists and has content, its first item is appended
    after " // ".  When no usable translation exists, the gloss-only
    comment is returned.
    """
    comment = " ".join(item.get_content() for item in next(igt.select(type="glosses"), []))
    try:
        comment += " // " + str(next(igt.select(type="translations"), [""])[0].get_content())
        return comment
    except (AttributeError, IndexError, TypeError):
        # No usable translation: the [""] fallback has no get_content(), or
        # the tier is empty.  (Narrowed from the original bare `except:`.)
        return comment
# EMB 2019-04-05 Previously, the lamba part was in prepare_config, but in that case, the last mapper was used for all keys, and I couldn't figure out why. Nor could I see why the lambas weren't called right away. Moving that into DEFAULT_CELLS solved the problem, so I could hae both i-input and i-comment filled in.
DEFAULT_CELLS = [
# i-input is a string of either the first phrase (preferred) or all words
#('i-input', lambda igt: eval('next(igt.select(type="phrases"), [""])[0].value() or '
# '" ".join(item.get_content() '
# ' for item in next(igt.select(type="words"),[]))')),
# KPH 2019-09-30 The first phrases tier is not preferred if we want to target the morpheme segmented line. If the data was converted from flex
# the first phrase tier with id="l" is the language line. We want the phrase tier with id="p"
('i-input', lambda igt: eval('next(igt.select(id="p"), [""])[0].value() or '
'next(igt.select(type="phrases"), [""])[0].value() or '
'" ".join(item.get_content() '
' for item in next(igt.select(type="words"),[]))')),
# i-comment is the glosses concatenated, followed by the translation
('i-comment', lambda igt: build_comment(igt)),
('i-wf', lambda igt: eval('0 if igt.get_meta("judgment") else 1')),
]
def xigt_export(xc, outpath, config=None):
    """Export Xigt corpus *xc* as a [incr tsdb()] profile at *outpath*.

    Requires config['relations'] to point at an existing relations (schema)
    file; otherwise logs an error and returns without writing anything.
    """
    config = prepare_config(config)
    if not config.get('relations') or not isfile(config['relations']):
        logging.error('Relations file required for [incr tsdb()] export.')
        return
    # ECC 2021-07-26: fix to work with new version of pydelphin
    # read in the schema, export the corpus, initialize the db, and write it in an item file
    config['schema'] = tsdb.read_schema(config['relations'])
    items = export_corpus(xc, config)
    # files=False: only the 'item' relation is written below -- presumably
    # this skips creating the other (empty) data files; confirm in tsdb docs.
    tsdb.initialize_database(outpath, config['schema'], files=False)
    tsdb.write(outpath, 'item', items)
def prepare_config(config):
    """Fill in defaults for the export configuration and return it.

    Ensures the i-id numbering defaults, tries to locate a Relations file
    under $LOGONROOT when none was given, and installs the DEFAULT_CELLS
    mappers.
    """
    config = {} if config is None else config
    config.setdefault('i-id_start', 0)
    config.setdefault('i-id_skip', 10)
    # Fall back to the stock English Relations file shipped with LOGON.
    if 'relations' not in config and 'LOGONROOT' in environ:
        candidate = pjoin(
            environ['LOGONROOT'],
            'lingo/lkb/src/tsdb/skeletons/english/Relations'
        )
        if isfile(candidate):
            logging.info('Attempting to get relations file from {}'
                         .format(candidate))
            config['relations'] = candidate
    config['cells'] = DEFAULT_CELLS
    return config
def export_corpus(xc, config):
    """Build the list of tsdb 'item' records, one per IGT in corpus *xc*.

    i-ids start at config['i-id_start'] and step by config['i-id_skip'];
    the current id is handed to export_igt via the private
    '__i-id_current__' key (config is mutated in place).
    """
    id_start = config['i-id_start']
    id_skip = config['i-id_skip']
    items = []
    for i, igt in enumerate(xc):
        config['__i-id_current__'] = id_start + (i * id_skip)
        logging.debug('Exporting {}'.format(str(igt.id)))
        # make a list of tsdb records
        items.append(tsdb.make_record(export_igt(igt, config), config['schema']['item']))
    return items
def export_igt(igt, config):
    """Map one IGT to a row dict using the cell mappers in config['cells'].

    Re-raises SyntaxError (after logging the offending key) when a mapper's
    expression is malformed.  i-length counts the whitespace-separated
    tokens of the i-input cell, which must therefore be among the cells.
    """
    row = {'i-id': config['__i-id_current__']}
    for key, mapper in config['cells']:
        try:
            row[key] = mapper(igt)
        except SyntaxError:
            logging.error('Malformed cell mapper expression for {}'
                          .format(key))
            raise
    row['i-length'] = len(row['i-input'].split())
    return row
| [
"delphin.tsdb.initialize_database",
"delphin.tsdb.read_schema",
"os.path.join",
"os.path.isfile",
"logging.error",
"delphin.tsdb.write"
] | [((2440, 2477), 'delphin.tsdb.read_schema', 'tsdb.read_schema', (["config['relations']"], {}), "(config['relations'])\n", (2456, 2477), False, 'from delphin import tsdb\n'), ((2520, 2584), 'delphin.tsdb.initialize_database', 'tsdb.initialize_database', (['outpath', "config['schema']"], {'files': '(False)'}), "(outpath, config['schema'], files=False)\n", (2544, 2584), False, 'from delphin import tsdb\n'), ((2589, 2623), 'delphin.tsdb.write', 'tsdb.write', (['outpath', '"""item"""', 'items'], {}), "(outpath, 'item', items)\n", (2599, 2623), False, 'from delphin import tsdb\n'), ((2177, 2243), 'logging.error', 'logging.error', (['"""Relations file required for [incr tsdb()] export."""'], {}), "('Relations file required for [incr tsdb()] export.')\n", (2190, 2243), False, 'import logging\n'), ((2899, 2976), 'os.path.join', 'pjoin', (["environ['LOGONROOT']", '"""lingo/lkb/src/tsdb/skeletons/english/Relations"""'], {}), "(environ['LOGONROOT'], 'lingo/lkb/src/tsdb/skeletons/english/Relations')\n", (2904, 2976), True, 'from os.path import isfile, join as pjoin\n'), ((3022, 3038), 'os.path.isfile', 'isfile', (['rel_path'], {}), '(rel_path)\n', (3028, 3038), False, 'from os.path import isfile, join as pjoin\n'), ((2140, 2167), 'os.path.isfile', 'isfile', (["config['relations']"], {}), "(config['relations'])\n", (2146, 2167), False, 'from os.path import isfile, join as pjoin\n')] |
import numpy as np
import matplotlib.pyplot as plt
import g_functions as g_f
# Model parameters handed to g_functions; R1/R2 are presumably the two
# radius/scale parameters of the Wishart ensemble and Delta the noise
# level -- confirm against g_functions' documentation.
R1 = 2
R2 = .6
M = 500
Delta = .1
NB_POINTS = 2**10
EPSILON_IMAG = 1e-8
parameters = {
    'M' : M,
    'R1' : R1,
    'R2' : R2,
    'NB_POINTS' : NB_POINTS,
    'EPSILON_IMAG' : EPSILON_IMAG,
    'verbosity' : 1,
    'ENSAMBLE' : 'Wishart'
}
# Compute sample
S, Y = g_f.make_sample(parameters, Delta)
# Compute rho from theory
rho_theory = g_f.find_rho(parameters, Delta)
# Compute denoising function from theory (computed but not plotted below)
denoiser_plot = np.zeros(parameters["NB_POINTS"])
for (i_z, z) in enumerate(rho_theory["zs"]):
    denoiser_plot[i_z] = g_f.denoiser(z, parameters, Delta)
# Overlay the empirical spectrum histogram with the theoretical density.
plt.hist(g_f.find_spectrum(Y), 80, density=True)
# plt.hist(g_f.find_spectrum(g_f.denoise_sample(Y, parameters, Delta)), 160, density=True)
plt.plot(rho_theory['zs'],rho_theory['rho'],color='red')
# plt.plot(rho_theory['zs'],denoiser_plot)
plt.title(f"R2 = {parameters['R2']}, R1 = {parameters['R1']}")
plt.ylabel("Frequency")
plt.xlabel("Singular value")
plt.show()
"g_functions.find_rho",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"g_functions.denoiser",
"numpy.zeros",
"g_functions.find_spectrum",
"g_functions.make_sample",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] | [((398, 432), 'g_functions.make_sample', 'g_f.make_sample', (['parameters', 'Delta'], {}), '(parameters, Delta)\n', (413, 432), True, 'import g_functions as g_f\n'), ((474, 505), 'g_functions.find_rho', 'g_f.find_rho', (['parameters', 'Delta'], {}), '(parameters, Delta)\n', (486, 505), True, 'import g_functions as g_f\n'), ((563, 596), 'numpy.zeros', 'np.zeros', (["parameters['NB_POINTS']"], {}), "(parameters['NB_POINTS'])\n", (571, 596), True, 'import numpy as np\n'), ((846, 904), 'matplotlib.pyplot.plot', 'plt.plot', (["rho_theory['zs']", "rho_theory['rho']"], {'color': '"""red"""'}), "(rho_theory['zs'], rho_theory['rho'], color='red')\n", (854, 904), True, 'import matplotlib.pyplot as plt\n'), ((946, 1008), 'matplotlib.pyplot.title', 'plt.title', (['f"""R2 = {parameters[\'R2\']}, R1 = {parameters[\'R1\']}"""'], {}), '(f"R2 = {parameters[\'R2\']}, R1 = {parameters[\'R1\']}")\n', (955, 1008), True, 'import matplotlib.pyplot as plt\n'), ((1009, 1032), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency"""'], {}), "('Frequency')\n", (1019, 1032), True, 'import matplotlib.pyplot as plt\n'), ((1033, 1061), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Singular value"""'], {}), "('Singular value')\n", (1043, 1061), True, 'import matplotlib.pyplot as plt\n'), ((1062, 1072), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1070, 1072), True, 'import matplotlib.pyplot as plt\n'), ((667, 701), 'g_functions.denoiser', 'g_f.denoiser', (['z', 'parameters', 'Delta'], {}), '(z, parameters, Delta)\n', (679, 701), True, 'import g_functions as g_f\n'), ((714, 734), 'g_functions.find_spectrum', 'g_f.find_spectrum', (['Y'], {}), '(Y)\n', (731, 734), True, 'import g_functions as g_f\n')] |
from django.contrib import admin
# Register your models here.
from .models import Register
admin.site.register(Register) | [
"django.contrib.admin.site.register"
] | [((93, 122), 'django.contrib.admin.site.register', 'admin.site.register', (['Register'], {}), '(Register)\n', (112, 122), False, 'from django.contrib import admin\n')] |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
import datetime as dt
from statsmodels.stats.multitest import fdrcorrection
from pylab import savefig
# FUNCTIONS YOU CAN USE:
# analyses(filepath) spits out a nifty heatmap to let you check correlation between variables
#
# regress(option, df) churns out a saucy graph of the linear regression for the variables you provided, where
# option is 'snr_total' or 'tsnr', whichever you want to make the dependent variable of your model
# df is the pandas DataFrame containing your data. To modify which variables you want in your model, you'll
# have to directly modify the regress function
# NOTABLE FILENAMES
# ../data/extractions/p2_BOLD.csv - all dates for p2_BOLD
# ../data/extractions/p2Xs4X35mm_BOLD.csv - all dates for p2Xs4X35mm_BOLD
# ../data/extractions/anat.csv - all possible dates for anatomical data
def filter(option, df):
is_p2 = df['Filetype'] == "task-rest_acq-p2_bold.json"
is_x = df['Filetype'] == "task-rest_acq-p2Xs4X35mm_bold.json"
if option == 'x':
return df[is_x]
elif option == 'p2':
return df[is_p2]
def analyses(filepath):
files = pd.read_csv(filepath)
# FIRST CHECK: CONVERSION SOFTWARE VERSIONS
check = files.iloc[0, 7]
valid = True
for i in files.index:
if check != files.iloc[i, 7]:
valid = False
print("All Conversion Softwares are the same: " + str(valid))
# SECOND CHECK: HEATMAP
figure = sns.heatmap(files.corr(), cmap=sns.diverging_palette(h_neg=240, h_pos=10, n=9, sep=1, center="dark"), center=0)
figure
save = figure.get_figure()
save.savefig('heatmap.svg', pad_inches = 0.1)
def add_seasonal_simple(df, col='Date', start='2017-01-01'):
# Add a very simplistic seasonal regressors as cos and sin since some date in a year
time_delta = df[col] - np.datetime64(start)
time_delta_rad = time_delta.apply(lambda d: d.days) * 2 * np.pi / 365.25
df['Seasonal (sin)'] = np.sin(time_delta_rad)
df['Seasonal (cos)'] = np.cos(time_delta_rad)
def Ftest(model, var_prefix, queue, prints=False):
var_columns = [c for c in model.params.index if c.startswith(var_prefix)]
if var_columns:
f_test = model.f_test(' = '.join(var_columns) + " = 0")
if f_test.pvalue < 0.05:
if var_prefix == "Shim":
for i in range(8):
queue.append("Shim" + str(i+1))
elif var_prefix == "IOPD":
for i in range(6):
queue.append("IOPD" + str(i+1))
if prints:
print("%s F-test: %s" % (var_prefix, f_test))
return f_test
else:
if prints:
print("No %s variables in the model" % var_prefix)
return None
# copy pasted from nipy function, renamed from _orthogonalize
def orthogonalize(X):
""" Orthogonalize every column of design `X` w.r.t preceding columns
Parameters
----------
X: array of shape(n, p), the data to be orthogonalized
Returns
-------
X: after orthogonalization
Notes
-----
X is changed in place. the columns are not normalized
"""
if X.size == X.shape[0]:
return X
for i in range(1, X.shape[1]):
X[:, i] -= np.dot(X[:, i], np.dot(X[:, :i], np.linalg.pinv(X[:, :i])))
return X
def regress(target_variable, model_df, plot=True, print_summary=True, add_qa=True, add_seasonal=True, real_data=False):
"""
creates a regression graph plotted against actual data from certain QA metrics
Parameters
----------
target_variable: takes str value of either snr_total or tsnr to model against
model_df : takes pandas DataFrame with data to be used for predictive modeling
plot : boolean to turn the plotted graph on/off
print_summary : boolean to turn the printed summary of OLS regression on/off
add_qa : boolean to add/not add snr_total_qa into list of variables to be modeled
add_seasonal : boolean to add/not add seasonal variables into list of variables to be modeled
real_data : boolean to indicate whether or not the pandas DataFrame being fed in is from real data or not
"""
if type(model_df) is not pd.core.frame.DataFrame:
return "DataFrame must be of type pandas.core.frame.DataFrame"
########## adding seasonal curves to the model
add_seasonal_simple(model_df)
########## Converting date to a format that can be parsed by statsmodels API
model_df = model_df.copy()
date_df = model_df['Date']
model_df['Date'] = pd.to_datetime(model_df['Date'], format="%Y%m%d")
model_df['Date'] = model_df['Date'].map(lambda x: x.toordinal())
f_tests_todo = ['IOPD']
excluded_cols = ['Date', 'IOPD1', 'IOPD2', 'IOPD3', 'IOPD4', 'IOPD5', 'IOPD6', 'Seasonal (sin)', 'Seasonal (cos)']
seasonal_cols = ['Seasonal (sin)', 'Seasonal (cos)',]
cols = ['Date']
if not real_data:
# preparing model_df for orthogonalization
cols += ['AcquisitionTime', 'SAR', 'TxRefAmp',
'IOPD1', 'IOPD2', 'IOPD3', 'IOPD4', 'IOPD5', 'IOPD6']
if add_seasonal:
cols += seasonal_cols
else:
cols += ['age', 'sex_male', 'PatientWeight',]
if add_seasonal:
cols += seasonal_cols
if add_qa:
cols += ['snr_total_qa']
cols += ['IOPD1_real', 'IOPD2_real', 'IOPD3_real', 'IOPD4_real', 'IOPD5_real', 'IOPD6_real']
if add_seasonal:
f_tests_todo += ['Seasonal']
cols.append(target_variable)
model_df = model_df[cols]
# There is apparently a sample date (20170626) with SAR being unknown None/NaN
# For now we will just filter out those samples
if 'SAR' in model_df.columns:
finite_SAR = np.isfinite(model_df['SAR'])
if not np.all(finite_SAR):
print("Following dates didn't have SAR, excluding them: %s" % str(model_df['Date'][~finite_SAR]))
model_df = model_df[finite_SAR]
orthogonalized_df = model_df.drop(target_variable, axis=1) # avoid orthogonalizing target variable
cols = cols[:-1] # remove target variable from column list
# orthogonalize dataframe after its conversion to NumPy array, then convert back and replace in original model_df
model_array = orthogonalize(orthogonalized_df.to_numpy())
orthogonalized_df = pd.DataFrame(model_array)
orthogonalized_df.columns = [cols]
orthogonalized_df[target_variable] = pd.Series(model_df[target_variable])
model_df = orthogonalized_df
# add datetime64[ns] formatted date time
model_df.columns=[x[0] for x in model_df.columns]
model_df['Date'] = pd.to_datetime(model_df['Date'])
model_df = model_df.drop('Date', axis=1)
model_df['Date'] = date_df
########## Assigning independent and dependent variables
model_vars = []
for item in model_df.std().iteritems():
if item[0] != 'Date' and item[0] != target_variable:
model_vars.append(item[0])
X = model_df[model_vars]
y = model_df[target_variable]
X = X.sub(X.mean())
X = sm.add_constant(X)
model_df = sm.add_constant(model_df)
########## modeling predictions
model = sm.OLS(y, X).fit()
predictions = model.predict(X)
################ CODE FOR TESTING INDIVIDUAL VARIABLE EFFECTS ####################
significant_variables = []
F_tests_pvals = {
v: float(Ftest(model, v, significant_variables).pvalue)
for v in f_tests_todo
}
# get p-values
for key, value in dict(model.pvalues).items():
if key not in significant_variables and value < 0.05 or key.lower() == 'const':
# identify statistically insignificant variables in df
significant_variables.append(key)
######## set statistically insignificant variables to 0, then predict
partial_fits = {} # partial_fits = {}
for variable in significant_variables:
X2 = X.copy(True) # prepare for mods
for col in X2:
if col != variable:
X2[col] = 0
partial_fits[str(variable)] = model.predict(X2)
if print_summary:
print("Statistically significant variables: " + str(significant_variables))
################ END CODE FOR TESTING INDIVIDUAL VARIABLE EFFECTS ####################
# Functionality for carrying out FDR correction
outvars = {} # dict containing all predictive variables and their p values from the model
for var in cols:
is_f_test = False
for f_test in f_tests_todo:
if var.startswith(f_test):
is_f_test = True
break
if is_f_test:
continue
if var not in excluded_cols:
var_pvalue = getattr(model.pvalues, var)
outvars[var] = var_pvalue
outvars.update(F_tests_pvals) # add previously conducted F test p values to the outvars
FDR_tuple = fdrcorrection(list(outvars.values())) # actual FDR test conduct
t_f = list(FDR_tuple[0]) # split tuple into true/false array
FDR_pvals = list(FDR_tuple[1]) # split tuple into p value array
print("FDR-corrected p-values:")
for (var, value), fdr_pval, is_sign in zip(outvars.items(), FDR_pvals, t_f):
print("%15s | Original p-value: %8.3g" % (var, value) +
" | FDR-corrected p-value: %8.3g%s" % (fdr_pval, '**' if is_sign else ''))
print("\n")
# giving additional data
if print_summary:
print(model.summary())
print("AIC: " + str(model.aic))
print("BIC: " + str(model.bic))
if not plot:
return model
######### converting the above predictions to a format that can be plotted
plot_df = predictions.to_frame() # new DataFrame containing only data needed for the plot
plot_df.columns = ['full fit']
plot_df = plot_df.join(model_df['Date'])
plot_df = plot_df.join(model_df[target_variable])
summation_df = None
for key, value in partial_fits.items():
column = value.to_frame()
column.columns = ['partial fit']
if summation_df is None:
summation_df = column # used to add up the values
else:
summation_df = summation_df.add(column, axis=1)
plot_df = pd.concat([plot_df, summation_df], axis=1)
# plotting the graph
plt.figure(figsize=(15, 6))
ax = sns.lineplot(x="Date", y=target_variable, data=plot_df, color="#000000")
# plotting partial fit
ax_partial = plt.twinx()
sns.lineplot(x="Date", y="full fit", data=plot_df, color="r", ax=ax)
if partial_fits:
sns.lineplot(x="Date", y="partial fit", data=plot_df, color="#ffcccc", ax=ax_partial)
plt.ylim(145, 305)
ax_partial.legend(['partial fit'])
ax.legend(['actual', 'full fit'], loc='upper left')
plt.savefig("test.svg")
return model
def scrape_var_significance(targets, p_var, df):
dummy = [] # dud list for Seasonal f test comparison
columns = ['Variable', p_var + ' p value', 'R2 value']
result = pd.DataFrame(columns = columns)
raw_pvals = []
for target in targets:
input_df = pd.DataFrame(df,columns=['Date', 'sid', 'ses', target, 'age', 'tsnr',
'snr_total_qa', 'IOPD1_real', 'IOPD2_real', 'IOPD3_real',
'IOPD4_real', 'IOPD5_real', 'IOPD6_real', 'sex_male', 'PatientWeight'])
model = regress(target, input_df, plot=False, print_summary=False, real_data=True)
if p_var == 'Seasonal':
seasonal_ftest = Ftest(model, 'Seasonal', dummy).pvalue
result.loc[len(result)] = [target, seasonal_ftest, model.rsquared]
raw_pvals.append(seasonal_ftest)
else:
var_pval = model.pvalues[p_var]
result.loc[len(result)] = [target, var_pval, model.rsquared]
raw_pvals.append(var_pval)
fdr_df = pd.DataFrame({'FDR-corrected': fdrcorrection(raw_pvals)[1].tolist()})
result = result.join(fdr_df)
return result
| [
"numpy.linalg.pinv",
"pandas.read_csv",
"numpy.isfinite",
"numpy.sin",
"statsmodels.api.OLS",
"pandas.to_datetime",
"matplotlib.pyplot.twinx",
"numpy.datetime64",
"pandas.DataFrame",
"matplotlib.pyplot.ylim",
"statsmodels.stats.multitest.fdrcorrection",
"matplotlib.pyplot.savefig",
"seaborn.... | [((1308, 1329), 'pandas.read_csv', 'pd.read_csv', (['filepath'], {}), '(filepath)\n', (1319, 1329), True, 'import pandas as pd\n'), ((2167, 2189), 'numpy.sin', 'np.sin', (['time_delta_rad'], {}), '(time_delta_rad)\n', (2173, 2189), True, 'import numpy as np\n'), ((2217, 2239), 'numpy.cos', 'np.cos', (['time_delta_rad'], {}), '(time_delta_rad)\n', (2223, 2239), True, 'import numpy as np\n'), ((4849, 4898), 'pandas.to_datetime', 'pd.to_datetime', (["model_df['Date']"], {'format': '"""%Y%m%d"""'}), "(model_df['Date'], format='%Y%m%d')\n", (4863, 4898), True, 'import pandas as pd\n'), ((6673, 6698), 'pandas.DataFrame', 'pd.DataFrame', (['model_array'], {}), '(model_array)\n', (6685, 6698), True, 'import pandas as pd\n'), ((6779, 6815), 'pandas.Series', 'pd.Series', (['model_df[target_variable]'], {}), '(model_df[target_variable])\n', (6788, 6815), True, 'import pandas as pd\n'), ((6976, 7008), 'pandas.to_datetime', 'pd.to_datetime', (["model_df['Date']"], {}), "(model_df['Date'])\n", (6990, 7008), True, 'import pandas as pd\n'), ((7422, 7440), 'statsmodels.api.add_constant', 'sm.add_constant', (['X'], {}), '(X)\n', (7437, 7440), True, 'import statsmodels.api as sm\n'), ((7461, 7486), 'statsmodels.api.add_constant', 'sm.add_constant', (['model_df'], {}), '(model_df)\n', (7476, 7486), True, 'import statsmodels.api as sm\n'), ((10718, 10760), 'pandas.concat', 'pd.concat', (['[plot_df, summation_df]'], {'axis': '(1)'}), '([plot_df, summation_df], axis=1)\n', (10727, 10760), True, 'import pandas as pd\n'), ((10795, 10822), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 6)'}), '(figsize=(15, 6))\n', (10805, 10822), True, 'import matplotlib.pyplot as plt\n'), ((10833, 10905), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': '"""Date"""', 'y': 'target_variable', 'data': 'plot_df', 'color': '"""#000000"""'}), "(x='Date', y=target_variable, data=plot_df, color='#000000')\n", (10845, 10905), True, 'import seaborn as sns\n'), ((10955, 10966), 
'matplotlib.pyplot.twinx', 'plt.twinx', ([], {}), '()\n', (10964, 10966), True, 'import matplotlib.pyplot as plt\n'), ((10971, 11039), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': '"""Date"""', 'y': '"""full fit"""', 'data': 'plot_df', 'color': '"""r"""', 'ax': 'ax'}), "(x='Date', y='full fit', data=plot_df, color='r', ax=ax)\n", (10983, 11039), True, 'import seaborn as sns\n'), ((11290, 11313), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""test.svg"""'], {}), "('test.svg')\n", (11301, 11313), True, 'import matplotlib.pyplot as plt\n'), ((11516, 11545), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'columns'}), '(columns=columns)\n', (11528, 11545), True, 'import pandas as pd\n'), ((2042, 2062), 'numpy.datetime64', 'np.datetime64', (['start'], {}), '(start)\n', (2055, 2062), True, 'import numpy as np\n'), ((6073, 6101), 'numpy.isfinite', 'np.isfinite', (["model_df['SAR']"], {}), "(model_df['SAR'])\n", (6084, 6101), True, 'import numpy as np\n'), ((11069, 11159), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': '"""Date"""', 'y': '"""partial fit"""', 'data': 'plot_df', 'color': '"""#ffcccc"""', 'ax': 'ax_partial'}), "(x='Date', y='partial fit', data=plot_df, color='#ffcccc', ax=\n ax_partial)\n", (11081, 11159), True, 'import seaborn as sns\n'), ((11163, 11181), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(145)', '(305)'], {}), '(145, 305)\n', (11171, 11181), True, 'import matplotlib.pyplot as plt\n'), ((11618, 11826), 'pandas.DataFrame', 'pd.DataFrame', (['df'], {'columns': "['Date', 'sid', 'ses', target, 'age', 'tsnr', 'snr_total_qa', 'IOPD1_real',\n 'IOPD2_real', 'IOPD3_real', 'IOPD4_real', 'IOPD5_real', 'IOPD6_real',\n 'sex_male', 'PatientWeight']"}), "(df, columns=['Date', 'sid', 'ses', target, 'age', 'tsnr',\n 'snr_total_qa', 'IOPD1_real', 'IOPD2_real', 'IOPD3_real', 'IOPD4_real',\n 'IOPD5_real', 'IOPD6_real', 'sex_male', 'PatientWeight'])\n", (11630, 11826), True, 'import pandas as pd\n'), ((1680, 1749), 'seaborn.diverging_palette', 
'sns.diverging_palette', ([], {'h_neg': '(240)', 'h_pos': '(10)', 'n': '(9)', 'sep': '(1)', 'center': '"""dark"""'}), "(h_neg=240, h_pos=10, n=9, sep=1, center='dark')\n", (1701, 1749), True, 'import seaborn as sns\n'), ((6117, 6135), 'numpy.all', 'np.all', (['finite_SAR'], {}), '(finite_SAR)\n', (6123, 6135), True, 'import numpy as np\n'), ((7545, 7557), 'statsmodels.api.OLS', 'sm.OLS', (['y', 'X'], {}), '(y, X)\n', (7551, 7557), True, 'import statsmodels.api as sm\n'), ((3511, 3535), 'numpy.linalg.pinv', 'np.linalg.pinv', (['X[:, :i]'], {}), '(X[:, :i])\n', (3525, 3535), True, 'import numpy as np\n'), ((12473, 12497), 'statsmodels.stats.multitest.fdrcorrection', 'fdrcorrection', (['raw_pvals'], {}), '(raw_pvals)\n', (12486, 12497), False, 'from statsmodels.stats.multitest import fdrcorrection\n')] |
import pytest
from eunomia.config._default import Default
from eunomia.config.nodes import ConfigNode
from tests.test_backend_obj import _make_config_group
# ========================================================================= #
# Test YAML & Custom Tags #
# ========================================================================= #
def _resolver(string):
if isinstance(string, ConfigNode):
return string.get_config_value({}, {}, {})
return string
def _resolve_default(group, default):
# we are testing this! \/ \/ \/
g, c, pkg, is_self = default.to_resolved_components(group, _resolver)
# we are testing this! /\ /\ /\
return g.abs_path, [c.abs_path for c in c], pkg
def test_defaults():
root = _make_config_group(suboption=None, suboption2=None, package1='<option>', package2='asdf.fdsa')
d = root.get_option_recursive('default')
s1 = root.get_group_recursive('subgroup')
s1o1 = root.get_option_recursive('subgroup/suboption1')
s1o2 = root.get_option_recursive('subgroup/suboption2')
s2 = root.get_group_recursive('subgroup2')
s2s3 = root.get_group_recursive('subgroup2/subgroup3')
s2s3o1 = root.get_option_recursive('subgroup2/subgroup3/suboption1')
s2s3o2 = root.get_option_recursive('subgroup2/subgroup3/suboption2')
# multiple different versions
assert _resolve_default(root, Default(d)) == ('/', ['/default'], ())
assert _resolve_default(root, Default({root: d})) == ('/', ['/default'], ())
assert _resolve_default(root, Default({root: [d]})) == ('/', ['/default'], ())
assert _resolve_default(root, Default({'/': 'default'})) == ('/', ['/default'], ())
assert _resolve_default(root, Default({'/': '/default'})) == ('/', ['/default'], ())
assert _resolve_default(root, Default({'/': ['/default']})) == ('/', ['/default'], ())
assert _resolve_default(root, Default({'/': ['default']})) == ('/', ['/default'], ())
# these should throw errors, option points to option
with pytest.raises(KeyError, match='key .* is not a group'): _resolve_default(root, Default({'/default': ['default']}))
with pytest.raises(KeyError, match='key .* is not a group'): _resolve_default(root, Default({d: d}))
with pytest.raises(KeyError, match='key .* is not a group'): _resolve_default(root, Default({d: [d]}))
# allow group to represent all suboptions
assert _resolve_default(root, Default('')) == ('/', ['/default'], ()) # technically this is valid, its just confusing... should it be disabled?
assert _resolve_default(root, Default('default')) == ('/', ['/default'], ()) # we want relative support in case we use group.absorb for example
assert _resolve_default(root, Default('/')) == ('/', ['/default'], ())
assert _resolve_default(root, Default('/default')) == ('/', ['/default'], ())
assert _resolve_default(root, Default(root)) == ('/', ['/default'], ())
assert _resolve_default(root, Default({'/': '/'})) == ('/', ['/default'], ())
assert _resolve_default(root, Default({'/': '*'})) == ('/', ['/default'], ())
assert _resolve_default(root, Default({root: '*'})) == ('/', ['/default'], ())
assert _resolve_default(root, Default({root: root})) == ('/', ['/default'], ())
assert _resolve_default(root, Default({'/': root})) == ('/', ['/default'], ())
# these should throw errors, group points to group in list
with pytest.raises(KeyError, match='value in list .* is not an option'): _resolve_default(root, Default({'/': ['subgroup']}))
with pytest.raises(KeyError, match='value in list .* is not an option'): _resolve_default(root, Default({'/': ['default', 'subgroup']}))
# check parents
assert _resolve_default(root, Default(d)) == ('/', ['/default'], ())
assert _resolve_default(s1, Default(d)) == ('/', ['/default'], ())
assert _resolve_default(s2, Default(d)) == ('/', ['/default'], ())
assert _resolve_default(s2s3, Default(d)) == ('/', ['/default'], ())
# check others
assert _resolve_default(root, Default(s1)) == ('/subgroup', ['/subgroup/suboption1', '/subgroup/suboption2'], ('subgroup',))
assert _resolve_default(s1, Default(s1)) == ('/subgroup', ['/subgroup/suboption1', '/subgroup/suboption2'], ('subgroup',))
assert _resolve_default(s2, Default(s1)) == ('/subgroup', ['/subgroup/suboption1', '/subgroup/suboption2'], ('subgroup',))
assert _resolve_default(s2s3, Default(s1)) == ('/subgroup', ['/subgroup/suboption1', '/subgroup/suboption2'], ('subgroup',))
assert _resolve_default(root, Default({s1: '*'})) == ('/subgroup', ['/subgroup/suboption1', '/subgroup/suboption2'], ('subgroup',))
assert _resolve_default(root, Default({s1: s1})) == ('/subgroup', ['/subgroup/suboption1', '/subgroup/suboption2'], ('subgroup',))
# strings
assert _resolve_default(root, Default('subgroup')) == ('/subgroup', ['/subgroup/suboption1', '/subgroup/suboption2'], ('subgroup',))
assert _resolve_default(root, Default('/subgroup')) == ('/subgroup', ['/subgroup/suboption1', '/subgroup/suboption2'], ('subgroup',))
assert _resolve_default(root, Default(s1)) == ('/subgroup', ['/subgroup/suboption1', '/subgroup/suboption2'], ('subgroup',))
assert _resolve_default(root, Default(s1o1)) == ('/subgroup', ['/subgroup/suboption1'], ('subgroup',))
assert _resolve_default(root, Default({'/subgroup': 'suboption1'})) == ('/subgroup', ['/subgroup/suboption1'], ('subgroup',))
assert _resolve_default(root, Default({'subgroup': 'suboption1'})) == ('/subgroup', ['/subgroup/suboption1'], ('subgroup',))
with pytest.raises(KeyError, match="Group '/subgroup2/subgroup3' does not have child 'subgroup'"):
_resolve_default(s2s3, Default({'subgroup': 'suboption1'}))
with pytest.raises(KeyError, match="Group '/subgroup2' does not have child 'subgroup'"):
_resolve_default(s2, Default({'subgroup': 'suboption1'}))
with pytest.raises(KeyError, match="Group '/subgroup' does not have child 'subgroup'"):
_resolve_default(s1, Default({'subgroup': 'suboption1'}))
def test_defaults_advanced():
def resolve_entry_defaults(group):
results = []
for default in group.get_option('default').get_unresolved_defaults():
results.append(_resolve_default(group, default))
return results
assert resolve_entry_defaults(_make_config_group(suboption='suboption1')) == [('/subgroup', ['/subgroup/suboption1'], ('subgroup',))]
assert resolve_entry_defaults(_make_config_group(suboption='suboption2')) == [('/subgroup', ['/subgroup/suboption2'], ('subgroup',))]
assert resolve_entry_defaults(_make_config_group(suboption=['suboption2'])) == [('/subgroup', ['/subgroup/suboption2'], ('subgroup',))]
assert resolve_entry_defaults(_make_config_group(suboption=['suboption1', 'suboption2'])) == [('/subgroup', ['/subgroup/suboption1', '/subgroup/suboption2'], ('subgroup',))]
assert resolve_entry_defaults(_make_config_group(suboption=None, suboption2='suboption1')) == [('/subgroup2/subgroup3', ['/subgroup2/subgroup3/suboption1'], ('subgroup2', 'subgroup3'))]
assert resolve_entry_defaults(_make_config_group(suboption=None, suboption2='suboption2')) == [('/subgroup2/subgroup3', ['/subgroup2/subgroup3/suboption2'], ('subgroup2', 'subgroup3'))]
| [
"eunomia.config._default.Default",
"pytest.raises",
"tests.test_backend_obj._make_config_group"
] | [((799, 897), 'tests.test_backend_obj._make_config_group', '_make_config_group', ([], {'suboption': 'None', 'suboption2': 'None', 'package1': '"""<option>"""', 'package2': '"""asdf.fdsa"""'}), "(suboption=None, suboption2=None, package1='<option>',\n package2='asdf.fdsa')\n", (817, 897), False, 'from tests.test_backend_obj import _make_config_group\n'), ((2097, 2151), 'pytest.raises', 'pytest.raises', (['KeyError'], {'match': '"""key .* is not a group"""'}), "(KeyError, match='key .* is not a group')\n", (2110, 2151), False, 'import pytest\n'), ((2221, 2275), 'pytest.raises', 'pytest.raises', (['KeyError'], {'match': '"""key .* is not a group"""'}), "(KeyError, match='key .* is not a group')\n", (2234, 2275), False, 'import pytest\n'), ((2326, 2380), 'pytest.raises', 'pytest.raises', (['KeyError'], {'match': '"""key .* is not a group"""'}), "(KeyError, match='key .* is not a group')\n", (2339, 2380), False, 'import pytest\n'), ((3488, 3554), 'pytest.raises', 'pytest.raises', (['KeyError'], {'match': '"""value in list .* is not an option"""'}), "(KeyError, match='value in list .* is not an option')\n", (3501, 3554), False, 'import pytest\n'), ((3618, 3684), 'pytest.raises', 'pytest.raises', (['KeyError'], {'match': '"""value in list .* is not an option"""'}), "(KeyError, match='value in list .* is not an option')\n", (3631, 3684), False, 'import pytest\n'), ((5767, 5864), 'pytest.raises', 'pytest.raises', (['KeyError'], {'match': '"""Group \'/subgroup2/subgroup3\' does not have child \'subgroup\'"""'}), '(KeyError, match=\n "Group \'/subgroup2/subgroup3\' does not have child \'subgroup\'")\n', (5780, 5864), False, 'import pytest\n'), ((5938, 6025), 'pytest.raises', 'pytest.raises', (['KeyError'], {'match': '"""Group \'/subgroup2\' does not have child \'subgroup\'"""'}), '(KeyError, match=\n "Group \'/subgroup2\' does not have child \'subgroup\'")\n', (5951, 6025), False, 'import pytest\n'), ((6099, 6185), 'pytest.raises', 'pytest.raises', (['KeyError'], 
{'match': '"""Group \'/subgroup\' does not have child \'subgroup\'"""'}), '(KeyError, match=\n "Group \'/subgroup\' does not have child \'subgroup\'")\n', (6112, 6185), False, 'import pytest\n'), ((1427, 1437), 'eunomia.config._default.Default', 'Default', (['d'], {}), '(d)\n', (1434, 1437), False, 'from eunomia.config._default import Default\n'), ((1518, 1536), 'eunomia.config._default.Default', 'Default', (['{root: d}'], {}), '({root: d})\n', (1525, 1536), False, 'from eunomia.config._default import Default\n'), ((1609, 1629), 'eunomia.config._default.Default', 'Default', (['{root: [d]}'], {}), '({root: [d]})\n', (1616, 1629), False, 'from eunomia.config._default import Default\n'), ((1700, 1725), 'eunomia.config._default.Default', 'Default', (["{'/': 'default'}"], {}), "({'/': 'default'})\n", (1707, 1725), False, 'from eunomia.config._default import Default\n'), ((1791, 1817), 'eunomia.config._default.Default', 'Default', (["{'/': '/default'}"], {}), "({'/': '/default'})\n", (1798, 1817), False, 'from eunomia.config._default import Default\n'), ((1882, 1910), 'eunomia.config._default.Default', 'Default', (["{'/': ['/default']}"], {}), "({'/': ['/default']})\n", (1889, 1910), False, 'from eunomia.config._default import Default\n'), ((1973, 2000), 'eunomia.config._default.Default', 'Default', (["{'/': ['default']}"], {}), "({'/': ['default']})\n", (1980, 2000), False, 'from eunomia.config._default import Default\n'), ((2176, 2210), 'eunomia.config._default.Default', 'Default', (["{'/default': ['default']}"], {}), "({'/default': ['default']})\n", (2183, 2210), False, 'from eunomia.config._default import Default\n'), ((2300, 2315), 'eunomia.config._default.Default', 'Default', (['{d: d}'], {}), '({d: d})\n', (2307, 2315), False, 'from eunomia.config._default import Default\n'), ((2405, 2422), 'eunomia.config._default.Default', 'Default', (['{d: [d]}'], {}), '({d: [d]})\n', (2412, 2422), False, 'from eunomia.config._default import Default\n'), ((2505, 2516), 
'eunomia.config._default.Default', 'Default', (['""""""'], {}), "('')\n", (2512, 2516), False, 'from eunomia.config._default import Default\n'), ((2654, 2672), 'eunomia.config._default.Default', 'Default', (['"""default"""'], {}), "('default')\n", (2661, 2672), False, 'from eunomia.config._default import Default\n'), ((2803, 2815), 'eunomia.config._default.Default', 'Default', (['"""/"""'], {}), "('/')\n", (2810, 2815), False, 'from eunomia.config._default import Default\n'), ((2878, 2897), 'eunomia.config._default.Default', 'Default', (['"""/default"""'], {}), "('/default')\n", (2885, 2897), False, 'from eunomia.config._default import Default\n'), ((2960, 2973), 'eunomia.config._default.Default', 'Default', (['root'], {}), '(root)\n', (2967, 2973), False, 'from eunomia.config._default import Default\n'), ((3036, 3055), 'eunomia.config._default.Default', 'Default', (["{'/': '/'}"], {}), "({'/': '/'})\n", (3043, 3055), False, 'from eunomia.config._default import Default\n'), ((3118, 3137), 'eunomia.config._default.Default', 'Default', (["{'/': '*'}"], {}), "({'/': '*'})\n", (3125, 3137), False, 'from eunomia.config._default import Default\n'), ((3200, 3220), 'eunomia.config._default.Default', 'Default', (["{root: '*'}"], {}), "({root: '*'})\n", (3207, 3220), False, 'from eunomia.config._default import Default\n'), ((3283, 3304), 'eunomia.config._default.Default', 'Default', (['{root: root}'], {}), '({root: root})\n', (3290, 3304), False, 'from eunomia.config._default import Default\n'), ((3367, 3387), 'eunomia.config._default.Default', 'Default', (["{'/': root}"], {}), "({'/': root})\n", (3374, 3387), False, 'from eunomia.config._default import Default\n'), ((3579, 3607), 'eunomia.config._default.Default', 'Default', (["{'/': ['subgroup']}"], {}), "({'/': ['subgroup']})\n", (3586, 3607), False, 'from eunomia.config._default import Default\n'), ((3709, 3748), 'eunomia.config._default.Default', 'Default', (["{'/': ['default', 'subgroup']}"], {}), "({'/': ['default', 
'subgroup']})\n", (3716, 3748), False, 'from eunomia.config._default import Default\n'), ((3805, 3815), 'eunomia.config._default.Default', 'Default', (['d'], {}), '(d)\n', (3812, 3815), False, 'from eunomia.config._default import Default\n'), ((3876, 3886), 'eunomia.config._default.Default', 'Default', (['d'], {}), '(d)\n', (3883, 3886), False, 'from eunomia.config._default import Default\n'), ((3947, 3957), 'eunomia.config._default.Default', 'Default', (['d'], {}), '(d)\n', (3954, 3957), False, 'from eunomia.config._default import Default\n'), ((4020, 4030), 'eunomia.config._default.Default', 'Default', (['d'], {}), '(d)\n', (4027, 4030), False, 'from eunomia.config._default import Default\n'), ((4113, 4124), 'eunomia.config._default.Default', 'Default', (['s1'], {}), '(s1)\n', (4120, 4124), False, 'from eunomia.config._default import Default\n'), ((4249, 4260), 'eunomia.config._default.Default', 'Default', (['s1'], {}), '(s1)\n', (4256, 4260), False, 'from eunomia.config._default import Default\n'), ((4385, 4396), 'eunomia.config._default.Default', 'Default', (['s1'], {}), '(s1)\n', (4392, 4396), False, 'from eunomia.config._default import Default\n'), ((4521, 4532), 'eunomia.config._default.Default', 'Default', (['s1'], {}), '(s1)\n', (4528, 4532), False, 'from eunomia.config._default import Default\n'), ((4657, 4675), 'eunomia.config._default.Default', 'Default', (["{s1: '*'}"], {}), "({s1: '*'})\n", (4664, 4675), False, 'from eunomia.config._default import Default\n'), ((4793, 4810), 'eunomia.config._default.Default', 'Default', (['{s1: s1}'], {}), '({s1: s1})\n', (4800, 4810), False, 'from eunomia.config._default import Default\n'), ((4944, 4963), 'eunomia.config._default.Default', 'Default', (['"""subgroup"""'], {}), "('subgroup')\n", (4951, 4963), False, 'from eunomia.config._default import Default\n'), ((5097, 5117), 'eunomia.config._default.Default', 'Default', (['"""/subgroup"""'], {}), "('/subgroup')\n", (5104, 5117), False, 'from 
eunomia.config._default import Default\n'), ((5250, 5261), 'eunomia.config._default.Default', 'Default', (['s1'], {}), '(s1)\n', (5257, 5261), False, 'from eunomia.config._default import Default\n'), ((5403, 5416), 'eunomia.config._default.Default', 'Default', (['s1o1'], {}), '(s1o1)\n', (5410, 5416), False, 'from eunomia.config._default import Default\n'), ((5532, 5568), 'eunomia.config._default.Default', 'Default', (["{'/subgroup': 'suboption1'}"], {}), "({'/subgroup': 'suboption1'})\n", (5539, 5568), False, 'from eunomia.config._default import Default\n'), ((5662, 5697), 'eunomia.config._default.Default', 'Default', (["{'subgroup': 'suboption1'}"], {}), "({'subgroup': 'suboption1'})\n", (5669, 5697), False, 'from eunomia.config._default import Default\n'), ((5892, 5927), 'eunomia.config._default.Default', 'Default', (["{'subgroup': 'suboption1'}"], {}), "({'subgroup': 'suboption1'})\n", (5899, 5927), False, 'from eunomia.config._default import Default\n'), ((6053, 6088), 'eunomia.config._default.Default', 'Default', (["{'subgroup': 'suboption1'}"], {}), "({'subgroup': 'suboption1'})\n", (6060, 6088), False, 'from eunomia.config._default import Default\n'), ((6213, 6248), 'eunomia.config._default.Default', 'Default', (["{'subgroup': 'suboption1'}"], {}), "({'subgroup': 'suboption1'})\n", (6220, 6248), False, 'from eunomia.config._default import Default\n'), ((6540, 6582), 'tests.test_backend_obj._make_config_group', '_make_config_group', ([], {'suboption': '"""suboption1"""'}), "(suboption='suboption1')\n", (6558, 6582), False, 'from tests.test_backend_obj import _make_config_group\n'), ((6678, 6720), 'tests.test_backend_obj._make_config_group', '_make_config_group', ([], {'suboption': '"""suboption2"""'}), "(suboption='suboption2')\n", (6696, 6720), False, 'from tests.test_backend_obj import _make_config_group\n'), ((6816, 6860), 'tests.test_backend_obj._make_config_group', '_make_config_group', ([], {'suboption': "['suboption2']"}), 
"(suboption=['suboption2'])\n", (6834, 6860), False, 'from tests.test_backend_obj import _make_config_group\n'), ((6956, 7014), 'tests.test_backend_obj._make_config_group', '_make_config_group', ([], {'suboption': "['suboption1', 'suboption2']"}), "(suboption=['suboption1', 'suboption2'])\n", (6974, 7014), False, 'from tests.test_backend_obj import _make_config_group\n'), ((7135, 7194), 'tests.test_backend_obj._make_config_group', '_make_config_group', ([], {'suboption': 'None', 'suboption2': '"""suboption1"""'}), "(suboption=None, suboption2='suboption1')\n", (7153, 7194), False, 'from tests.test_backend_obj import _make_config_group\n'), ((7325, 7384), 'tests.test_backend_obj._make_config_group', '_make_config_group', ([], {'suboption': 'None', 'suboption2': '"""suboption2"""'}), "(suboption=None, suboption2='suboption2')\n", (7343, 7384), False, 'from tests.test_backend_obj import _make_config_group\n')] |
import os
import logging
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('Notify', '0.7')
from locale import atof, setlocale, LC_NUMERIC
from gi.repository import Notify
from itertools import islice
from subprocess import check_output, check_call, CalledProcessError
from ulauncher.api.client.Extension import Extension
from ulauncher.api.client.EventListener import EventListener
from ulauncher.api.shared.event import KeywordQueryEvent, ItemEnterEvent
from ulauncher.api.shared.item.ExtensionSmallResultItem import ExtensionSmallResultItem
from ulauncher.api.shared.action.RenderResultListAction import RenderResultListAction
from ulauncher.api.shared.action.ExtensionCustomAction import ExtensionCustomAction
logger = logging.getLogger(__name__)
# Icon paths, relative to the extension directory.
ext_icon = 'images/icon.png'  # extension icon; also the default notification icon
exec_icon = 'images/executable.png'  # icon shown next to each process entry
dead_icon = 'images/dead.png'  # icon for the "process killed" notification
class ProcessKillerExtension(Extension):
    """Ulauncher extension entry point.

    Wires the keyword-query and item-enter listeners and exposes a helper
    for desktop notifications via libnotify.
    """
    def __init__(self):
        super(ProcessKillerExtension, self).__init__()
        self.subscribe(KeywordQueryEvent, KeywordQueryEventListener())
        self.subscribe(ItemEnterEvent, ItemEnterEventListener())
        setlocale(LC_NUMERIC, '') # set to OS default locale so atof() parses ps output correctly
    def show_notification(self, title, text=None, icon=ext_icon):
        """Show a desktop notification.

        :param title: notification title
        :param text: optional body text
        :param icon: icon path relative to this extension's directory
        """
        logger.debug('Show notification: %s' % text)
        icon_full_path = os.path.join(os.path.dirname(__file__), icon)
        Notify.init("KillerExtension")
        Notify.Notification.new(title, text, icon_full_path).show()
class KeywordQueryEventListener(EventListener):
    """Renders the (optionally filtered) process list for a keyword query."""
    def on_event(self, event, extension):
        # Cap the rendered list at 15 entries to keep the UI responsive.
        return RenderResultListAction(list(islice(self.generate_results(event), 15)))
    def generate_results(self, event):
        """Yield one result item per running process.

        When the user typed an argument after the keyword, only processes
        whose command line contains that argument are yielded.
        """
        # Previously both branches of the argument check yielded an identical
        # item; fold the filter into a single guard instead.
        query = event.get_argument()
        for (pid, cpu, cmd) in get_process_list():
            if query and query not in cmd:
                continue
            # Only surface the CPU usage in the name when it is noticeable.
            name = '[%s%% CPU] %s' % (cpu, cmd) if cpu > 1 else cmd
            on_enter = {'alt_enter': False, 'pid': pid, 'cmd': cmd}
            on_alt_enter = on_enter.copy()
            on_alt_enter['alt_enter'] = True
            yield ExtensionSmallResultItem(icon=exec_icon,
                                           name=name,
                                           on_enter=ExtensionCustomAction(on_enter),
                                           on_alt_enter=ExtensionCustomAction(on_alt_enter, keep_app_open=True))
class ItemEnterEventListener(EventListener):
    """Handles item activation: kills the selected process, or (on
    alt+enter) shows a menu of signals to send."""
    def kill(self, extension, pid, signal):
        """Send `signal` to process `pid` via the `kill` command and notify
        the user about the outcome."""
        cmd = ['kill', '-s', signal, pid]
        logger.info(' '.join(cmd))
        try:
            # check_call raises CalledProcessError on a non-zero exit code,
            # so reaching the next line means the signal was delivered.
            # (Previously the return value was compared to 0 and the result
            # discarded, which was a no-op.)
            check_call(cmd)
            extension.show_notification("Done", "It's dead now", icon=dead_icon)
        except CalledProcessError as e:
            extension.show_notification("Error", "'kill' returned code %s" % e.returncode)
        except Exception as e:
            logger.error('%s: %s' % (type(e).__name__, e))
            extension.show_notification("Error", "Check the logs")
            raise
    def show_signal_options(self, data):
        """Build a result list offering common signals for the process in `data`."""
        result_items = []
        options = [('TERM', '15 TERM (default)'), ('KILL', '9 KILL'), ('HUP', '1 HUP')]
        for sig, name in options:
            on_enter = data.copy()
            on_enter['alt_enter'] = False
            on_enter['signal'] = sig
            result_items.append(ExtensionSmallResultItem(icon=ext_icon,
                                                      name=name,
                                                      highlightable=False,
                                                      on_enter=ExtensionCustomAction(on_enter)))
        return RenderResultListAction(result_items)
    def on_event(self, event, extension):
        data = event.get_data()
        if data['alt_enter']:
            # Alt+Enter: let the user pick which signal to send.
            return self.show_signal_options(data)
        else:
            self.kill(extension, data['pid'], data.get('signal', 'TERM'))
def get_process_list():
    """Yield (pid, cpu_percent, command) tuples for running processes,
    ordered by CPU usage (highest first)."""
    ps_env = dict(os.environ, COLUMNS='200')
    ps_out = check_output(['ps', '-eo', 'pid,%cpu,cmd', '--sort', '-%cpu'], env=ps_env).decode('utf8')
    for row in ps_out.split('\n'):
        fields = row.split()
        try:
            int(fields[0])
        except (ValueError, IndexError):
            # header row, blank line, or anything without a numeric PID
            continue
        cpu_pct = atof(fields[1])
        command = ' '.join(fields[2:])
        # Don't list the `top` helper some launchers keep running.
        if 'top -bn' in command:
            continue
        yield (fields[0], cpu_pct, command)
if __name__ == '__main__':
    # Entry point used by Ulauncher to run the extension process.
    ProcessKillerExtension().run()
| [
"logging.getLogger",
"subprocess.check_output",
"locale.atof",
"locale.setlocale",
"subprocess.check_call",
"ulauncher.api.shared.action.RenderResultListAction.RenderResultListAction",
"os.environ.copy",
"gi.require_version",
"ulauncher.api.shared.action.ExtensionCustomAction.ExtensionCustomAction",... | [((36, 68), 'gi.require_version', 'gi.require_version', (['"""Gtk"""', '"""3.0"""'], {}), "('Gtk', '3.0')\n", (54, 68), False, 'import gi\n'), ((69, 104), 'gi.require_version', 'gi.require_version', (['"""Notify"""', '"""0.7"""'], {}), "('Notify', '0.7')\n", (87, 104), False, 'import gi\n'), ((739, 766), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (756, 766), False, 'import logging\n'), ((4371, 4388), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (4386, 4388), False, 'import os\n'), ((1129, 1154), 'locale.setlocale', 'setlocale', (['LC_NUMERIC', '""""""'], {}), "(LC_NUMERIC, '')\n", (1138, 1154), False, 'from locale import atof, setlocale, LC_NUMERIC\n'), ((1383, 1413), 'gi.repository.Notify.init', 'Notify.init', (['"""KillerExtension"""'], {}), "('KillerExtension')\n", (1394, 1413), False, 'from gi.repository import Notify\n'), ((3989, 4025), 'ulauncher.api.shared.action.RenderResultListAction.RenderResultListAction', 'RenderResultListAction', (['result_items'], {}), '(result_items)\n', (4011, 4025), False, 'from ulauncher.api.shared.action.RenderResultListAction import RenderResultListAction\n'), ((4735, 4747), 'locale.atof', 'atof', (['col[1]'], {}), '(col[1])\n', (4739, 4747), False, 'from locale import atof, setlocale, LC_NUMERIC\n'), ((1342, 1367), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1357, 1367), False, 'import os\n'), ((4426, 4497), 'subprocess.check_output', 'check_output', (["['ps', '-eo', 'pid,%cpu,cmd', '--sort', '-%cpu']"], {'env': 'env'}), "(['ps', '-eo', 'pid,%cpu,cmd', '--sort', '-%cpu'], env=env)\n", (4438, 4497), False, 'from subprocess import check_output, check_call, CalledProcessError\n'), ((1422, 1474), 'gi.repository.Notify.Notification.new', 'Notify.Notification.new', (['title', 'text', 'icon_full_path'], {}), '(title, text, icon_full_path)\n', (1445, 1474), False, 'from 
gi.repository import Notify\n'), ((2944, 2959), 'subprocess.check_call', 'check_call', (['cmd'], {}), '(cmd)\n', (2954, 2959), False, 'from subprocess import check_output, check_call, CalledProcessError\n'), ((3940, 3971), 'ulauncher.api.shared.action.ExtensionCustomAction.ExtensionCustomAction', 'ExtensionCustomAction', (['on_enter'], {}), '(on_enter)\n', (3961, 3971), False, 'from ulauncher.api.shared.action.ExtensionCustomAction import ExtensionCustomAction\n'), ((2599, 2630), 'ulauncher.api.shared.action.ExtensionCustomAction.ExtensionCustomAction', 'ExtensionCustomAction', (['on_enter'], {}), '(on_enter)\n', (2620, 2630), False, 'from ulauncher.api.shared.action.ExtensionCustomAction import ExtensionCustomAction\n'), ((2692, 2747), 'ulauncher.api.shared.action.ExtensionCustomAction.ExtensionCustomAction', 'ExtensionCustomAction', (['on_alt_enter'], {'keep_app_open': '(True)'}), '(on_alt_enter, keep_app_open=True)\n', (2713, 2747), False, 'from ulauncher.api.shared.action.ExtensionCustomAction import ExtensionCustomAction\n'), ((2250, 2281), 'ulauncher.api.shared.action.ExtensionCustomAction.ExtensionCustomAction', 'ExtensionCustomAction', (['on_enter'], {}), '(on_enter)\n', (2271, 2281), False, 'from ulauncher.api.shared.action.ExtensionCustomAction import ExtensionCustomAction\n'), ((2347, 2402), 'ulauncher.api.shared.action.ExtensionCustomAction.ExtensionCustomAction', 'ExtensionCustomAction', (['on_alt_enter'], {'keep_app_open': '(True)'}), '(on_alt_enter, keep_app_open=True)\n', (2368, 2402), False, 'from ulauncher.api.shared.action.ExtensionCustomAction import ExtensionCustomAction\n')] |
from flask import current_app
from flask import g
from flask import request
from flask_restful.reqparse import RequestParser
from flask_restful import Resource
from models import db
from models.user import User
from utils.decorators import login_required
from utils.parser import image_file
from utils.storage import upload_image
class PhotoResource(Resource):
    """User profile photo endpoint."""
    # Require an authenticated user for every method of this resource.
    method_decorators = [login_required]

    def patch(self):
        """Update the current user's profile photo.

        Expects a multipart file field named 'photo'; uploads it to the
        storage backend and stores the resulting file name on the user.

        Returns:
            dict with the full public URL of the new photo.
        """
        # (Removed a leftover debug `print(request.__dict__)`.)
        # Parse and validate the request arguments.
        rp = RequestParser()
        rp.add_argument('photo', type=image_file, required=True, location='files')
        args_dict = rp.parse_args()
        # The uploaded file object.
        photo = args_dict['photo']
        # Upload the image; the returned key is the image's file name.
        file_name = upload_image(photo.read())
        # Persist the file name on the current user.
        User.query.filter(User.id == g.user_id).update({'profile_photo': file_name})
        db.session.commit()
        # Return the full URL of the uploaded photo.
        ret_dict = {
            'photo_url': '{}/{}'.format(current_app.config['QINIU_DOMAIN'], file_name)
        }
        return ret_dict
from cache.user import UserProfileCache
class CurrentUserResource(Resource):
    """Endpoint for the currently authenticated user's profile."""
    # Require an authenticated user. The request hook
    # utils.middlewares.jwt_authentication has already put the token's
    # user_id into flask.g.
    method_decorators = [login_required]

    def get(self):
        """Return the current user's profile.

        The profile is read through the cache layer, which falls back to
        persistent storage on a miss. g.user_id is guaranteed to be set
        by the time this runs.
        """
        # (Removed leftover debug prints of the cached profile.)
        ret = UserProfileCache(user_id=g.user_id).get()
        ret_dict = {
            'user_id': g.user_id,
            'user_name': ret['name'],
            'user_mobile': ret['mobile'],
            'user_photo': ret['profile_photo'],
            'certificate': ret['certificate'],
            'introduction': ret['introduction'],
            # NOTE(review): the counters below are hard-coded placeholders.
            'arts_count': 0,
            'following_count': 0
        }
        return ret_dict

    def delete(self):
        """Drop the current user's cached profile, if present."""
        ret = UserProfileCache(user_id=g.user_id).exists()
        if ret:
            UserProfileCache(user_id=g.user_id).clear()
        return {'message': 'ok'}
"cache.user.UserProfileCache",
"flask_restful.reqparse.RequestParser",
"models.user.User.query.filter",
"models.db.session.commit"
] | [((511, 526), 'flask_restful.reqparse.RequestParser', 'RequestParser', ([], {}), '()\n', (524, 526), False, 'from flask_restful.reqparse import RequestParser\n'), ((895, 914), 'models.db.session.commit', 'db.session.commit', ([], {}), '()\n', (912, 914), False, 'from models import db\n'), ((812, 851), 'models.user.User.query.filter', 'User.query.filter', (['(User.id == g.user_id)'], {}), '(User.id == g.user_id)\n', (829, 851), False, 'from models.user import User\n'), ((1396, 1431), 'cache.user.UserProfileCache', 'UserProfileCache', ([], {'user_id': 'g.user_id'}), '(user_id=g.user_id)\n', (1412, 1431), False, 'from cache.user import UserProfileCache\n'), ((1888, 1923), 'cache.user.UserProfileCache', 'UserProfileCache', ([], {'user_id': 'g.user_id'}), '(user_id=g.user_id)\n', (1904, 1923), False, 'from cache.user import UserProfileCache\n'), ((1961, 1996), 'cache.user.UserProfileCache', 'UserProfileCache', ([], {'user_id': 'g.user_id'}), '(user_id=g.user_id)\n', (1977, 1996), False, 'from cache.user import UserProfileCache\n')] |
"""Implement the Unit class."""
import numpy as np
from .. import config, constants
__all__ = ["Pixels", "Degrees", "Munits", "Percent"]
class _PixelUnits:
    """Multiplying a number by an instance converts pixels to munits."""

    @staticmethod
    def _convert(val):
        # Scale by the scene-width / pixel-width ratio.
        return val * config.frame_width / config.pixel_width

    def __mul__(self, val):
        return self._convert(val)

    def __rmul__(self, val):
        return self._convert(val)
return val * config.frame_width / config.pixel_width
class Percent:
    """Unit converting a percentage of a frame dimension to munits.

    Args:
        axis: the axis whose frame length the percentage refers to
            (``constants.X_AXIS`` or ``constants.Y_AXIS``).

    Raises:
        NotImplementedError: for the Z axis, whose length is undefined.
        ValueError: for any other axis value.
    """

    def __init__(self, axis):
        # Use a single elif chain: the axis can only match one constant, and
        # previously an unrecognized axis silently left `length` unset,
        # deferring the failure to the first multiplication.
        if np.array_equal(axis, constants.X_AXIS):
            self.length = config.frame_width
        elif np.array_equal(axis, constants.Y_AXIS):
            self.length = config.frame_height
        elif np.array_equal(axis, constants.Z_AXIS):
            raise NotImplementedError("length of Z axis is undefined")
        else:
            raise ValueError("axis must be one of the coordinate axes")

    def __mul__(self, val):
        return val / 100 * self.length

    def __rmul__(self, val):
        return val / 100 * self.length
return val / 100 * self.length
Pixels = _PixelUnits()  # e.g. ``5 * Pixels`` -> length of 5 pixels in munits
Degrees = constants.PI / 180  # e.g. ``90 * Degrees`` -> angle in radians
Munits = 1  # the native scene unit; multiplying by it is a no-op
| [
"numpy.array_equal"
] | [((399, 437), 'numpy.array_equal', 'np.array_equal', (['axis', 'constants.X_AXIS'], {}), '(axis, constants.X_AXIS)\n', (413, 437), True, 'import numpy as np\n'), ((495, 533), 'numpy.array_equal', 'np.array_equal', (['axis', 'constants.Y_AXIS'], {}), '(axis, constants.Y_AXIS)\n', (509, 533), True, 'import numpy as np\n'), ((592, 630), 'numpy.array_equal', 'np.array_equal', (['axis', 'constants.Z_AXIS'], {}), '(axis, constants.Z_AXIS)\n', (606, 630), True, 'import numpy as np\n')] |
from urllib.request import urlopen
from bs4 import BeautifulSoup as soup
import re
import pandas as pd
def getContainerInfo(container):
    """Extract product information from one Newegg item container.

    Args:
        container: BeautifulSoup tag for one ``div.item-container``.

    Returns:
        Tuple ``(name, brandName, rating, ratingCount, price)`` where
        ``rating``/``ratingCount`` are strings or ``None`` for unrated
        items, and ``price`` is the list of number strings found in the
        price cell (price first, optionally followed by more matches).
    """
    # The product image's title attribute carries the product name.
    name = container.img['title']
    itemInfo = container.find('div',class_='item-info')
    itemBranding = itemInfo.find('div',class_ = 'item-branding')
    brandName = itemBranding.img['title']
    ratingTag = itemBranding.find("a",class_="item-rating")
    # Rating is a single digit 0-5 embedded in the rating link's title.
    rating = re.search('[0-5]',ratingTag['title']).group() if ratingTag != None else None
    ratingCount = re.search('\d+',itemBranding.find("span",class_='item-rating-num').get_text()).group() if ratingTag != None else None
    priceContainer = itemInfo.find("div",class_="item-action").ul.find("li",class_="price-current")
    # Matches prices like "1,234.56" within the price cell's text.
    price = re.findall('\d{1,3}(?:[.,]\d{3})*(?:[.,]\d{2})?',priceContainer.get_text())
    return name,brandName,rating,ratingCount,price
def convertToPandasDF(data, columns):
    """Build a DataFrame from scraped item tuples.

    Args:
        data: iterable of (name, brand, rating, rating_count, price_fields)
            tuples as produced by getContainerInfo.
        columns: the six column names, in order.

    Returns:
        pandas.DataFrame with one row per item.
    """
    names, brands, ratings, rating_counts, prices, offers = [], [], [], [], [], []
    for item_name, brand, rating, rating_count, price_fields in data:
        names.append(item_name)
        brands.append(brand)
        ratings.append(rating)
        rating_counts.append(rating_count)
        # The first price field is the price itself; a second field, when
        # present, is the offer count.
        prices.append(price_fields[0])
        offers.append(price_fields[1] if len(price_fields) == 2 else None)
    return pd.DataFrame({
        columns[0]: names,
        columns[1]: brands,
        columns[2]: ratings,
        columns[3]: rating_counts,
        columns[4]: prices,
        columns[5]: offers,
    })
def main():
    """Scrape the Newegg graphics-card listing and export it to out.xlsx."""
    url = 'https://www.newegg.com/Video-Cards-Video-Devices/Category/ID-38?Tpk=graphics%20card'
    response = urlopen(url)
    html = response.read()
    parsedHtml = soup(html,"html.parser")
    # Each product on the page lives in its own ``div.item-container``.
    containerDivs = parsedHtml.find_all("div",class_= "item-container")
    data = [getContainerInfo(container) for container in containerDivs]
    columns = ['Product Name','Brand Name','Average User Rating','User Count','Price','Offer Count']
    df = convertToPandasDF(data,columns)
    df.to_excel("out.xlsx")
if __name__ == "__main__":
    main()
"pandas.DataFrame",
"bs4.BeautifulSoup",
"urllib.request.urlopen",
"re.search"
] | [((1238, 1378), 'pandas.DataFrame', 'pd.DataFrame', (['{columns[0]: name, columns[1]: brand, columns[2]: userRating, columns[3]:\n userCount, columns[4]: price, columns[5]: offer}'], {}), '({columns[0]: name, columns[1]: brand, columns[2]: userRating,\n columns[3]: userCount, columns[4]: price, columns[5]: offer})\n', (1250, 1378), True, 'import pandas as pd\n'), ((1506, 1518), 'urllib.request.urlopen', 'urlopen', (['url'], {}), '(url)\n', (1513, 1518), False, 'from urllib.request import urlopen\n'), ((1564, 1589), 'bs4.BeautifulSoup', 'soup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (1568, 1589), True, 'from bs4 import BeautifulSoup as soup\n'), ((412, 450), 're.search', 're.search', (['"""[0-5]"""', "ratingTag['title']"], {}), "('[0-5]', ratingTag['title'])\n", (421, 450), False, 'import re\n')] |
import statistics
import hpbandster.core.result as hpres
# smallest value is best -> reverse_loss = True
# largest value is best -> reverse_loss = False
REVERSE_LOSS = True
EXP_LOSS = 1  # presumably an exponent applied to the loss — unused in this file
OUTLIER_PERC_WORST = 0.1  # presumably the fraction of worst runs to drop — unused in this file
OUTLIER_PERC_BEST = 0.0  # presumably the fraction of best runs to drop — unused in this file
def analyze_bohb(log_dir):
    """Load a BOHB run from `log_dir` and return its result and runs.

    Args:
        log_dir: directory containing the hpbandster log files.

    Returns:
        Tuple ``(result, all_runs)``: the loaded HBS result object and
        the list of all executed runs.
    """
    # load the run from the log files
    result = hpres.logged_results_to_HBS_result(log_dir)
    # get all executed runs
    all_runs = result.get_all_runs()
    # Previously both values were computed and then discarded; return them
    # so callers can actually analyze the run.
    return result, all_runs
if __name__ == '__main__':
    # load the example run from the log files
    result = hpres.logged_results_to_HBS_result('../results/GTNC_evaluate_cmc_subopt_2021-01-21-09_5')
    # get all executed runs
    all_runs = result.get_all_runs()
    # Collect the duration of every logged run and print the mean.
    t_arr = []
    for dat in result.data.values():
        for time_stamp in dat.time_stamps.values():
            ts = time_stamp['started']
            te = time_stamp['finished']
            # Only runs longer than 60 seconds are counted.
            if te-ts > 60:
                t_arr.append(te-ts)
    print(statistics.mean(t_arr))
"statistics.mean",
"hpbandster.core.result.logged_results_to_HBS_result"
] | [((326, 369), 'hpbandster.core.result.logged_results_to_HBS_result', 'hpres.logged_results_to_HBS_result', (['log_dir'], {}), '(log_dir)\n', (360, 369), True, 'import hpbandster.core.result as hpres\n'), ((524, 618), 'hpbandster.core.result.logged_results_to_HBS_result', 'hpres.logged_results_to_HBS_result', (['"""../results/GTNC_evaluate_cmc_subopt_2021-01-21-09_5"""'], {}), "(\n '../results/GTNC_evaluate_cmc_subopt_2021-01-21-09_5')\n", (558, 618), True, 'import hpbandster.core.result as hpres\n'), ((938, 960), 'statistics.mean', 'statistics.mean', (['t_arr'], {}), '(t_arr)\n', (953, 960), False, 'import statistics\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
2D linear elasticity example
Solve the equilibrium equation -\nabla \cdot \sigma(x) = f(x) for x\in\Omega
with the strain-displacement equation:
\epsilon = 1/2(\nabla u + \nabla u^T)
and the constitutive law:
\sigma = 2*\mu*\epsilon + \lambda*(\nabla\cdot u)I,
where \mu and \lambda are Lame constants, I is the identity tensor.
Dirichlet boundary conditions: u(x)=\hat{u} for x\in\Gamma_D
Neumann boundary conditions: \sigma n = \hat{t} for x\in \Gamma_N,
where n is the normal vector.
For this example:
\Omega is a quarter annulus in the 1st quadrant, centered at origin
with inner radius 1, outer radius 4
Symmetry (Dirichlet) boundary conditions on the bottom and left
u_x(x,y) = 0 for x=0
u_y(x,y) = 0 for y=0
and pressure boundary conditions for the curved boundaries:
\sigma n = P_int n on the interior boundary with P_int = 10 MPa
\sigma n = P_ext n on the exterior boundary with P_ext = 0 MPa.
Use DEM
"""
import tensorflow as tf
import numpy as np
import time
from utils.tfp_loss import tfp_function_factory
from utils.Geom_examples import QuarterAnnulus
from utils.Solvers import Elasticity2D_DEM_dist
from utils.Plotting import plot_field_2d
import tensorflow_probability as tfp
import matplotlib.pyplot as plt
#make figures bigger on HiDPI monitors
import matplotlib as mpl
mpl.rcParams['figure.dpi'] = 200
# Fix the RNG seeds so the network initialization and results are reproducible.
np.random.seed(42)
tf.random.set_seed(42)
class Elast_ThickCylinder(Elasticity2D_DEM_dist):
    '''
    Class including the symmetry boundary conditions for the thick cylinder problem
    '''
    def __init__(self, layers, train_op, num_epoch, print_epoch, model_data, data_type):
        super().__init__(layers, train_op, num_epoch, print_epoch, model_data, data_type)
    @tf.function
    def dirichletBound(self, X, xPhys, yPhys):
        """Strongly impose the symmetry (Dirichlet) conditions.

        Multiplying the network output by x (resp. y) forces u_x = 0 on
        the x = 0 edge and u_y = 0 on the y = 0 edge.
        """
        # multiply by x,y for strong imposition of boundary conditions
        u_val = X[:,0:1]
        v_val = X[:,1:2]
        u_val = xPhys*u_val
        v_val = yPhys*v_val
        return u_val, v_val
#define the model properties
model_data = dict()
model_data["radius_int"] = 1.
model_data["radius_ext"] = 4.
model_data["E"] = 1e2
model_data["nu"] = 0.3
model_data["state"] = "plane strain"
model_data["inner_pressure"] = 10.
model_data["outer_pressure"] = 0.
# generate the model geometry (quarter annulus in the 1st quadrant)
geomDomain = QuarterAnnulus(model_data["radius_int"], model_data["radius_ext"])
# define the input and output data set
numElemU = 10
numElemV = 10
numGauss = 5
# Gauss-quadrature points and weights for the interior integral
xPhys, yPhys, Wint = geomDomain.getQuadIntPts(numElemU, numElemV, numGauss)
data_type = "float32"
Xint = np.concatenate((xPhys,yPhys),axis=1).astype(data_type)
Wint = np.array(Wint).astype(data_type)
# prepare boundary points in the format Xbnd = [Xcoord, Ycoord, norm_x, norm_y] and
# Wbnd for boundary integration weights and
# Ybnd = [trac_x, trac_y], where Xcoord, Ycoord are the x and y coordinates of the point,
# norm_x, norm_y are the x and y components of the unit normals
# trac_x, trac_y are the x and y components of the traction vector at each point
# inner curved boundary (edge index 4), include both x and y directions
xPhysBnd, yPhysBnd , xNorm, yNorm, Wbnd = geomDomain.getQuadEdgePts(numElemV, numGauss, 4)
Xbnd = np.concatenate((xPhysBnd, yPhysBnd), axis=1).astype(data_type)
Wbnd = np.array(Wbnd).astype(data_type)
plt.scatter(xPhys, yPhys, s=0.1)
plt.scatter(xPhysBnd, yPhysBnd, s=1, c='red')
plt.title("Boundary and interior integration points")
plt.show()
# define loading: traction = -P_int * n on the inner boundary
Ybnd_x = -model_data["inner_pressure"]*xNorm
Ybnd_y = -model_data["inner_pressure"]*yNorm
Ybnd = np.concatenate((Ybnd_x, Ybnd_y), axis=1).astype(data_type)
#define the model: 3 hidden layers of 20 swish units, linear 2-component output (u_x, u_y)
tf.keras.backend.set_floatx(data_type)
l1 = tf.keras.layers.Dense(20, "swish")
l2 = tf.keras.layers.Dense(20, "swish")
l3 = tf.keras.layers.Dense(20, "swish")
l4 = tf.keras.layers.Dense(2, None)
train_op = tf.keras.optimizers.Adam()
train_op2 = "TFP-BFGS"
num_epoch = 1000
print_epoch = 100
pred_model = Elast_ThickCylinder([l1, l2, l3, l4], train_op, num_epoch,
                    print_epoch, model_data, data_type)
#convert the training data to tensors
Xint_tf = tf.convert_to_tensor(Xint)
Wint_tf = tf.convert_to_tensor(Wint)
Xbnd_tf = tf.convert_to_tensor(Xbnd)
Wbnd_tf = tf.convert_to_tensor(Wbnd)
Ybnd_tf = tf.convert_to_tensor(Ybnd)
#training: first-order (Adam) warm-up, then second-order (BFGS) refinement
t0 = time.time()
print("Training (ADAM)...")
pred_model.network_learn(Xint_tf, Wint_tf, Xbnd_tf, Wbnd_tf, Ybnd_tf)
t1 = time.time()
print("Time taken (ADAM)", t1-t0, "seconds")
print("Training (TFP-BFGS)...")
loss_func = tfp_function_factory(pred_model, Xint_tf, Wint_tf, Xbnd_tf, Wbnd_tf, Ybnd_tf)
# convert initial model parameters to a 1D tf.Tensor
init_params = tf.dynamic_stitch(loss_func.idx, pred_model.trainable_variables)
# train the model with the BFGS solver
results = tfp.optimizer.bfgs_minimize(
    value_and_gradients_function=loss_func, initial_position=init_params,
    max_iterations=10000, tolerance=1e-14)
# after training, the final optimized parameters are still in results.position
# so we have to manually put them back to the model
loss_func.assign_new_model_parameters(results.position)
t2 = time.time()
print("Time taken (BFGS)", t2-t1, "seconds")
print("Time taken (all)", t2-t0, "seconds")
def cart2pol(x, y):
    """Convert Cartesian coordinates to polar (radius, angle in radians).

    Uses np.hypot, which matches the radius computation in exact_disp /
    exact_stresses and avoids overflow/underflow in x**2 + y**2.
    """
    rho = np.hypot(x, y)
    phi = np.arctan2(y, x)
    return rho, phi
# define the exact displacements
def exact_disp(x,y,model):
    """Analytical displacement of the pressurized thick cylinder (Lame
    problem), returned as Cartesian components (ux, uy)."""
    a = model["radius_int"]
    b = model["radius_ext"]
    p1 = model["inner_pressure"]
    p0 = model["outer_pressure"]
    nu = model["nu"]
    mu = model["E"]/(2*(1+nu))
    r = np.hypot(x,y)
    # Radial displacement divided by r, so that (ux, uy) = (x, y) * scale.
    scale = 1/(2*mu*(b**2-a**2))*((1-2*nu)*(p1*a**2-p0*b**2)+(p1-p0)*a**2*b**2/r**2)
    return x*scale, y*scale
#define the exact stresses
def exact_stresses(x,y,model):
    """Analytical Cartesian stresses (sigma_xx, sigma_yy, sigma_xy) of the
    pressurized thick cylinder (Lame problem)."""
    a = model["radius_int"]
    b = model["radius_ext"]
    p1 = model["inner_pressure"]
    p0 = model["outer_pressure"]
    r = np.hypot(x,y)
    # Common factor and the two r-dependent terms of the Lame solution.
    factor = a**2*b**2/(b**2-a**2)
    base = p1/b**2 - p0/a**2 + (p1-p0)/r**2
    quartic = 2*(p1-p0)/r**4
    return (factor*(base - quartic*x**2),
            factor*(base - quartic*y**2),
            factor*(-quartic*x*y))
print("Testing...")
# Evaluate on a uniform grid twice as fine as the training density,
# including points on all four boundaries.
numPtsUTest = 2*numElemU*numGauss
numPtsVTest = 2*numElemV*numGauss
xPhysTest, yPhysTest = geomDomain.getUnifIntPts(numPtsUTest, numPtsVTest, [1,1,1,1])
XTest = np.concatenate((xPhysTest,yPhysTest),axis=1).astype(data_type)
XTest_tf = tf.convert_to_tensor(XTest)
YTest = pred_model(XTest_tf).numpy()
xPhysTest = xPhysTest.astype(data_type)
yPhysTest = yPhysTest.astype(data_type)
stress_xx_comp, stress_yy_comp, stress_xy_comp = pred_model.constitutiveEq(xPhysTest, yPhysTest)
stress_xx_comp = stress_xx_comp.numpy()
stress_yy_comp = stress_yy_comp.numpy()
stress_xy_comp = stress_xy_comp.numpy()
# plot the computed displacements
plot_field_2d(XTest, YTest[:,0], numPtsUTest, numPtsVTest, title="Computed x-displacement")
plot_field_2d(XTest, YTest[:,1], numPtsUTest, numPtsVTest, title="Computed y-displacement")
# relative L2 displacement error against the exact solution
ux_exact, uy_exact = exact_disp(xPhysTest, yPhysTest, model_data)
ux_test = YTest[:,0:1]
uy_test = YTest[:,1:2]
err_norm = np.sqrt(np.sum((ux_exact-ux_test)**2+(uy_exact-uy_test)**2))
ex_norm = np.sqrt(np.sum(ux_exact**2 + uy_exact**2))
rel_err_l2 = err_norm/ex_norm
print("Relative L2 error: ", rel_err_l2)
stress_xx_exact, stress_yy_exact, stress_xy_exact = exact_stresses(xPhysTest,
                                                    yPhysTest, model_data)
stress_xx_err = stress_xx_exact - stress_xx_comp
stress_yy_err = stress_yy_exact - stress_yy_comp
# BUG FIX: this line previously recomputed the xx error
# (stress_xx_exact - stress_xx_comp), so the energy error below used the
# wrong xy component.
stress_xy_err = stress_xy_exact - stress_xy_comp
C_inv = np.linalg.inv(pred_model.Emat.numpy())
# accumulate the squared energy norms of the error and the exact field
energy_err = 0.
energy_norm = 0.
numPts = len(xPhysTest)
for i in range(numPts):
    err_pt = np.array([stress_xx_err[i,0],stress_yy_err[i,0],stress_xy_err[i,0]])
    norm_pt = np.array([stress_xx_exact[i,0],stress_yy_exact[i,0],stress_xy_exact[i,0]])
    energy_err = energy_err + err_pt@C_inv@err_pt.T
    energy_norm = energy_norm + norm_pt@C_inv@norm_pt.T
print("Relative energy error: ", np.sqrt(energy_err/energy_norm))
# plot the displacement errors
plot_field_2d(XTest, ux_exact-YTest[:,0:1], numPtsUTest, numPtsVTest, title="Error for x-displacement")
plot_field_2d(XTest, uy_exact-YTest[:,1:2], numPtsUTest, numPtsVTest, title="Error for y-displacement")
# plot the stresses and the stress errors
plot_field_2d(XTest, stress_xx_comp, numPtsUTest, numPtsVTest, title="Computed sigma_xx")
plot_field_2d(XTest, stress_yy_comp, numPtsUTest, numPtsVTest, title="Computed sigma_yy")
plot_field_2d(XTest, stress_xy_comp, numPtsUTest, numPtsVTest, title="Computed sigma_xy")
plot_field_2d(XTest, stress_xx_err, numPtsUTest, numPtsVTest, title="Error for sigma_xx")
plot_field_2d(XTest, stress_yy_err, numPtsUTest, numPtsVTest, title="Error for sigma_yy")
plot_field_2d(XTest, stress_xy_err, numPtsUTest, numPtsVTest, title="Error for sigma_xy")
| [
"numpy.sqrt",
"utils.Geom_examples.QuarterAnnulus",
"numpy.array",
"tensorflow.keras.layers.Dense",
"numpy.arctan2",
"utils.tfp_loss.tfp_function_factory",
"tensorflow.dynamic_stitch",
"numpy.random.seed",
"matplotlib.pyplot.scatter",
"numpy.concatenate",
"tensorflow.convert_to_tensor",
"numpy... | [((1429, 1447), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (1443, 1447), True, 'import numpy as np\n'), ((1448, 1470), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(42)'], {}), '(42)\n', (1466, 1470), True, 'import tensorflow as tf\n'), ((2429, 2495), 'utils.Geom_examples.QuarterAnnulus', 'QuarterAnnulus', (["model_data['radius_int']", "model_data['radius_ext']"], {}), "(model_data['radius_int'], model_data['radius_ext'])\n", (2443, 2495), False, 'from utils.Geom_examples import QuarterAnnulus\n'), ((3458, 3490), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xPhys', 'yPhys'], {'s': '(0.1)'}), '(xPhys, yPhys, s=0.1)\n', (3469, 3490), True, 'import matplotlib.pyplot as plt\n'), ((3491, 3536), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xPhysBnd', 'yPhysBnd'], {'s': '(1)', 'c': '"""red"""'}), "(xPhysBnd, yPhysBnd, s=1, c='red')\n", (3502, 3536), True, 'import matplotlib.pyplot as plt\n'), ((3537, 3590), 'matplotlib.pyplot.title', 'plt.title', (['"""Boundary and interior integration points"""'], {}), "('Boundary and interior integration points')\n", (3546, 3590), True, 'import matplotlib.pyplot as plt\n'), ((3591, 3601), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3599, 3601), True, 'import matplotlib.pyplot as plt\n'), ((3796, 3834), 'tensorflow.keras.backend.set_floatx', 'tf.keras.backend.set_floatx', (['data_type'], {}), '(data_type)\n', (3823, 3834), True, 'import tensorflow as tf\n'), ((3840, 3874), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(20)', '"""swish"""'], {}), "(20, 'swish')\n", (3861, 3874), True, 'import tensorflow as tf\n'), ((3880, 3914), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(20)', '"""swish"""'], {}), "(20, 'swish')\n", (3901, 3914), True, 'import tensorflow as tf\n'), ((3920, 3954), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(20)', '"""swish"""'], {}), "(20, 'swish')\n", (3941, 3954), True, 'import tensorflow as tf\n'), ((3960, 
3990), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(2)', 'None'], {}), '(2, None)\n', (3981, 3990), True, 'import tensorflow as tf\n'), ((4002, 4028), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {}), '()\n', (4026, 4028), True, 'import tensorflow as tf\n'), ((4281, 4307), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['Xint'], {}), '(Xint)\n', (4301, 4307), True, 'import tensorflow as tf\n'), ((4318, 4344), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['Wint'], {}), '(Wint)\n', (4338, 4344), True, 'import tensorflow as tf\n'), ((4355, 4381), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['Xbnd'], {}), '(Xbnd)\n', (4375, 4381), True, 'import tensorflow as tf\n'), ((4392, 4418), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['Wbnd'], {}), '(Wbnd)\n', (4412, 4418), True, 'import tensorflow as tf\n'), ((4429, 4455), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['Ybnd'], {}), '(Ybnd)\n', (4449, 4455), True, 'import tensorflow as tf\n'), ((4473, 4484), 'time.time', 'time.time', ([], {}), '()\n', (4482, 4484), False, 'import time\n'), ((4589, 4600), 'time.time', 'time.time', ([], {}), '()\n', (4598, 4600), False, 'import time\n'), ((4691, 4768), 'utils.tfp_loss.tfp_function_factory', 'tfp_function_factory', (['pred_model', 'Xint_tf', 'Wint_tf', 'Xbnd_tf', 'Wbnd_tf', 'Ybnd_tf'], {}), '(pred_model, Xint_tf, Wint_tf, Xbnd_tf, Wbnd_tf, Ybnd_tf)\n', (4711, 4768), False, 'from utils.tfp_loss import tfp_function_factory\n'), ((4836, 4900), 'tensorflow.dynamic_stitch', 'tf.dynamic_stitch', (['loss_func.idx', 'pred_model.trainable_variables'], {}), '(loss_func.idx, pred_model.trainable_variables)\n', (4853, 4900), True, 'import tensorflow as tf\n'), ((4948, 5088), 'tensorflow_probability.optimizer.bfgs_minimize', 'tfp.optimizer.bfgs_minimize', ([], {'value_and_gradients_function': 'loss_func', 'initial_position': 'init_params', 'max_iterations': '(10000)', 'tolerance': '(1e-14)'}), 
'(value_and_gradients_function=loss_func,\n initial_position=init_params, max_iterations=10000, tolerance=1e-14)\n', (4975, 5088), True, 'import tensorflow_probability as tfp\n'), ((5298, 5309), 'time.time', 'time.time', ([], {}), '()\n', (5307, 5309), False, 'import time\n'), ((6676, 6703), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['XTest'], {}), '(XTest)\n', (6696, 6703), True, 'import tensorflow as tf\n'), ((7066, 7163), 'utils.Plotting.plot_field_2d', 'plot_field_2d', (['XTest', 'YTest[:, 0]', 'numPtsUTest', 'numPtsVTest'], {'title': '"""Computed x-displacement"""'}), "(XTest, YTest[:, 0], numPtsUTest, numPtsVTest, title=\n 'Computed x-displacement')\n", (7079, 7163), False, 'from utils.Plotting import plot_field_2d\n'), ((7158, 7255), 'utils.Plotting.plot_field_2d', 'plot_field_2d', (['XTest', 'YTest[:, 1]', 'numPtsUTest', 'numPtsVTest'], {'title': '"""Computed y-displacement"""'}), "(XTest, YTest[:, 1], numPtsUTest, numPtsVTest, title=\n 'Computed y-displacement')\n", (7171, 7255), False, 'from utils.Plotting import plot_field_2d\n'), ((8385, 8495), 'utils.Plotting.plot_field_2d', 'plot_field_2d', (['XTest', '(ux_exact - YTest[:, 0:1])', 'numPtsUTest', 'numPtsVTest'], {'title': '"""Error for x-displacement"""'}), "(XTest, ux_exact - YTest[:, 0:1], numPtsUTest, numPtsVTest,\n title='Error for x-displacement')\n", (8398, 8495), False, 'from utils.Plotting import plot_field_2d\n'), ((8489, 8599), 'utils.Plotting.plot_field_2d', 'plot_field_2d', (['XTest', '(uy_exact - YTest[:, 1:2])', 'numPtsUTest', 'numPtsVTest'], {'title': '"""Error for y-displacement"""'}), "(XTest, uy_exact - YTest[:, 1:2], numPtsUTest, numPtsVTest,\n title='Error for y-displacement')\n", (8502, 8599), False, 'from utils.Plotting import plot_field_2d\n'), ((8614, 8708), 'utils.Plotting.plot_field_2d', 'plot_field_2d', (['XTest', 'stress_xx_comp', 'numPtsUTest', 'numPtsVTest'], {'title': '"""Computed sigma_xx"""'}), "(XTest, stress_xx_comp, numPtsUTest, numPtsVTest, title=\n 
'Computed sigma_xx')\n", (8627, 8708), False, 'from utils.Plotting import plot_field_2d\n'), ((8704, 8798), 'utils.Plotting.plot_field_2d', 'plot_field_2d', (['XTest', 'stress_yy_comp', 'numPtsUTest', 'numPtsVTest'], {'title': '"""Computed sigma_yy"""'}), "(XTest, stress_yy_comp, numPtsUTest, numPtsVTest, title=\n 'Computed sigma_yy')\n", (8717, 8798), False, 'from utils.Plotting import plot_field_2d\n'), ((8794, 8888), 'utils.Plotting.plot_field_2d', 'plot_field_2d', (['XTest', 'stress_xy_comp', 'numPtsUTest', 'numPtsVTest'], {'title': '"""Computed sigma_xy"""'}), "(XTest, stress_xy_comp, numPtsUTest, numPtsVTest, title=\n 'Computed sigma_xy')\n", (8807, 8888), False, 'from utils.Plotting import plot_field_2d\n'), ((8885, 8979), 'utils.Plotting.plot_field_2d', 'plot_field_2d', (['XTest', 'stress_xx_err', 'numPtsUTest', 'numPtsVTest'], {'title': '"""Error for sigma_xx"""'}), "(XTest, stress_xx_err, numPtsUTest, numPtsVTest, title=\n 'Error for sigma_xx')\n", (8898, 8979), False, 'from utils.Plotting import plot_field_2d\n'), ((8975, 9069), 'utils.Plotting.plot_field_2d', 'plot_field_2d', (['XTest', 'stress_yy_err', 'numPtsUTest', 'numPtsVTest'], {'title': '"""Error for sigma_yy"""'}), "(XTest, stress_yy_err, numPtsUTest, numPtsVTest, title=\n 'Error for sigma_yy')\n", (8988, 9069), False, 'from utils.Plotting import plot_field_2d\n'), ((9065, 9159), 'utils.Plotting.plot_field_2d', 'plot_field_2d', (['XTest', 'stress_xy_err', 'numPtsUTest', 'numPtsVTest'], {'title': '"""Error for sigma_xy"""'}), "(XTest, stress_xy_err, numPtsUTest, numPtsVTest, title=\n 'Error for sigma_xy')\n", (9078, 9159), False, 'from utils.Plotting import plot_field_2d\n'), ((5482, 5498), 'numpy.arctan2', 'np.arctan2', (['y', 'x'], {}), '(y, x)\n', (5492, 5498), True, 'import numpy as np\n'), ((5609, 5623), 'numpy.hypot', 'np.hypot', (['x', 'y'], {}), '(x, y)\n', (5617, 5623), True, 'import numpy as np\n'), ((5983, 5997), 'numpy.hypot', 'np.hypot', (['x', 'y'], {}), '(x, y)\n', (5991, 5997), 
True, 'import numpy as np\n'), ((7423, 7484), 'numpy.sum', 'np.sum', (['((ux_exact - ux_test) ** 2 + (uy_exact - uy_test) ** 2)'], {}), '((ux_exact - ux_test) ** 2 + (uy_exact - uy_test) ** 2)\n', (7429, 7484), True, 'import numpy as np\n'), ((7494, 7531), 'numpy.sum', 'np.sum', (['(ux_exact ** 2 + uy_exact ** 2)'], {}), '(ux_exact ** 2 + uy_exact ** 2)\n', (7500, 7531), True, 'import numpy as np\n'), ((8050, 8123), 'numpy.array', 'np.array', (['[stress_xx_err[i, 0], stress_yy_err[i, 0], stress_xy_err[i, 0]]'], {}), '([stress_xx_err[i, 0], stress_yy_err[i, 0], stress_xy_err[i, 0]])\n', (8058, 8123), True, 'import numpy as np\n'), ((8133, 8212), 'numpy.array', 'np.array', (['[stress_xx_exact[i, 0], stress_yy_exact[i, 0], stress_xy_exact[i, 0]]'], {}), '([stress_xx_exact[i, 0], stress_yy_exact[i, 0], stress_xy_exact[i, 0]])\n', (8141, 8212), True, 'import numpy as np\n'), ((8350, 8383), 'numpy.sqrt', 'np.sqrt', (['(energy_err / energy_norm)'], {}), '(energy_err / energy_norm)\n', (8357, 8383), True, 'import numpy as np\n'), ((2739, 2777), 'numpy.concatenate', 'np.concatenate', (['(xPhys, yPhys)'], {'axis': '(1)'}), '((xPhys, yPhys), axis=1)\n', (2753, 2777), True, 'import numpy as np\n'), ((2801, 2815), 'numpy.array', 'np.array', (['Wint'], {}), '(Wint)\n', (2809, 2815), True, 'import numpy as np\n'), ((3354, 3398), 'numpy.concatenate', 'np.concatenate', (['(xPhysBnd, yPhysBnd)'], {'axis': '(1)'}), '((xPhysBnd, yPhysBnd), axis=1)\n', (3368, 3398), True, 'import numpy as np\n'), ((3424, 3438), 'numpy.array', 'np.array', (['Wbnd'], {}), '(Wbnd)\n', (3432, 3438), True, 'import numpy as np\n'), ((3717, 3757), 'numpy.concatenate', 'np.concatenate', (['(Ybnd_x, Ybnd_y)'], {'axis': '(1)'}), '((Ybnd_x, Ybnd_y), axis=1)\n', (3731, 3757), True, 'import numpy as np\n'), ((6602, 6648), 'numpy.concatenate', 'np.concatenate', (['(xPhysTest, yPhysTest)'], {'axis': '(1)'}), '((xPhysTest, yPhysTest), axis=1)\n', (6616, 6648), True, 'import numpy as np\n'), ((5439, 5450), 
'numpy.array', 'np.array', (['x'], {}), '(x)\n', (5447, 5450), True, 'import numpy as np\n'), ((5456, 5467), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (5464, 5467), True, 'import numpy as np\n')] |
from typing import Sequence, Any
import torch
def clamp_n(tensor: torch.Tensor, min_values: torch.Tensor, max_values: torch.Tensor) -> torch.Tensor:
    """
    Clamp a tensor with axis dependent values.

    Args:
        tensor: an N-d torch.Tensor
        min_values: a 1D tensor (or a 2D tensor with a leading singleton
            dim) of per-axis minimum values; must be a torch.Tensor
        max_values: per-axis maximum values; same shape as ``min_values``
    Returns:
        tensor with values clamped element-wise into [min_values, max_values]
    Examples:
        >>> t = torch.LongTensor([[1, 2, 3], [4, 5, 6]])
        >>> min_values = torch.LongTensor([3, 2, 4])
        >>> max_values = torch.LongTensor([3, 4, 8])
        >>> clamped_t = clamp_n(t, min_values, max_values)
    """
    # The previous annotations advertised Sequence[Any], but the asserts
    # below (and the tensor ops) require real tensors -- the hints now say so.
    assert isinstance(min_values, torch.Tensor)
    assert isinstance(max_values, torch.Tensor)
    assert min_values.shape == max_values.shape
    if len(min_values.shape) == 1:
        # Promote 1D bounds to shape (1, n) so they broadcast over dim 0.
        min_values = min_values.unsqueeze(dim=0)
        max_values = max_values.unsqueeze(dim=0)
    else:
        assert min_values.shape[0] == 1, 'must be broadcastable to tensor shape'
        assert max_values.shape[0] == 1, 'must be broadcastable to tensor shape'
    # Element-wise clamp: first cap at max, then floor at min.
    return torch.max(torch.min(tensor, max_values), min_values)
| [
"torch.min"
] | [((1181, 1210), 'torch.min', 'torch.min', (['tensor', 'max_values'], {}), '(tensor, max_values)\n', (1190, 1210), False, 'import torch\n')] |
from typing import Any, Dict
from meiga import Result, Error, Success
from petisco import AggregateRoot
from datetime import datetime
from taskmanager.src.modules.tasks.domain.description import Description
from taskmanager.src.modules.tasks.domain.events import TaskCreated
from taskmanager.src.modules.tasks.domain.task_id import TaskId
from taskmanager.src.modules.tasks.domain.title import Title
class Task(AggregateRoot):
    """Aggregate root modelling a single task in the task manager."""

    def __init__(
        self, task_id: TaskId, title: str, description: str, created_at: datetime
    ):
        self.created_at = created_at
        self.description = description
        self.title = title
        self.task_id = task_id
        # Base-class init last, matching the aggregate's lifecycle contract.
        super().__init__()

    @staticmethod
    def create(task_id: TaskId, title: Title, description: Description):
        """Build a new Task stamped with the current UTC time and record
        its TaskCreated domain event."""
        task = Task(task_id, title, description, datetime.utcnow())
        task.record(TaskCreated(task_id))
        return task

    def to_result(self) -> Result[Any, Error]:
        """Wrap this aggregate in a successful meiga Result."""
        return Success(self)

    def to_dict(self) -> Dict:
        """Serialize the task to a plain dict (created_at as ISO-8601 text)."""
        serialized: Dict = {
            "task_id": self.task_id,
            "title": self.title,
            "description": self.description,
            "created_at": self.created_at.isoformat(),
        }
        return serialized
| [
"meiga.Success",
"taskmanager.src.modules.tasks.domain.events.TaskCreated",
"datetime.datetime.utcnow"
] | [((984, 997), 'meiga.Success', 'Success', (['self'], {}), '(self)\n', (991, 997), False, 'from meiga import Result, Error, Success\n'), ((840, 857), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (855, 857), False, 'from datetime import datetime\n'), ((879, 899), 'taskmanager.src.modules.tasks.domain.events.TaskCreated', 'TaskCreated', (['task_id'], {}), '(task_id)\n', (890, 899), False, 'from taskmanager.src.modules.tasks.domain.events import TaskCreated\n')] |
import stacks1
def is_match(closer, opener):
    """Return True if closing bracket `closer` pairs with opening bracket `opener`."""
    # Map each closing bracket to the opener it must pair with.
    expected_opener = {")": "(", "]": "[", "}": "{"}
    return expected_opener[closer] == opener
def is_balanced(s):
    """Return True if every bracket in `s` is properly matched and nested.

    Non-bracket characters are ignored. Uses a plain list as the stack
    instead of the hand-rolled stacks1.Stack -- a Python list already
    provides O(1) push (append) and pop.
    """
    closer_to_opener = {")": "(", "]": "[", "}": "{"}
    stack = []
    for ch in s:
        if ch in "([{":
            stack.append(ch)
        elif ch in closer_to_opener:
            # A closer with no pending opener, or the wrong opener on top,
            # means the string is unbalanced.
            if not stack or stack.pop() != closer_to_opener[ch]:
                return False
    # Balanced only if no opener is left waiting for its closer.
    return not stack
# Demonstrate the checker on a few sample expressions, one result per line.
for expression in ("))((a+b}{", "((a+b))", "))", "[a+b]*(x+2y)*{hh+kk}"):
    print(is_balanced(expression))
"stacks1.Stack"
] | [((189, 204), 'stacks1.Stack', 'stacks1.Stack', ([], {}), '()\n', (202, 204), False, 'import stacks1\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu May 3 18:30:29 2018
@author: Koushik
"""
import pandas as pd
from IPython.display import display
import sys
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 29 19:04:35 2018
@author: Koushik
"""
#Python 2.x program for Speech Recognition
import re
#enter the name of usb microphone that you found
#using lsusb
#the following name is only used as an example
#mic_name = "USB Device 0x46d:0x825: Audio (hw:1, 0)"
#device_id = "MMDEVAPI\AudioEndpoints"
#Sample rate is how often values are recorded
#mic_list = sr.Microphone.list_microphone_names()
#the following loop aims to set the device ID of the mic that
#we specifically want to use to avoid ambiguity.
#for i, microphone_name in enumerate(mic_list):
# if microphone_name == mic_name:
# device_id = i
#use the microphone as source for input. Here, we also specify
#which device ID to specifically look for incase the microphone
#is not working, an error will pop up saying "device_id undefined"
# Load the catalogue of known product names, lower-cased for matching.
products = pd.read_csv('products.csv')
products = [name.lower() for name in products['product_name'].tolist()]
#print(products)

# The spoken order arrives as command-line words.
text = ' '.join(sys.argv[1:])
# NOTE: the original pattern ended with '|' (an empty alternative).  Since
# Python 3.7 re.split() splits on empty matches, which would shred the text
# into single characters -- the trailing '|' is removed.
text = re.split(' and |order |some | like | love ', text)
#print(text)

# Keep only the fragments that name a known product.  Each fragment is
# echoed to stdout, matching the original script's debug output.
ordered_items = []
for fragment in text:
    print(fragment)
    if fragment in products:
        ordered_items.append(fragment)

# Association-rule output; item names lower-cased to match the catalogue.
analysis = pd.read_csv("output.csv")
analysis["itemA"] = analysis['itemA'].str.lower()

# Collect the rule rows for every ordered item.  DataFrame.append() was
# deprecated in pandas 1.4 and removed in 2.0, so gather the per-item
# frames and concatenate once.
frames = [analysis.loc[analysis['itemA'] == item] for item in ordered_items]
if frames:
    analysis_specific_data = pd.concat(frames)
else:
    # Keep the schema so sort_values('lift') below still works.
    analysis_specific_data = pd.DataFrame(columns=analysis.columns)

# Rank recommendations by lift and publish them as an HTML table.
analysis_specific_data = analysis_specific_data.sort_values('lift', ascending=False)
analysis_specific_data.to_html('recommend_table.html')
| [
"pandas.DataFrame",
"re.split",
"pandas.read_csv"
] | [((1062, 1089), 'pandas.read_csv', 'pd.read_csv', (['"""products.csv"""'], {}), "('products.csv')\n", (1073, 1089), True, 'import pandas as pd\n'), ((1267, 1318), 're.split', 're.split', (['""" and |order |some | like | love |"""', 'text'], {}), "(' and |order |some | like | love |', text)\n", (1275, 1318), False, 'import re\n'), ((1487, 1512), 'pandas.read_csv', 'pd.read_csv', (['"""output.csv"""'], {}), "('output.csv')\n", (1498, 1512), True, 'import pandas as pd\n'), ((1753, 1767), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1765, 1767), True, 'import pandas as pd\n')] |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
from paddle.fluid import core, framework, unique_name
from .meta_optimizer_base import MetaOptimizerBase
__all__ = []
class FP16AllReduceOptimizer(MetaOptimizerBase):
    """Meta optimizer that halves gradient allreduce traffic by casting
    fp32 gradients to fp16 before allreduce and back to fp32 afterwards.

    Activated by ``user_defined_strategy.fp16_allreduce`` in a collective
    (multi-device) run; the actual parameter update is delegated to the
    wrapped inner optimizer.
    """
    def __init__(self, optimizer):
        super(FP16AllReduceOptimizer, self).__init__(optimizer)
        # The user's optimizer; apply_optimize() forwards to it after
        # compressing the gradients.
        self.inner_opt = optimizer
        # we do not allow meta optimizer to be inner optimizer currently
        self.meta_optimizers_white_list = [
            "LarsOptimizer",
            "LambOptimizer",
            "RecomputeOptimizer",
            "LocalSGDOptimizer",
            "GradientMergeOptimizer",
            "GraphExecutionOptimizer",
            "AdaptiveLocalSGDOptimizer",
        ]
        # DGC already compresses gradients itself, so the two must not stack.
        # NOTE(review): rationale inferred from the optimizer's name -- confirm.
        self.meta_optimizers_black_list = ["DGCOptimizer"]
    def _set_basic_info(self, loss, role_maker, user_defined_optimizer,
                        user_defined_strategy):
        # Pure pass-through; all bookkeeping lives in the base class.
        super(FP16AllReduceOptimizer, self)._set_basic_info(
            loss, role_maker, user_defined_optimizer, user_defined_strategy)
    def _can_apply(self):
        """Apply only in collective mode with the fp16_allreduce flag set."""
        if not self.role_maker._is_collective:
            return False
        if self.user_defined_strategy.fp16_allreduce:
            return True
        return False
    def _disable_strategy(self, dist_strategy):
        # Turn the strategy flag off when this meta optimizer is rejected.
        dist_strategy.fp16_allreduce = False
    def _enable_strategy(self, dist_strategy, context=None):
        # Force the strategy flag on when this meta optimizer is selected.
        dist_strategy.fp16_allreduce = True
    @staticmethod
    def fp16_compression(param_and_grads):
        """
        Compress fp32 gradients to fp16 during allreduce.

        Takes a list of (param, grad) pairs, inserts a cast op fp32->fp16
        after each eligible grad and a cast fp16->fp32 in the optimize
        stage, and returns the rewritten (param, grad) list. Grads that
        are None or not fp32 pass through unchanged.
        """
        op_maker = core.op_proto_and_checker_maker
        new_param_and_grads = []  # param, grad, is_cast
        # cast grad from fp32->fp16 before allreduce,
        for param, grad in param_and_grads:
            # Nothing to compress: missing grad or already non-fp32.
            if grad is None or grad.dtype != core.VarDesc.VarType.FP32:
                new_param_and_grads.append((param, grad, False))
                continue
            op = grad.op
            block = grad.block
            var_attr = op.all_attrs()[op_maker.kOpRoleVarAttrName()]
            # Skip grads whose producing op does not list this param in its
            # op_role_var attribute.
            if param.name not in var_attr:
                new_param_and_grads.append((param, grad, False))
                continue
            # remove (param, grad) from op_role_var
            # The pair is re-registered on the cast op below, so downstream
            # passes attach the allreduce to the fp16 variable instead.
            var_attr.remove(param.name)
            var_attr.remove(grad.name)
            if len(var_attr) > 1:
                op._set_attr(op_maker.kOpRoleVarAttrName(), var_attr)
            else:
                op._remove_attr(op_maker.kOpRoleVarAttrName())
            # Fresh fp16 variable to hold the compressed gradient.
            new_grad = block.create_var(
                name=unique_name.generate(grad.name + ".cast_fp16"),
                dtype=core.VarDesc.VarType.FP16,
                persistable=False,
                stop_gradient=True)
            with block.program._backward_role_guard():
                cast_op = block.append_op(
                    type="cast",
                    inputs={"X": grad},
                    outputs={"Out": new_grad},
                    attrs={
                        "in_dtype": core.VarDesc.VarType.FP32,
                        "out_dtype": core.VarDesc.VarType.FP16
                    },
                    stop_gradient=True)
                # Mark the cast as a backward-role op carrying the
                # (param, fp16-grad) pair in op_role_var.
                backward = op_maker.OpRole.Backward
                cast_op._set_attr(op_maker.kOpRoleAttrName(), backward)
                cast_op._set_attr(op_maker.kOpRoleVarAttrName(),
                                  [param.name, new_grad.name])
                new_grad.op = cast_op
            new_param_and_grads.append((param, new_grad, True))
        ret_param_and_grads = []
        # cast grad from fp16->fp32 after allreduce.
        # NOTE. Now we split fp16 compression into two for loops,
        # if we do not separate them, fuse allreduce will wrong.
        # This must be the problem of fuse allreduce pass, need
        # fixed in future.
        for param, grad, cast in new_param_and_grads:
            if not cast:
                ret_param_and_grads.append((param, grad))
                continue
            block = grad.block
            # fp32 variable receiving the decompressed gradient.
            new_grad = block.create_var(
                name=unique_name.generate(grad.name + ".cast_fp32"),
                dtype=core.VarDesc.VarType.FP32,
                persistable=False,
                stop_gradient=True)
            # _optimized_guard presumably schedules this cast in the
            # optimize stage, i.e. after the allreduce -- confirm.
            with block.program._optimized_guard(
                [param, grad]), framework.name_scope('fp16_allreduce'):
                cast_op = block.append_op(
                    type="cast",
                    inputs={"X": grad},
                    outputs={"Out": new_grad},
                    attrs={
                        "in_dtype": core.VarDesc.VarType.FP16,
                        "out_dtype": core.VarDesc.VarType.FP32
                    },
                    stop_gradient=True)
            ret_param_and_grads.append((param, new_grad))
        return ret_param_and_grads
    def apply_optimize(self, loss, startup_program, params_grads):
        """Compress the gradients, then delegate the update to the inner optimizer."""
        new_params_grads = self.fp16_compression(params_grads)
        return self.inner_opt.apply_optimize(
            loss,
            startup_program=startup_program,
            params_grads=new_params_grads)
| [
"paddle.fluid.unique_name.generate",
"paddle.fluid.framework.name_scope"
] | [((4899, 4937), 'paddle.fluid.framework.name_scope', 'framework.name_scope', (['"""fp16_allreduce"""'], {}), "('fp16_allreduce')\n", (4919, 4937), False, 'from paddle.fluid import core, framework, unique_name\n'), ((3124, 3170), 'paddle.fluid.unique_name.generate', 'unique_name.generate', (["(grad.name + '.cast_fp16')"], {}), "(grad.name + '.cast_fp16')\n", (3144, 3170), False, 'from paddle.fluid import core, framework, unique_name\n'), ((4649, 4695), 'paddle.fluid.unique_name.generate', 'unique_name.generate', (["(grad.name + '.cast_fp32')"], {}), "(grad.name + '.cast_fp32')\n", (4669, 4695), False, 'from paddle.fluid import core, framework, unique_name\n')] |
import re
import sys
import os
# Lists of same characters
# Lists of same characters
alpha_equiv = ['Α','Ά','ά','ὰ','ά','ἀ','ἁ','ἂ','ἃ','ἄ','ἅ','ἆ','ἇ','Ἀ','Ἁ','Ἂ','Ἃ','Ἄ','Ἅ','Ἆ','Ἇ','ᾶ','Ᾰ','Ᾱ','Ὰ','Ά','ᾰ','ᾱ'] #Converts to α
alpha_subscripted = ['ᾀ','ᾁ','ᾂ','ᾃ','ᾄ','ᾅ','ᾆ','ᾇ','ᾈ','ᾉ','ᾊ','ᾋ','ᾌ','ᾍ','ᾎ','ᾏ','ᾲ','ᾴ','ᾷ','ᾼ','ᾳ'] #Converts to ᾳ
epsilon_equiv = ['Ε','Έ','έ','ὲ','έ','ἐ','ἑ','ἒ','ἓ','ἔ','ἕ','Ἐ','Ἑ','Ἒ','Ἓ','Ἔ','Ἕ'] #Converts to ε
eta_equiv = ['Η','Ή','ή','ὴ','ή','ἠ','ἡ','ἢ','ἣ','ἤ','ἥ','ἦ','ἧ','Ἠ','Ἡ','Ἢ','Ἣ','Ἤ','Ἥ','Ἦ','Ἧ','Ὲ','Έ','Ὴ','Ή','ῆ'] #Converts to η
# Fixed: 'ᾞ' and 'ᾟ' were missing a comma between them, so Python fused
# them into the single two-char string 'ᾞᾟ' and str.replace() only matched
# them when adjacent, never individually.
eta_subscripted = ['ᾐ','ᾑ','ᾒ','ᾓ','ᾔ','ᾕ','ᾖ','ᾗ','ᾘ','ᾙ','ᾚ','ᾛ','ᾜ','ᾝ','ᾞ','ᾟ','ῂ','ῄ','ῇ','ῌ','ῃ'] #Converts to ῃ
iota_equiv = ['Ι','Ί','ΐ','Ϊ','ί','ϊ','ὶ','ί','ἰ','ἱ','ἲ','ἳ','ἴ','ἵ','ἶ','ἷ','Ἰ','Ἱ','Ἲ','Ἳ','Ἴ','Ἵ','Ἶ','Ἷ','ῐ','ῑ','ῒ','ΐ','ῖ','ῗ','Ῐ','Ῑ','Ὶ','Ί'] #Converts to ι
omicron_equiv = ['Ο','Ό','ό','ὸ','ό','ὀ','ὁ','ὂ','ὃ','ὄ','ὅ','Ὀ','Ὁ','Ὂ','Ὃ','Ὄ','Ὅ'] #Converts to ο
upsilon_equiv = ['Υ','Ύ','Ϋ','ΰ','ϋ','ύ','ὺ','ύ','ὐ','ὑ','ὒ','ὓ','ὔ','ὕ','ὖ','ὗ','Ὑ','Ὓ','Ὕ','Ὗ','ΰ','ῦ','ῧ','Ῠ','Ῡ','Ὺ','Ύ'] #Converts to υ
omega_equiv = ['Ω','Ώ','ώ','ὼ','ώ','ὠ','ὡ','ὢ','ὣ','ὤ','ὥ','ὦ','ὧ','Ὠ','Ὡ','Ὢ','Ὣ','Ὤ','Ὥ','Ὦ','Ὧ','ῶ','Ὸ','Ό','Ὼ','Ώ'] #Converts to ω
omega_subscripted = ['ᾠ','ᾡ','ᾢ','ᾣ','ᾤ','ᾥ','ᾦ','ᾧ','ᾨ','ᾩ','ᾪ','ᾫ','ᾬ','ᾭ','ᾮ','ᾯ','ῲ','ῴ','ῷ','ῼ','ῳ'] #Converts to ῳ
rho_equiv = ['Ρ','ῤ','ῥ','Ῥ'] #Converts to ρ
uppercase = {'Β':'β','Γ':'γ','Δ':'δ','Ζ':'ζ','Θ':'θ','Κ':'κ','Λ':'λ','Μ':'μ','Ν':'ν','Ξ':'ξ','Π':'π','Σ':'σ','Τ':'τ','Φ':'φ','Χ':'χ','Ψ':'ψ'}
def normalizer(char_list, normal_char, string):
    """Replace every occurrence in `string` of each entry of `char_list`
    with `normal_char`, and return the result."""
    result = string
    for equivalent in char_list:
        result = result.replace(equivalent, normal_char)
    return result
def normalize(data, ignore_subscript=True):
    """Strip editorial brackets and collapse every accented or uppercase
    Greek letter onto its textually significant base form.

    When `ignore_subscript` is True, iota-subscripted vowels are folded to
    the plain vowel; otherwise they keep a bare subscripted base form.
    """
    # Drop square brackets first.
    data = re.sub(r'(\[|\])', '', data)

    # Each accented family collapses onto one base letter, in order.
    base_map = [
        (alpha_equiv, 'α'),
        (epsilon_equiv, 'ε'),
        (eta_equiv, 'η'),
        (iota_equiv, 'ι'),
        (omicron_equiv, 'ο'),
        (upsilon_equiv, 'υ'),
        (omega_equiv, 'ω'),
        (rho_equiv, 'ρ'),
    ]
    if ignore_subscript:
        base_map += [(alpha_subscripted, 'α'), (eta_subscripted, 'η'),
                     (omega_subscripted, 'ω')]
    else:
        base_map += [(alpha_subscripted, 'ᾳ'), (eta_subscripted, 'ῃ'),
                     (omega_subscripted, 'ῳ')]
    for equivalents, base in base_map:
        data = normalizer(equivalents, base, data)

    # Finally fold the remaining capitals to lowercase.
    for cap, low in uppercase.items():
        data = data.replace(cap, low)
    return data
def main():
    """Read the file named on the command line, normalize it, and write
    the result to `<name>_normalized.txt`.

    An optional second argument of 'false' keeps iota subscripts as
    distinct subscripted letters; anything else (or nothing) folds them.
    """
    # Require an input file argument.
    if len(sys.argv) == 1:
        sys.exit('Program needs a file to process.')
    try:
        ignore_subscript = sys.argv[2].lower() != 'false'
    except IndexError:
        # Narrowed from a bare `except:` -- only a missing second argument
        # is expected here; anything else should propagate.
        ignore_subscript = True
        print('Ignoring iota subscripts.')
    filename = os.path.splitext(sys.argv[1])[0] + '_normalized.txt'
    # Context managers guarantee both files are closed even if normalize()
    # raises (the original left them open on error).
    with open(sys.argv[1]) as input_file, open(filename, 'w') as output_file:
        data = normalize(input_file.read(), ignore_subscript)
        output_file.write(data)


if __name__ == '__main__':
    main()
| [
"re.sub",
"os.path.splitext",
"sys.exit"
] | [((1807, 1836), 're.sub', 're.sub', (['"""(\\\\[|\\\\])"""', '""""""', 'data'], {}), "('(\\\\[|\\\\])', '', data)\n", (1813, 1836), False, 'import re\n'), ((2786, 2830), 'sys.exit', 'sys.exit', (['"""Program needs a file to process."""'], {}), "('Program needs a file to process.')\n", (2794, 2830), False, 'import sys\n'), ((3185, 3214), 'os.path.splitext', 'os.path.splitext', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (3201, 3214), False, 'import os\n')] |