hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3b63d95f8ee5f617b3035015f0897ae4868cbabe | 1,756 | py | Python | contracts/interfaces/event_manager.py | iconation/ICONSafe-SCORE | a3dcea7fa182152e66f5cddaacebef8a7350c332 | [
"Apache-2.0"
] | 2 | 2020-08-17T10:39:42.000Z | 2020-09-07T14:10:39.000Z | contracts/interfaces/event_manager.py | iconation/MultiSigWallet | a3dcea7fa182152e66f5cddaacebef8a7350c332 | [
"Apache-2.0"
] | null | null | null | contracts/interfaces/event_manager.py | iconation/MultiSigWallet | a3dcea7fa182152e66f5cddaacebef8a7350c332 | [
"Apache-2.0"
] | 2 | 2021-02-07T11:30:53.000Z | 2021-04-17T19:52:21.000Z | # -*- coding: utf-8 -*-
# Copyright 2021 ICONation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from iconservice import *
from .address_registrar import *
class ABCEventManager(InterfaceScore):
    """Interface stub for the remote event manager SCORE.

    Each @interface method proxies a call to the deployed contract; the
    bodies here are intentionally empty.
    """
    @interface
    def name(self) -> str:
        """Return the remote contract's name."""
        pass
    @interface
    def get_events(self, offset: int = 0) -> list:
        """Return a list of events starting at the given offset."""
        pass
    @interface
    def on_add_event(self) -> None:
        """Notify the event manager that a new event is being recorded."""
        pass
class EventManagerProxy(AddressRegistrarProxy):
    """Mixin exposing the event manager contract via the address registrar."""
    # Registrar key under which the event manager's address is stored.
    NAME = "EVENT_MANAGER_PROXY"
    # ================================================
    # Fields
    # ================================================
    @property
    def event_manager(self):
        """Resolve the event manager address and return an interface score for it.

        Raises AddressNotInRegistrar if the registrar has no entry for NAME.
        """
        address = self.registrar.resolve(EventManagerProxy.NAME)
        if not address:
            raise AddressNotInRegistrar(EventManagerProxy.NAME)
        return self.create_interface_score(address, ABCEventManager)
def add_event(func):
    """Decorator: notify the event manager before running the wrapped method.

    Reverts the transaction if applied to something that is not a function.
    The notification is best-effort: if the registrar does not yet know the
    event manager's address, the wrapped method still runs normally.
    """
    if not isfunction(func):
        revert('NotAFunctionError')
    @wraps(func)
    def _notify_then_call(self: object, *args, **kwargs):
        try:
            self.event_manager.on_add_event()
        except AddressNotInRegistrar:
            # Registrar may not be configured yet
            pass
        return func(self, *args, **kwargs)
    return _notify_then_call
| 26.606061 | 74 | 0.642938 |
318c0abdd34aaee963e3e12f60d4d2cd2cf965b4 | 72,353 | py | Python | hydrus/client/gui/QtPorting.py | thatfuckingbird/hydrus-websocket-server | b55454740dca5101448bf92224432f8bdbec7e77 | [
"WTFPL"
] | null | null | null | hydrus/client/gui/QtPorting.py | thatfuckingbird/hydrus-websocket-server | b55454740dca5101448bf92224432f8bdbec7e77 | [
"WTFPL"
] | null | null | null | hydrus/client/gui/QtPorting.py | thatfuckingbird/hydrus-websocket-server | b55454740dca5101448bf92224432f8bdbec7e77 | [
"WTFPL"
] | null | null | null | #This file is licensed under the Do What the Fuck You Want To Public License aka WTFPL
import os
# If not explicitely set, prefer PySide2 instead of the qtpy default which is PyQt5
# It is important that this runs on startup *before* anything is imported from qtpy.
# Since test.py, client.py and client.pyw all import this module first before any other Qt related ones, this requirement is satisfied.
if not 'QT_API' in os.environ:
try:
import PySide2
os.environ[ 'QT_API' ] = 'pyside2'
except ImportError as e:
pass
#
import qtpy
from qtpy import QtCore as QC
from qtpy import QtWidgets as QW
from qtpy import QtGui as QG
import math
import typing
from collections import defaultdict
# Provide a uniform isValid( obj ) predicate that reports whether the
# underlying C++ object of a Qt wrapper is still alive, regardless of binding.
if qtpy.PYQT5:
    import sip # pylint: disable=E0401
    def isValid( obj ):
        # Only sip-wrapped objects can have a deleted C++ half; anything else
        # is considered valid by definition.
        if isinstance( obj, sip.simplewrapper ):
            return not sip.isdeleted( obj )
        return True
elif qtpy.PYSIDE2:
    import shiboken2
    # shiboken2 ships an equivalent check natively.
    isValid = shiboken2.isValid
else:
    raise RuntimeError( 'You need either PySide2 or PyQt5' )
from hydrus.core import HydrusConstants as HC
from hydrus.core import HydrusData
from hydrus.core import HydrusGlobals as HG
from hydrus.client import ClientConstants as CC
def MonkeyPatchMissingMethods():
    """Patch over PyQt5/PySide2 API differences so the rest of the code can use one spelling.

    PySide2's QFileDialog.getSaveFileName accepts 'selectedFilter'; PyQt5
    names that keyword 'initialFilter'. Under PyQt5 we wrap the static
    method so calls written against the PySide2 keyword still work.
    """
    if qtpy.PYQT5:
        def MonkeyPatchGetSaveFileName( original_function ):
            def new_function( *args, **kwargs ):
                # Translate the PySide2-style keyword to PyQt5's name.
                if 'selectedFilter' in kwargs:
                    kwargs[ 'initialFilter' ] = kwargs[ 'selectedFilter' ]
                    del kwargs[ 'selectedFilter' ]
                return original_function( *args, **kwargs )
            return new_function
        QW.QFileDialog.getSaveFileName = MonkeyPatchGetSaveFileName( QW.QFileDialog.getSaveFileName )
class HBoxLayout( QW.QHBoxLayout ):
    """QHBoxLayout with convenient margin/spacing defaults and a setMargin shim."""
    def __init__( self, margin = 2, spacing = 2 ):
        QW.QHBoxLayout.__init__( self )
        self.setMargin( margin )
        self.setSpacing( spacing )
    def setMargin( self, val ):
        # Qt removed setMargin; emulate it by setting all four content margins.
        self.setContentsMargins( val, val, val, val )
class VBoxLayout( QW.QVBoxLayout ):
    """QVBoxLayout with convenient margin/spacing defaults and a setMargin shim."""
    def __init__( self, margin = 2, spacing = 2 ):
        QW.QVBoxLayout.__init__( self )
        self.setMargin( margin )
        self.setSpacing( spacing )
    def setMargin( self, val ):
        # Qt removed setMargin; emulate it by setting all four content margins.
        self.setContentsMargins( val, val, val, val )
class LabelledSlider( QW.QWidget ):
    """A horizontal slider flanked by min/max labels, with the current value shown underneath."""
    def __init__( self, parent = None ):
        QW.QWidget.__init__( self, parent )
        self.setLayout( VBoxLayout( spacing = 2 ) )
        top_layout = HBoxLayout( spacing = 2 )
        self._min_label = QW.QLabel()
        self._max_label = QW.QLabel()
        self._value_label = QW.QLabel()
        self._slider = QW.QSlider()
        self._slider.setOrientation( QC.Qt.Horizontal )
        self._slider.setTickInterval( 1 )
        self._slider.setTickPosition( QW.QSlider.TicksBothSides )
        # min label | slider | max label, with the live value centred below.
        top_layout.addWidget( self._min_label )
        top_layout.addWidget( self._slider )
        top_layout.addWidget( self._max_label )
        self.layout().addLayout( top_layout )
        self.layout().addWidget( self._value_label )
        self._value_label.setAlignment( QC.Qt.AlignVCenter | QC.Qt.AlignHCenter )
        self.layout().setAlignment( self._value_label, QC.Qt.AlignHCenter )
        self._slider.valueChanged.connect( self._UpdateLabels )
        self._UpdateLabels()
    def _UpdateLabels( self ):
        # Keep all three labels in sync with the slider state.
        self._min_label.setText( str( self._slider.minimum() ) )
        self._max_label.setText( str( self._slider.maximum() ) )
        self._value_label.setText( str( self._slider.value() ) )
    def GetValue( self ):
        """Return the slider's current integer value."""
        return self._slider.value()
    def SetRange( self, min, max ):
        """Set the slider's bounds (note: parameter names shadow builtins; kept for caller compatibility)."""
        self._slider.setRange( min, max )
        self._UpdateLabels()
    def SetValue( self, value ):
        """Set the slider's current value."""
        self._slider.setValue( value )
        self._UpdateLabels()
def SplitterVisibleCount( splitter ):
    """Return how many of the splitter's child widgets are currently visible to it."""
    return sum( 1 for i in range( splitter.count() ) if splitter.widget( i ).isVisibleTo( splitter ) )
class DirPickerCtrl( QW.QWidget ):
    """A line edit plus 'browse' button for choosing a directory.

    Emits dirPickerChanged whenever the path changes to one that exists.
    """
    dirPickerChanged = QC.Signal()
    def __init__( self, parent ):
        QW.QWidget.__init__( self, parent )
        layout = HBoxLayout( spacing = 2 )
        self._path_edit = QW.QLineEdit( self )
        self._button = QW.QPushButton( 'browse', self )
        self._button.clicked.connect( self._Browse )
        self._path_edit.textEdited.connect( self._TextEdited )
        layout.addWidget( self._path_edit )
        layout.addWidget( self._button )
        self.setLayout( layout )
    def SetPath( self, path ):
        """Set the displayed path without emitting dirPickerChanged."""
        self._path_edit.setText( path )
    def GetPath( self ):
        """Return the currently displayed path text."""
        return self._path_edit.text()
    def _Browse( self ):
        # Open a directory chooser seeded with the current path; optionally
        # force Qt's own dialog instead of the native one.
        existing_path = self._path_edit.text()
        if HG.client_controller.new_options.GetBoolean( 'use_qt_file_dialogs' ):
            options = QW.QFileDialog.Options( QW.QFileDialog.DontUseNativeDialog )
        else:
            options = QW.QFileDialog.Options()
        path = QW.QFileDialog.getExistingDirectory( self, '', existing_path, options = options )
        if path == '':
            # user cancelled
            return
        path = os.path.normpath( path )
        self._path_edit.setText( path )
        if os.path.exists( path ):
            self.dirPickerChanged.emit()
    def _TextEdited( self, text ):
        # Only announce manually typed paths once they point at something real.
        if os.path.exists( text ):
            self.dirPickerChanged.emit()
class FilePickerCtrl( QW.QWidget ):
    """A line edit plus 'browse' button for choosing a file, in open or save mode.

    Emits filePickerChanged when the path changes; in save mode the path does
    not need to exist yet.
    """
    filePickerChanged = QC.Signal()
    def __init__( self, parent = None, wildcard = None, starting_directory = None ):
        QW.QWidget.__init__( self, parent )
        layout = HBoxLayout( spacing = 2 )
        self._path_edit = QW.QLineEdit( self )
        self._button = QW.QPushButton( 'browse', self )
        self._button.clicked.connect( self._Browse )
        self._path_edit.textEdited.connect( self._TextEdited )
        layout.addWidget( self._path_edit )
        layout.addWidget( self._button )
        self.setLayout( layout )
        self._save_mode = False
        self._wildcard = wildcard
        self._starting_directory = starting_directory
    def SetPath( self, path ):
        """Set the displayed path without emitting filePickerChanged."""
        self._path_edit.setText( path )
    def GetPath( self ):
        """Return the currently displayed path text."""
        return self._path_edit.text()
    def SetSaveMode( self, save_mode ):
        """Switch between save-file and open-file dialog behaviour."""
        self._save_mode = save_mode
    def _Browse( self ):
        existing_path = self._path_edit.text()
        # Fall back to the configured starting directory when nothing is set.
        if existing_path == '' and self._starting_directory is not None:
            existing_path = self._starting_directory
        if HG.client_controller.new_options.GetBoolean( 'use_qt_file_dialogs' ):
            options = QW.QFileDialog.Options( QW.QFileDialog.DontUseNativeDialog )
        else:
            options = QW.QFileDialog.Options()
        # getSaveFileName/getOpenFileName return ( path, chosen_filter ); we
        # only want the path. 'selectedFilter' works on PyQt5 too via the
        # monkey patch in MonkeyPatchMissingMethods.
        if self._save_mode:
            if self._wildcard:
                path = QW.QFileDialog.getSaveFileName( self, '', existing_path, filter = self._wildcard, selectedFilter = self._wildcard, options = options )[0]
            else:
                path = QW.QFileDialog.getSaveFileName( self, '', existing_path, options = options )[0]
        else:
            if self._wildcard:
                path = QW.QFileDialog.getOpenFileName( self, '', existing_path, filter = self._wildcard, selectedFilter = self._wildcard, options = options )[0]
            else:
                path = QW.QFileDialog.getOpenFileName( self, '', existing_path, options = options )[0]
        if path == '':
            # user cancelled
            return
        path = os.path.normpath( path )
        self._path_edit.setText( path )
        if self._save_mode or os.path.exists( path ):
            self.filePickerChanged.emit()
    def _TextEdited( self, text ):
        if self._save_mode or os.path.exists( text ):
            self.filePickerChanged.emit()
class TabBar( QW.QTabBar ):
    """Tab bar that reports double/middle clicks and remembers the last left-click for drag-and-drop.

    Drops of non-tab payloads can be forwarded to a supplementary target.
    """
    tabDoubleLeftClicked = QC.Signal( int )
    tabMiddleClicked = QC.Signal( int )
    tabSpaceDoubleLeftClicked = QC.Signal()
    tabSpaceDoubleMiddleClicked = QC.Signal()
    def __init__( self, parent = None ):
        QW.QTabBar.__init__( self, parent )
        self.setMouseTracking( True )
        self.setAcceptDrops( True )
        self._supplementary_drop_target = None
        # last left-click bookkeeping, used by the owning TabWidgetWithDnD to
        # decide which tab a drag originated from. -1 / None mean 'nothing'.
        self._last_clicked_tab_index = -1
        self._last_clicked_global_pos = None
    def AddSupplementaryTabBarDropTarget( self, drop_target ):
        """Register an object whose eventFilter receives non-tab drop events."""
        self._supplementary_drop_target = drop_target
    def clearLastClickedTabInfo( self ):
        """Forget the last recorded left-click (called once a drag completes)."""
        self._last_clicked_tab_index = -1
        self._last_clicked_global_pos = None
    def event( self, event ):
        return QW.QTabBar.event( self, event )
    def mouseMoveEvent( self, e ):
        # Deliberately not handled here; the parent tab widget drives drag
        # initiation from its own mouseMoveEvent.
        e.ignore()
    def mousePressEvent( self, event ):
        index = self.tabAt( event.pos() )
        if event.button() == QC.Qt.LeftButton:
            # Remember where the press happened so a later move can start a drag.
            self._last_clicked_tab_index = index
            self._last_clicked_global_pos = event.globalPos()
        QW.QTabBar.mousePressEvent( self, event )
    def mouseReleaseEvent( self, event ):
        index = self.tabAt( event.pos() )
        if event.button() == QC.Qt.MiddleButton:
            if index != -1:
                self.tabMiddleClicked.emit( index )
                return
        QW.QTabBar.mouseReleaseEvent( self, event )
    def mouseDoubleClickEvent( self, event ):
        index = self.tabAt( event.pos() )
        # index == -1 means the empty space to the right of the tabs.
        if event.button() == QC.Qt.LeftButton:
            if index == -1:
                self.tabSpaceDoubleLeftClicked.emit()
            else:
                self.tabDoubleLeftClicked.emit( index )
            return
        elif event.button() == QC.Qt.MiddleButton:
            if index == -1:
                self.tabSpaceDoubleMiddleClicked.emit()
            else:
                self.tabMiddleClicked.emit( index )
            return
        QW.QTabBar.mouseDoubleClickEvent( self, event )
    def dragEnterEvent(self, event):
        # Tab drags are handled by the parent tab widget; accept everything else
        # so e.g. file drops can reach dropEvent below.
        if 'application/hydrus-tab' in event.mimeData().formats():
            event.ignore()
        else:
            event.accept()
    def dragMoveEvent( self, event ):
        if 'application/hydrus-tab' not in event.mimeData().formats():
            # Hovering a non-tab payload over a tab switches to that page.
            tab_index = self.tabAt( event.pos() )
            if tab_index != -1:
                self.parentWidget().setCurrentIndex( tab_index )
        else:
            event.ignore()
    def lastClickedTabInfo( self ):
        """Return ( last_clicked_tab_index, last_clicked_global_pos )."""
        return ( self._last_clicked_tab_index, self._last_clicked_global_pos )
    def dropEvent( self, event ):
        # Forward non-tab drops (e.g. files/media) to the supplementary target.
        if self._supplementary_drop_target:
            self._supplementary_drop_target.eventFilter( self, event )
        else:
            event.ignore()
# A heavily extended/tweaked version of https://forum.qt.io/topic/67542/drag-tabs-between-qtabwidgets/
class TabWidgetWithDnD( QW.QTabWidget ):
    """Tab widget whose tabs can be dragged between (possibly nested) tab widgets.

    Tab drags carry the custom 'application/hydrus-tab' mime type. Emits
    pageDragAndDropped( source_page, source_tab_bar ) after a successful drop.
    """
    pageDragAndDropped = QC.Signal( QW.QWidget, QW.QWidget )
    def __init__( self, parent = None ):
        QW.QTabWidget.__init__( self, parent )
        self.setTabBar( TabBar( self ) )
        self.setAcceptDrops( True )
        self._tab_bar = self.tabBar()
        self._supplementary_drop_target = None
    def _LayoutPagesHelper( self ):
        # Recursively show each page once so hidden splitters get laid out.
        current_index = self.currentIndex()
        for i in range( self.count() ):
            self.setCurrentIndex( i )
            if isinstance( self.widget( i ), TabWidgetWithDnD ):
                self.widget( i )._LayoutPagesHelper()
        self.setCurrentIndex( current_index )
    def LayoutPages( self ):
        # hydev adds: I no longer call this, as I moved splitter setting to a thing called per page when page is first visibly shown
        # leaving it here for now in case I need it again
        # Momentarily switch to each page, then back, forcing a layout update.
        # If this is not done, the splitters on the hidden pages won't resize their widgets properly when we restore
        # splitter sizes after this, since they would never became visible.
        # We first have to climb up the widget hierarchy and go down recursively from the root tab widget,
        # since it's not enough to make a page visible if its a nested page: all of its ancestor pages have to be visible too.
        # This shouldn't be visible to users since we switch back immediately.
        # There is probably a proper way to do this...
        highest_ancestor_of_same_type = self
        parent = self.parentWidget()
        while parent is not None:
            if isinstance( parent, TabWidgetWithDnD ):
                highest_ancestor_of_same_type = parent
            parent = parent.parentWidget()
        highest_ancestor_of_same_type._LayoutPagesHelper() # This does the actual recursive descent and making pages visible
    # This is a hack that adds an additional drop target to the tab bar. The added drop target will get drop events from the tab bar.
    # Used to make the case of files/media droppend onto tabs work.
    def AddSupplementaryTabBarDropTarget( self, drop_target ):
        self._supplementary_drop_target = drop_target
        self.tabBar().AddSupplementaryTabBarDropTarget( drop_target )
    def mouseMoveEvent( self, e ):
        # If the cursor is over the current page, let the page handle it.
        if self.currentWidget() and self.currentWidget().rect().contains( self.currentWidget().mapFromGlobal( self.mapToGlobal( e.pos() ) ) ):
            QW.QTabWidget.mouseMoveEvent( self, e )
        if e.buttons() != QC.Qt.LeftButton:
            return
        my_mouse_pos = e.pos()
        global_mouse_pos = self.mapToGlobal( my_mouse_pos )
        tab_bar_mouse_pos = self._tab_bar.mapFromGlobal( global_mouse_pos )
        # Only start drags from within the tab bar itself.
        if not self._tab_bar.rect().contains( tab_bar_mouse_pos ):
            return
        if not isinstance( self._tab_bar, TabBar ):
            return
        ( clicked_tab_index, clicked_global_pos ) = self._tab_bar.lastClickedTabInfo()
        if clicked_tab_index == -1:
            return
        if e.globalPos() == clicked_global_pos:
            # don't start a drag until movement
            return
        # Render the clicked tab into a pixmap to use as the drag cursor image.
        tab_rect = self._tab_bar.tabRect( clicked_tab_index )
        pixmap = QG.QPixmap( tab_rect.size() )
        self._tab_bar.render( pixmap, QC.QPoint(), QG.QRegion( tab_rect ) )
        mimeData = QC.QMimeData()
        mimeData.setData( 'application/hydrus-tab', b'' )
        drag = QG.QDrag( self._tab_bar )
        drag.setMimeData( mimeData )
        drag.setPixmap( pixmap )
        cursor = QG.QCursor( QC.Qt.OpenHandCursor )
        drag.setHotSpot( QC.QPoint( 0, 0 ) )
        # this puts the tab pixmap exactly where we picked it up, but it looks bad
        # drag.setHotSpot( tab_bar_mouse_pos - tab_rect.topLeft() )
        drag.setDragCursor( cursor.pixmap(), QC.Qt.MoveAction )
        drag.exec_( QC.Qt.MoveAction )
    def dragEnterEvent( self, e ):
        if self.currentWidget() and self.currentWidget().rect().contains( self.currentWidget().mapFromGlobal( self.mapToGlobal( e.pos() ) ) ):
            return QW.QTabWidget.dragEnterEvent( self, e )
        # Only tab payloads are interesting at the tab-widget level.
        if 'application/hydrus-tab' in e.mimeData().formats():
            e.accept()
        else:
            e.ignore()
    def dragMoveEvent( self, event ):
        #if self.currentWidget() and self.currentWidget().rect().contains( self.currentWidget().mapFromGlobal( self.mapToGlobal( event.pos() ) ) ): return QW.QTabWidget.dragMoveEvent( self, event )
        screen_pos = self.mapToGlobal( event.pos() )
        tab_pos = self._tab_bar.mapFromGlobal( screen_pos )
        tab_index = self._tab_bar.tabAt( tab_pos )
        if tab_index != -1:
            # NOTE(review): shift_down is computed but unused here — looks like
            # a leftover from an earlier behaviour; confirm before removing.
            shift_down = event.keyboardModifiers() & QC.Qt.ShiftModifier
            self.setCurrentIndex( tab_index )
        if 'application/hydrus-tab' not in event.mimeData().formats():
            event.reject()
        #return QW.QTabWidget.dragMoveEvent( self, event )
    def dragLeaveEvent( self, e ):
        #if self.currentWidget() and self.currentWidget().rect().contains( self.currentWidget().mapFromGlobal( self.mapToGlobal( e.pos() ) ) ): return QW.QTabWidget.dragLeaveEvent( self, e )
        e.accept()
    def addTab(self, widget, *args, **kwargs ):
        # Propagate the supplementary drop target into nested tab widgets.
        if isinstance( widget, TabWidgetWithDnD ):
            widget.AddSupplementaryTabBarDropTarget( self._supplementary_drop_target )
        QW.QTabWidget.addTab( self, widget, *args, **kwargs )
    def insertTab(self, index, widget, *args, **kwargs):
        # Propagate the supplementary drop target into nested tab widgets.
        if isinstance( widget, TabWidgetWithDnD ):
            widget.AddSupplementaryTabBarDropTarget( self._supplementary_drop_target )
        QW.QTabWidget.insertTab( self, index, widget, *args, **kwargs )
    def dropEvent( self, e ):
        if self.currentWidget() and self.currentWidget().rect().contains( self.currentWidget().mapFromGlobal( self.mapToGlobal( e.pos() ) ) ):
            return QW.QTabWidget.dropEvent( self, e )
        if 'application/hydrus-tab' not in e.mimeData().formats(): #Page dnd has no associated mime data
            e.ignore()
            return
        w = self
        source_tab_bar = e.source()
        if not isinstance( source_tab_bar, TabBar ):
            return
        # Recover which page is being dragged from the source bar's last click.
        ( source_page_index, source_page_click_global_pos ) = source_tab_bar.lastClickedTabInfo()
        source_tab_bar.clearLastClickedTabInfo()
        source_notebook = source_tab_bar.parentWidget()
        source_page = source_notebook.widget( source_page_index )
        source_name = source_tab_bar.tabText( source_page_index )
        while w is not None:
            if source_page == w:
                # you cannot drop a page of pages inside itself
                return
            w = w.parentWidget()
        e.setDropAction( QC.Qt.MoveAction )
        e.accept()
        counter = self.count()
        screen_pos = self.mapToGlobal( e.pos() )
        tab_pos = self.tabBar().mapFromGlobal( screen_pos )
        dropped_on_tab_index = self.tabBar().tabAt( tab_pos )
        if source_notebook == self and dropped_on_tab_index == source_page_index:
            return # if we drop on ourself, make no action, even on the right edge
        # A drop near a tab's left/right edge means 'insert beside', not 'on'.
        dropped_on_left_edge = False
        dropped_on_right_edge = False
        if dropped_on_tab_index != -1:
            EDGE_PADDING = 15
            tab_rect = self.tabBar().tabRect( dropped_on_tab_index )
            edge_size = QC.QSize( EDGE_PADDING, tab_rect.height() )
            left_edge_rect = QC.QRect( tab_rect.topLeft(), edge_size )
            right_edge_rect = QC.QRect( tab_rect.topRight() - QC.QPoint( EDGE_PADDING, 0 ), edge_size )
            dropped_on_left_edge = left_edge_rect.contains( e.pos() )
            dropped_on_right_edge = right_edge_rect.contains( e.pos() )
        if counter == 0:
            self.addTab( source_page, source_name )
        else:
            if dropped_on_tab_index == -1:
                # Dropped on empty space: append at the end.
                insert_index = counter
            else:
                insert_index = dropped_on_tab_index
                if dropped_on_right_edge:
                    insert_index += 1
                if self == source_notebook:
                    if insert_index == source_page_index + 1 and not dropped_on_left_edge:
                        pass # in this special case, moving it confidently one to the right, we will disobey the normal rules and indeed move one to the right, rather than no-op
                    elif insert_index > source_page_index:
                        # we are inserting to our right, which needs a shift since we will be removing ourselves from the list
                        insert_index -= 1
            if source_notebook == self and insert_index == source_page_index:
                return # if we mean to insert on ourself, make no action
            self.insertTab( insert_index, source_page, source_name )
        # Shift inverts the 'follow the dropped page' behaviour; an option can
        # invert it again.
        shift_down = e.keyboardModifiers() & QC.Qt.ShiftModifier
        follow_dropped_page = not shift_down
        new_options = HG.client_controller.new_options
        if new_options.GetBoolean( 'reverse_page_shift_drag_behaviour' ):
            follow_dropped_page = not follow_dropped_page
        if follow_dropped_page:
            self.setCurrentIndex( self.indexOf( source_page ) )
        else:
            # Stay on a sensible page in the source notebook instead.
            if source_page_index > 1:
                neighbour_page = source_notebook.widget( source_page_index - 1 )
                page_key = neighbour_page.GetPageKey()
            else:
                page_key = source_notebook.GetPageKey()
            HG.client_controller.gui.ShowPage( page_key )
        self.pageDragAndDropped.emit( source_page, source_tab_bar )
def DeleteAllNotebookPages( notebook ):
    """Remove every page from the notebook, scheduling each for deletion."""
    while notebook.count():
        page = notebook.widget( 0 )
        notebook.removeTab( 0 )
        page.deleteLater()
def SplitVertically( splitter: QW.QSplitter, w1, w2, hpos ):
    """Arrange w1 and w2 side by side (horizontal orientation) in the splitter.

    hpos > 0 sizes the left pane to hpos pixels; hpos < 0 sizes the right pane
    to -hpos pixels; hpos == 0 leaves the current sizes alone.
    """
    splitter.setOrientation( QC.Qt.Horizontal )
    for child in ( w1, w2 ):
        if child.parentWidget() != splitter:
            splitter.addWidget( child )
            child.setVisible( True )
    total = sum( splitter.sizes() )
    if hpos < 0:
        splitter.setSizes( [ total + hpos, -hpos ] )
    elif hpos > 0:
        splitter.setSizes( [ hpos, total - hpos ] )
def SplitHorizontally( splitter: QW.QSplitter, w1, w2, vpos ):
    """Arrange w1 above w2 (vertical orientation) in the splitter.

    vpos > 0 sizes the top pane to vpos pixels; vpos < 0 sizes the bottom pane
    to -vpos pixels; vpos == 0 leaves the current sizes alone.
    """
    splitter.setOrientation( QC.Qt.Vertical )
    for child in ( w1, w2 ):
        if child.parentWidget() != splitter:
            splitter.addWidget( child )
            child.setVisible( True )
    total = sum( splitter.sizes() )
    if vpos < 0:
        splitter.setSizes( [ total + vpos, -vpos ] )
    elif vpos > 0:
        splitter.setSizes( [ vpos, total - vpos ] )
def MakeQLabelWithAlignment( label, parent, align ):
    """Create a QLabel on parent with the given text alignment applied."""
    widget = QW.QLabel( label, parent )
    widget.setAlignment( align )
    return widget
class GridLayout( QW.QGridLayout ):
    """QGridLayout with a fixed column count; AddToLayout fills it row by row."""
    def __init__( self, cols = 1, spacing = 2 ):
        QW.QGridLayout.__init__( self )
        self._col_count = cols
        self.setMargin( 2 )
        self.setSpacing( spacing )
    def GetFixedColumnCount( self ):
        """Return the fixed number of columns items are wrapped into."""
        return self._col_count
    def setMargin( self, val ):
        # Qt removed setMargin; emulate it by setting all four content margins.
        self.setContentsMargins( val, val, val, val )
def AddToLayout( layout, item, flag = None, alignment = None ):
    """Add an item (widget, sub-layout, or a tuple meaning 'stretch/spacer') to a layout.

    flag is one of the CC.FLAGS_* constants and controls alignment, size
    policy, stretch factor and border behaviour, emulating the old wx sizer
    flags this code was ported from.
    """
    if isinstance( layout, GridLayout ):
        # Grid layouts wrap items into a fixed number of columns.
        cols = layout.GetFixedColumnCount()
        count = layout.count()
        row = math.floor( count / cols )
        col = count % cols
        if isinstance( item, QW.QLayout ):
            layout.addLayout( item, row, col )
        elif isinstance( item, QW.QWidget ):
            layout.addWidget( item, row, col )
        elif isinstance( item, tuple ):
            # A tuple stands in for a spacer cell; an invisible button is used
            # as a placeholder widget.
            spacer = QW.QPushButton()#QW.QSpacerItem( 0, 0, QW.QSizePolicy.Expanding, QW.QSizePolicy.Fixed )
            layout.addWidget( spacer, row, col )
            spacer.setVisible(False)
            return
    else:
        if isinstance( item, QW.QLayout ):
            layout.addLayout( item )
            if alignment is not None:
                layout.setAlignment( item, alignment )
        elif isinstance( item, QW.QWidget ):
            layout.addWidget( item )
            if alignment is not None:
                layout.setAlignment( item, alignment )
        elif isinstance( item, tuple ):
            # A tuple stands in for a stretch in box layouts.
            layout.addStretch( 1 )
            return
    zero_border = False
    if flag is None or flag == CC.FLAGS_NONE:
        pass
    elif flag in ( CC.FLAGS_CENTER, CC.FLAGS_ON_LEFT, CC.FLAGS_ON_RIGHT, CC.FLAGS_CENTER_PERPENDICULAR, CC.FLAGS_CENTER_PERPENDICULAR_EXPAND_DEPTH ):
        # Alignment-style flags: translate each flag to a Qt alignment.
        if flag == CC.FLAGS_CENTER:
            alignment = QC.Qt.AlignVCenter | QC.Qt.AlignHCenter
        if flag == CC.FLAGS_ON_LEFT:
            alignment = QC.Qt.AlignLeft | QC.Qt.AlignVCenter
        elif flag == CC.FLAGS_ON_RIGHT:
            alignment = QC.Qt.AlignRight | QC.Qt.AlignVCenter
        elif flag in ( CC.FLAGS_CENTER_PERPENDICULAR, CC.FLAGS_CENTER_PERPENDICULAR_EXPAND_DEPTH ):
            # Centre across the axis perpendicular to the layout direction.
            if isinstance( layout, QW.QHBoxLayout ):
                alignment = QC.Qt.AlignVCenter
            else:
                alignment = QC.Qt.AlignHCenter
        layout.setAlignment( item, alignment )
        if flag == CC.FLAGS_CENTER_PERPENDICULAR_EXPAND_DEPTH:
            if isinstance( layout, QW.QVBoxLayout ) or isinstance( layout, QW.QHBoxLayout ):
                layout.setStretchFactor( item, 5 )
        if isinstance( item, QW.QLayout ):
            zero_border = True
    elif flag in ( CC.FLAGS_EXPAND_PERPENDICULAR, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR ):
        if flag == CC.FLAGS_EXPAND_SIZER_PERPENDICULAR:
            zero_border = True
        if isinstance( item, QW.QWidget ):
            # Expand across the axis perpendicular to the layout direction.
            if isinstance( layout, QW.QHBoxLayout ):
                h_policy = QW.QSizePolicy.Fixed
                v_policy = QW.QSizePolicy.Expanding
            else:
                h_policy = QW.QSizePolicy.Expanding
                v_policy = QW.QSizePolicy.Fixed
            item.setSizePolicy( h_policy, v_policy )
    elif flag in ( CC.FLAGS_EXPAND_BOTH_WAYS, CC.FLAGS_EXPAND_SIZER_BOTH_WAYS, CC.FLAGS_EXPAND_BOTH_WAYS_POLITE, CC.FLAGS_EXPAND_BOTH_WAYS_SHY ):
        if flag == CC.FLAGS_EXPAND_SIZER_BOTH_WAYS:
            zero_border = True
        if isinstance( item, QW.QWidget ):
            item.setSizePolicy( QW.QSizePolicy.Expanding, QW.QSizePolicy.Expanding )
        if isinstance( layout, QW.QVBoxLayout ) or isinstance( layout, QW.QHBoxLayout ):
            # The different EXPAND flavours compete with different weights.
            if flag in ( CC.FLAGS_EXPAND_BOTH_WAYS, CC.FLAGS_EXPAND_SIZER_BOTH_WAYS ):
                stretch_factor = 5
            elif flag == CC.FLAGS_EXPAND_BOTH_WAYS_POLITE:
                stretch_factor = 3
            elif flag == CC.FLAGS_EXPAND_BOTH_WAYS_SHY:
                stretch_factor = 1
            layout.setStretchFactor( item, stretch_factor )
    if zero_border:
        margin = 0
        # Frames keep their frame width so the border still draws correctly.
        if isinstance( item, QW.QFrame ):
            margin = item.frameWidth()
        item.setContentsMargins( margin, margin, margin, margin )
def ScrollAreaVisibleRect( scroll_area ):
    """Return the visible rect of the scroll area's inner widget, clamped to be at least the viewport size."""
    inner = scroll_area.widget()
    if not inner:
        return QC.QRect( 0, 0, 0, 0 )
    rect = inner.visibleRegion().boundingRect()
    viewport = scroll_area.viewport()
    # Never report something smaller than the viewport itself.
    if rect.width() < viewport.width():
        rect.setWidth( viewport.width() )
    if rect.height() < viewport.height():
        rect.setHeight( viewport.height() )
    return rect
def AdjustOpacity( image, opacity_factor ):
    """Return a copy of image painted at the given opacity (0.0-1.0) onto a fresh transparent canvas."""
    faded = QG.QImage( image.width(), image.height(), QG.QImage.Format_RGBA8888 )
    faded.fill( QC.Qt.transparent )
    painter = QG.QPainter( faded )
    painter.setOpacity( opacity_factor )
    painter.drawImage( 0, 0, image )
    return faded
def ToKeySequence( modifiers, key ):
    """Build a QKeySequence from a key plus modifiers.

    PySide2 passes modifiers as a Qt.KeyboardModifiers object, which cannot
    simply be added to the key, so the sequence is assembled as a string;
    otherwise (plain int modifiers) the two are summed directly.
    """
    if isinstance( modifiers, QC.Qt.KeyboardModifiers ):
        seq_str = ''
        for modifier in [ QC.Qt.ShiftModifier, QC.Qt.ControlModifier, QC.Qt.AltModifier, QC.Qt.MetaModifier, QC.Qt.KeypadModifier, QC.Qt.GroupSwitchModifier ]:
            if modifiers & modifier: seq_str += QG.QKeySequence( modifier ).toString()
        seq_str += QG.QKeySequence( key ).toString()
        return QG.QKeySequence( seq_str )
    else: return QG.QKeySequence( key + modifiers )
def AddShortcut( widget, modifier, key, callable, *args ):
    """Bind modifier+key on the widget (and its children) to callable( *args ).

    Note: the parameter name 'callable' shadows the builtin; it is kept as-is
    because it is part of the public keyword interface. The lambda captures
    callable and args by closure, both fixed at registration time.
    """
    shortcut = QW.QShortcut( widget )
    shortcut.setKey( ToKeySequence( modifier, key ) )
    shortcut.setContext( QC.Qt.WidgetWithChildrenShortcut )
    shortcut.activated.connect( lambda: callable( *args ) )
class BusyCursor:
    """Context manager that shows the wait cursor for the duration of the block."""
    def __enter__( self ):
        QW.QApplication.setOverrideCursor( QC.Qt.WaitCursor )
    def __exit__( self, exc_type, exc_val, exc_tb ):
        # Restoring (not clearing) keeps any previously stacked override cursors.
        QW.QApplication.restoreOverrideCursor()
def GetBackgroundColour( widget ):
    """Return the widget's palette Window (background) colour."""
    palette = widget.palette()
    return palette.color( QG.QPalette.Window )
# Custom Qt event type used to deliver CallAfter callbacks through the event loop.
CallAfterEventType = QC.QEvent.Type( QC.QEvent.registerEventType() )
class CallAfterEvent( QC.QEvent ):
    """Event that carries a callable (plus its arguments) to be run on the Qt thread."""
    def __init__( self, fn, *args, **kwargs ):
        QC.QEvent.__init__( self, CallAfterEventType )
        self._fn = fn
        self._args = args
        self._kwargs = kwargs
    def Execute( self ):
        """Invoke the stored callable; a None callable is a no-op."""
        if self._fn is not None:
            self._fn( *self._args, **self._kwargs )
class CallAfterEventCatcher( QC.QObject ):
    """Receives CallAfterEvents posted by CallAfter and executes them on the Qt thread."""
    def __init__( self, parent ):
        QC.QObject.__init__( self, parent )
        self.installEventFilter( self )
    def eventFilter( self, watched, event ):
        # Only handle our custom event type; everything else passes through.
        if event.type() == CallAfterEventType and isinstance( event, CallAfterEvent ):
            if HG.profile_mode:
                summary = 'Profiling CallAfter Event: {}'.format( event._fn )
                HydrusData.Profile( summary, 'event.Execute()', globals(), locals(), min_duration_ms = HG.callto_profile_min_job_time_ms )
            else:
                event.Execute()
            event.accept()
            return True
        return False
def CallAfter( fn, *args, **kwargs ):
    """Schedule fn( *args, **kwargs ) to run on the Qt main thread via the event loop.

    Posts a CallAfterEvent to the application's call_after_catcher and wakes
    the event dispatcher so the callback runs promptly.
    """
    QW.QApplication.instance().postEvent( QW.QApplication.instance().call_after_catcher, CallAfterEvent( fn, *args, **kwargs ) )
    QW.QApplication.instance().eventDispatcher().wakeUp()
def ClearLayout( layout, delete_widgets = False ):
    """Remove every item from the layout.

    If delete_widgets is True, child widgets are scheduled for deletion and
    nested layouts are cleared recursively and then deleted themselves.
    Spacer items are simply dropped.
    """
    while layout.count() > 0:
        item = layout.itemAt( 0 )
        if delete_widgets:
            if item.widget():
                item.widget().deleteLater()
            elif item.layout():
                ClearLayout( item.layout(), delete_widgets = True )
                item.layout().deleteLater()
            else:
                # The item is a spacer (it has neither a widget nor a child
                # layout). The previous code called item.layout().spacerItem(),
                # which raised AttributeError because item.layout() is None
                # here; QLayoutItem.spacerItem() on the item itself is correct.
                spacer = item.spacerItem()
                del spacer
        layout.removeItem( item )
def GetClientData( widget, idx ):
    """Return the Qt.UserRole data stored at index idx for combo boxes, tree widgets and list widgets."""
    if isinstance( widget, QW.QComboBox ):
        return widget.itemData( idx, QC.Qt.UserRole )
    if isinstance( widget, QW.QTreeWidget ):
        return widget.topLevelItem( idx ).data( 0, QC.Qt.UserRole )
    if isinstance( widget, QW.QListWidget ):
        return widget.item( idx ).data( QC.Qt.UserRole )
    raise ValueError( 'Unknown widget class in GetClientData' )
def Unsplit( splitter, widget ):
    """Hide the widget if it is currently a direct child of the splitter."""
    is_child = widget.parentWidget() == splitter
    if is_child:
        widget.setVisible( False )
def GetSystemColour( colour ):
    """Resolve a palette colour role against the default application palette."""
    default_palette = QG.QPalette()
    return default_palette.color( colour )
def CenterOnWindow( parent, window ):
    """Move window so it is centred over parent's top-level window."""
    anchor = parent.window().frameGeometry().center()
    offset = window.rect().center()
    window.move( anchor - offset )
def ListWidgetDelete( widget, idx ):
    """Remove the item at idx (an int or a QModelIndex) from the list widget; -1 is a no-op."""
    if isinstance( idx, QC.QModelIndex ):
        idx = idx.row()
    if idx == -1:
        return
    item = widget.takeItem( idx )
    del item
def ListWidgetGetSelection( widget ):
    """Return the index of the first selected item, or -1 if nothing is selected."""
    for idx in range( widget.count() ):
        if widget.item( idx ).isSelected():
            return idx
    return -1
def ListWidgetGetStrings( widget ):
    """Return every item's text, in display order."""
    return [ widget.item( i ).text() for i in range( widget.count() ) ]
def ListWidgetIsSelected( widget, idx ):
    """True if idx is a real index and that item is currently selected."""
    if idx == -1:
        return False
    return widget.item( idx ).isSelected()
def ListWidgetSetSelection( widget, idxs ):
    """Select exactly the given index (or list of indices), clearing any previous selection.

    Out-of-range indices are silently ignored.
    """
    widget.clearSelection()
    if not isinstance( idxs, list ):
        idxs = [ idxs ]
    last_valid = widget.count() - 1
    for idx in idxs:
        if 0 <= idx <= last_valid:
            widget.item( idx ).setSelected( True )
def MakeQSpinBox( parent = None, initial = None, min = None, max = None, width = None ):
    """Create a QSpinBox, applying only the options that were supplied.

    Note: 'min'/'max' shadow builtins but are kept as keyword interface.
    Bounds are set before the initial value so the value is not clamped away.
    """
    spinbox = QW.QSpinBox( parent )
    if min is not None: spinbox.setMinimum( min )
    if max is not None: spinbox.setMaximum( max )
    if initial is not None: spinbox.setValue( initial )
    if width is not None: spinbox.setMinimumWidth( width )
    return spinbox
def SetInitialSize( widget, size ):
    """Apply an initial size to a widget.

    Delegates to the widget's own SetInitialSize when available; otherwise
    translates non-negative dimensions into minimum width/height. A tuple is
    accepted in place of a QSize.
    """
    if hasattr( widget, 'SetInitialSize' ):
        widget.SetInitialSize( size )
        return
    if isinstance( size, tuple ):
        size = QC.QSize( size[0], size[1] )
    if size.width() >= 0:
        widget.setMinimumWidth( size.width() )
    if size.height() >= 0:
        widget.setMinimumHeight( size.height() )
def SetBackgroundColour( widget, colour ):
    """Set a widget's background via a stylesheet scoped to the widget's objectName.

    colour may be a QColor, an ( r, g, b[, a] ) tuple, or anything the QColor
    constructor accepts (e.g. a Qt.GlobalColor or '#rrggbb' string).
    """
    widget.setAutoFillBackground( True )
    object_name = widget.objectName()
    # The stylesheet selector needs a stable object name; synthesise one from
    # the Python id when the widget has none.
    if not object_name:
        object_name = str( id( widget ) )
        widget.setObjectName( object_name )
    if isinstance( colour, QG.QColor ):
        widget.setStyleSheet( '#{} {{ background-color: {} }}'.format( object_name, colour.name()) )
    elif isinstance( colour, tuple ):
        colour = QG.QColor( *colour )
        widget.setStyleSheet( '#{} {{ background-color: {} }}'.format( object_name, colour.name() ) )
    else:
        widget.setStyleSheet( '#{} {{ background-color: {} }}'.format( object_name, QG.QColor( colour ).name() ) )
def SetStringSelection( combobox, string ):
    """Select the combobox entry whose text matches string exactly, if one exists."""
    index = combobox.findText( string )
    if index == -1:
        return
    combobox.setCurrentIndex( index )
def SetClientSize( widget, size ):
    """Resize the widget; a tuple is accepted, and a negative dimension means 'keep the current value'."""
    if isinstance( size, tuple ):
        size = QC.QSize( size[ 0 ], size[ 1 ] )
    if size.width() < 0:
        size.setWidth( widget.width() )
    if size.height() < 0:
        size.setHeight( widget.height() )
    widget.resize( size )
def SetMinClientSize( widget, size ):
    """Apply minimum width/height from size (QSize or tuple); negative dimensions are skipped."""
    if isinstance( size, tuple ):
        size = QC.QSize( size[0], size[1] )
    if size.width() >= 0:
        widget.setMinimumWidth( size.width() )
    if size.height() >= 0:
        widget.setMinimumHeight( size.height() )
class StatusBar( QW.QStatusBar ):
    """Status bar made of labelled cells.

    status_widths: a negative value means a proportional stretch of -w, a
    non-negative value a fixed pixel width.
    """
    def __init__( self, status_widths ):
        QW.QStatusBar.__init__( self )
        self._labels = []
        for w in status_widths:
            label = QW.QLabel()
            self._labels.append( label )
            if w < 0:
                # negative width -> stretch factor
                self.addWidget( label, -1 * w )
            else:
                label.setFixedWidth( w )
                self.addWidget( label )
    def SetStatusText( self, text, index, tooltip = None ):
        """Set the text (and tooltip, defaulting to the text) of the cell at index.

        Skips the Qt setters when nothing changed to avoid needless repaints.
        """
        if tooltip is None:
            tooltip = text
        cell = self._labels[ index ]
        if cell.text() != text:
            cell.setText( text )
        if cell.toolTip() != tooltip:
            cell.setToolTip( tooltip )
class AboutDialogInfo:
    """Plain value holder for the fields displayed by AboutBox."""
    def __init__( self ):
        # All text fields start empty; developers is a list of names.
        for attr in ( 'name', 'version', 'description', 'license', 'website' ):
            setattr( self, attr, '' )
        self.developers = []
    def SetName( self, name ):
        """Record the application name."""
        self.name = name
    def SetVersion( self, version ):
        """Record the version string."""
        self.version = version
    def SetDescription( self, description ):
        """Record the description text."""
        self.description = description
    def SetLicense( self, license ):
        """Record the license text."""
        self.license = license
    def SetDevelopers( self, developers_list ):
        """Record the list of developer names."""
        self.developers = developers_list
    def SetWebSite( self, url ):
        """Record the project website URL."""
        self.website = url
# Minimal helper that posts synthetic keyboard events to a widget.
class UIActionSimulator:
    def __init__( self ):
        pass
    def Char( self, widget, key, text = None ):
        # Post a key press followed by a key release. If widget is None,
        # the currently focused widget receives the events.
        if widget is None:
            widget = QW.QApplication.focusWidget()
        ev1 = QG.QKeyEvent( QC.QEvent.KeyPress, key, QC.Qt.NoModifier, text = text )
        ev2 = QG.QKeyEvent( QC.QEvent.KeyRelease, key, QC.Qt.NoModifier, text = text )
        # postEvent queues asynchronously; Qt takes ownership of the events.
        QW.QApplication.instance().postEvent( widget, ev1 )
        QW.QApplication.instance().postEvent( widget, ev2 )
# Modal "About" dialog built from an AboutDialogInfo. Shows icon, name,
# version, and tabs for description/credits/license. Executes itself on
# construction (self.exec_ at the end of __init__).
class AboutBox( QW.QDialog ):
    def __init__( self, parent, about_info ):
        QW.QDialog.__init__( self, parent )
        self.setWindowFlag( QC.Qt.WindowContextHelpButtonHint, on = False )
        self.setAttribute( QC.Qt.WA_DeleteOnClose )
        self.setWindowIcon( QG.QIcon( HG.client_controller.frame_icon_pixmap ) )
        layout = QW.QVBoxLayout( self )
        self.setWindowTitle( 'About ' + about_info.name )
        icon_label = QW.QLabel( self )
        name_label = QW.QLabel( about_info.name, self )
        version_label = QW.QLabel( about_info.version, self )
        tabwidget = QW.QTabWidget( self )
        desc_panel = QW.QWidget( self )
        desc_label = QW.QLabel( about_info.description, self )
        url_label = QW.QLabel( '<a href="{0}">{0}</a>'.format( about_info.website ), self )
        # NOTE: `credits` and `license` shadow builtins; harmless locally.
        credits = QW.QTextEdit( self )
        license = QW.QTextEdit( self )
        close_button = QW.QPushButton( 'close', self )
        icon_label.setPixmap( HG.client_controller.frame_icon_pixmap )
        layout.addWidget( icon_label, alignment = QC.Qt.AlignHCenter )
        # Bold the application name.
        name_label_font = name_label.font()
        name_label_font.setBold( True )
        name_label.setFont( name_label_font )
        layout.addWidget( name_label, alignment = QC.Qt.AlignHCenter )
        layout.addWidget( version_label, alignment = QC.Qt.AlignHCenter )
        layout.addWidget( tabwidget, alignment = QC.Qt.AlignHCenter )
        tabwidget.addTab( desc_panel, 'Description' )
        tabwidget.addTab( credits, 'Credits' )
        tabwidget.addTab( license, 'License' )
        tabwidget.setCurrentIndex( 0 )
        credits.setPlainText( 'Created by ' + ', '.join(about_info.developers) )
        credits.setReadOnly( True )
        credits.setAlignment( QC.Qt.AlignHCenter )
        license.setPlainText( about_info.license )
        license.setReadOnly( True )
        desc_layout = QW.QVBoxLayout()
        desc_layout.addWidget( desc_label, alignment = QC.Qt.AlignHCenter )
        desc_label.setWordWrap( True )
        desc_label.setAlignment( QC.Qt.AlignHCenter | QC.Qt.AlignVCenter )
        desc_layout.addWidget( url_label, alignment = QC.Qt.AlignHCenter )
        # Make the website label a clickable external link.
        url_label.setTextFormat( QC.Qt.RichText )
        url_label.setTextInteractionFlags( QC.Qt.TextBrowserInteraction )
        url_label.setOpenExternalLinks( True )
        desc_panel.setLayout( desc_layout )
        layout.addWidget( close_button, alignment = QC.Qt.AlignRight )
        close_button.clicked.connect( self.accept )
        self.setLayout( layout )
        # Blocks until the dialog is dismissed.
        self.exec_()
# A framed group of radio buttons laid out vertically or horizontally.
class RadioBox( QW.QFrame ):
    # Emitted whenever the user clicks any of the radio buttons.
    radioBoxChanged = QC.Signal()
    def __init__( self, parent = None, choices = None, vertical = False ):
        # `choices` previously defaulted to a mutable `[]`; use None as the
        # sentinel to avoid the shared-mutable-default pitfall. Callers that
        # passed a list explicitly are unaffected.
        QW.QFrame.__init__( self, parent )
        if choices is None:
            choices = []
        self.setFrameStyle( QW.QFrame.Box | QW.QFrame.Raised )
        if vertical:
            self.setLayout( VBoxLayout() )
        else:
            self.setLayout( HBoxLayout() )
        self._choices = []
        for choice in choices:
            radiobutton = QW.QRadioButton( choice, self )
            self._choices.append( radiobutton )
            radiobutton.clicked.connect( self.radioBoxChanged )
            self.layout().addWidget( radiobutton )
        # Default selection: first entry for vertical boxes, last for horizontal.
        # NOTE(review): the vertical/horizontal asymmetry looks intentional but
        # is undocumented upstream - confirm before changing.
        if vertical and len( self._choices ):
            self._choices[0].setChecked( True )
        elif len( self._choices ):
            self._choices[-1].setChecked( True )
    def GetCurrentIndex( self ):
        # Index of the checked button, or -1 if nothing is checked.
        for i in range( len( self._choices ) ):
            if self._choices[ i ].isChecked(): return i
        return -1
    def SetStringSelection( self, str ):
        # Check the button whose label equals `str` (parameter name kept for
        # keyword-compatibility even though it shadows the builtin).
        for i in range( len( self._choices ) ):
            if self._choices[ i ].text() == str:
                self._choices[ i ].setChecked( True )
                return
    def GetStringSelection( self ):
        # Label of the checked button, or None if nothing is checked.
        for i in range( len( self._choices ) ):
            if self._choices[ i ].isChecked(): return self._choices[ i ].text()
        return None
    def SetValue( self, data ):
        # Intentionally a no-op; kept for interface parity with other controls.
        pass
    def Select( self, idx ):
        self._choices[ idx ].setChecked( True )
# Adapted from https://doc.qt.io/qt-5/qtwidgets-widgets-elidedlabel-example.html
class EllipsizedLabel( QW.QLabel ):
def __init__( self, parent = None, ellipsize_end = False ):
QW.QLabel.__init__( self, parent )
self._ellipsize_end = ellipsize_end
def minimumSizeHint( self ):
if self._ellipsize_end:
return self.sizeHint()
else:
return QW.QLabel.minimumSizeHint( self )
def setText( self, text ):
try:
QW.QLabel.setText( self, text )
except ValueError:
QW.QLabel.setText( self, repr( text ) )
self.update()
def sizeHint( self ):
if self._ellipsize_end:
num_lines = self.text().count( '\n' ) + 1
line_width = self.fontMetrics().lineWidth()
line_height = self.fontMetrics().lineSpacing()
size_hint = QC.QSize( 3 * line_width, num_lines * line_height )
else:
size_hint = QW.QLabel.sizeHint( self )
return size_hint
def paintEvent( self, event ):
if not self._ellipsize_end:
QW.QLabel.paintEvent( self, event )
return
painter = QG.QPainter( self )
fontMetrics = painter.fontMetrics()
text_lines = self.text().split( '\n' )
line_spacing = fontMetrics.lineSpacing()
current_y = 0
done = False
my_width = self.width()
for text_line in text_lines:
elided_line = fontMetrics.elidedText( text_line, QC.Qt.ElideRight, my_width )
x = 0
width = my_width
height = line_spacing
flags = self.alignment()
painter.drawText( x, current_y, width, height, flags, elided_line )
# old hacky line that doesn't support alignment flags
#painter.drawText( QC.QPoint( 0, current_y + fontMetrics.ascent() ), elided_line )
current_y += line_spacing
# old code that did multiline wrap width stuff
'''
text_layout = QG.QTextLayout( text_line, painter.font() )
text_layout.beginLayout()
while True:
line = text_layout.createLine()
if not line.isValid(): break
line.setLineWidth( self.width() )
next_line_y = y + line_spacing
if self.height() >= next_line_y + line_spacing:
line.draw( painter, QC.QPoint( 0, y ) )
y = next_line_y
else:
last_line = text_line[ line.textStart(): ]
elided_last_line = fontMetrics.elidedText( last_line, QC.Qt.ElideRight, self.width() )
painter.drawText( QC.QPoint( 0, y + fontMetrics.ascent() ), elided_last_line )
done = True
break
text_layout.endLayout()
if done: break
'''
# QDialog without the "?" titlebar button that also tracks whether the user
# closed it via the titlebar X (as opposed to accept/reject buttons).
class Dialog( QW.QDialog ):
    def __init__( self, parent = None, **kwargs ):
        # Pop 'title' so the remaining kwargs pass straight through to QDialog.
        title = None
        if 'title' in kwargs:
            title = kwargs['title']
            del kwargs['title']
        QW.QDialog.__init__( self, parent, **kwargs )
        self.setWindowFlag( QC.Qt.WindowContextHelpButtonHint, on = False )
        if title is not None:
            self.setWindowTitle( title )
        self._closed_by_user = False
    def closeEvent( self, event ):
        # spontaneous() is true for window-system closes (the titlebar X).
        if event.spontaneous():
            self._closed_by_user = True
        QW.QDialog.closeEvent( self, event )
    # True if the dialog was closed by the user clicking on the X on the titlebar (so neither reject nor accept was chosen - the dialog result is still reject in this case though)
    def WasCancelled( self ):
        return self._closed_by_user
    def SetCancelled( self, closed ):
        self._closed_by_user = closed
    def __enter__( self ):
        return self
    def __exit__( self, exc_type, exc_val, exc_tb ):
        # Context-manager use guarantees cleanup even on exceptions.
        if isValid( self ):
            self.deleteLater()
# Simple modal dialog asking for a password (masked input + OK/Cancel).
class PasswordEntryDialog( Dialog ):
    def __init__( self, parent, message, caption ):
        # message: prompt label text; caption: window title.
        Dialog.__init__( self, parent )
        self.setWindowTitle( caption )
        self._ok_button = QW.QPushButton( 'OK', self )
        self._ok_button.clicked.connect( self.accept )
        self._cancel_button = QW.QPushButton( 'Cancel', self )
        self._cancel_button.clicked.connect( self.reject )
        self._password = QW.QLineEdit( self )
        # Mask the typed characters.
        self._password.setEchoMode( QW.QLineEdit.Password )
        self.setLayout( QW.QVBoxLayout() )
        entry_layout = QW.QHBoxLayout()
        entry_layout.addWidget( QW.QLabel( message, self ) )
        entry_layout.addWidget( self._password )
        button_layout = QW.QHBoxLayout()
        button_layout.addStretch( 1 )
        button_layout.addWidget( self._cancel_button )
        button_layout.addWidget( self._ok_button )
        self.layout().addLayout( entry_layout )
        self.layout().addLayout( button_layout )
    def GetValue( self ):
        # The entered password as typed (call after the dialog is accepted).
        return self._password.text()
# Directory-picker dialog; honours the 'use_qt_file_dialogs' option to force
# Qt's non-native dialog.
class DirDialog( QW.QFileDialog ):
    def __init__( self, parent = None, message = None ):
        QW.QFileDialog.__init__( self, parent )
        if message is not None: self.setWindowTitle( message )
        self.setAcceptMode( QW.QFileDialog.AcceptOpen )
        self.setFileMode( QW.QFileDialog.Directory )
        self.setOption( QW.QFileDialog.ShowDirsOnly, True )
        if HG.client_controller.new_options.GetBoolean( 'use_qt_file_dialogs' ):
            self.setOption( QW.QFileDialog.DontUseNativeDialog, True )
    def __enter__( self ):
        return self
    def __exit__( self, exc_type, exc_val, exc_tb ):
        self.deleteLater()
    def _GetSelectedFiles( self ):
        # Normalise path separators for the current OS.
        return [ os.path.normpath( path ) for path in self.selectedFiles() ]
    def GetPath(self):
        # First selected directory, or None if nothing was chosen.
        sel = self._GetSelectedFiles()
        if len( sel ) > 0:
            return sel[0]
        return None
# General open/save file dialog with optional default name/dir/suffix/filter.
class FileDialog( QW.QFileDialog ):
    def __init__( self, parent = None, message = None, acceptMode = QW.QFileDialog.AcceptOpen, fileMode = QW.QFileDialog.ExistingFile, default_filename = None, default_directory = None, wildcard = None, defaultSuffix = None ):
        QW.QFileDialog.__init__( self, parent )
        if message is not None:
            self.setWindowTitle( message )
        self.setAcceptMode( acceptMode )
        self.setFileMode( fileMode )
        if default_directory is not None:
            self.setDirectory( default_directory )
        if defaultSuffix is not None:
            self.setDefaultSuffix( defaultSuffix )
        if default_filename is not None:
            self.selectFile( default_filename )
        if wildcard:
            # Qt name-filter string, e.g. 'Images (*.png *.jpg)'.
            self.setNameFilter( wildcard )
        if HG.client_controller.new_options.GetBoolean( 'use_qt_file_dialogs' ):
            self.setOption( QW.QFileDialog.DontUseNativeDialog, True )
    def __enter__( self ):
        return self
    def __exit__( self, exc_type, exc_val, exc_tb ):
        self.deleteLater()
    def _GetSelectedFiles( self ):
        # Normalise path separators for the current OS.
        return [ os.path.normpath( path ) for path in self.selectedFiles() ]
    def GetPath( self ):
        # First selected path, or None if nothing was chosen.
        sel = self._GetSelectedFiles()
        if len( sel ) > 0:
            return sel[ 0 ]
        return None
    def GetPaths( self ):
        # All selected paths (for multi-select file modes).
        return self._GetSelectedFiles()
# A QTreeWidget where if an item is (un)checked, all its children are also (un)checked, recursively
class TreeWidgetWithInheritedCheckState( QW.QTreeWidget ):
    # When an item's check state changes by click, propagate the same state to
    # every descendant item.
    def __init__( self, *args, **kwargs ):
        QW.QTreeWidget.__init__( self, *args, **kwargs )
        self.itemClicked.connect( self._HandleItemClickedForCheckStateUpdate )
    def _HandleItemClickedForCheckStateUpdate( self, item, column ):
        # The click already toggled column 0's state; now push it downwards.
        self._UpdateCheckState( item, item.checkState( 0 ) )
    def _UpdateCheckState( self, item, check_state ):
        # this is an int, should be a checkstate
        item.setCheckState( 0, check_state )
        # Recurse into all children so the subtree stays consistent.
        for i in range( item.childCount() ):
            self._UpdateCheckState( item.child( i ), check_state )
# A flat button showing a colour swatch; clicking opens a QColorDialog.
class ColourPickerCtrl( QW.QPushButton ):
    def __init__( self, parent = None ):
        QW.QPushButton.__init__( self, parent )
        # Start fully transparent until a colour is set.
        self._colour = QG.QColor( 0, 0, 0, 0 )
        self.clicked.connect( self._ChooseColour )
        self._highlighted = False
    def SetColour( self, colour ):
        self._colour = colour
        self._UpdatePixmap()
    def _UpdatePixmap( self ):
        # Render the swatch as a square icon sized to the button's content rect.
        px = QG.QPixmap( self.contentsRect().height(), self.contentsRect().height() )
        painter = QG.QPainter( px )
        colour = self._colour
        if self._highlighted:
            colour = self._colour.lighter( 125 ) # 25% lighter
        painter.fillRect( px.rect(), QG.QBrush( colour ) )
        painter.end()
        self.setIcon( QG.QIcon( px ) )
        self.setIconSize( px.size() )
        self.setFlat( True )
        self.setFixedSize( px.size() )
    def enterEvent( self, event ):
        # Hover feedback: lighten the swatch while the mouse is over it.
        self._highlighted = True
        self._UpdatePixmap()
    def leaveEvent( self, event ):
        self._highlighted = False
        self._UpdatePixmap()
    def GetColour( self ):
        return self._colour
    def _ChooseColour( self ):
        # getColor returns an invalid QColor if the user cancelled.
        new_colour = QW.QColorDialog.getColor( initial = self._colour )
        if new_colour.isValid():
            self.SetColour( new_colour )
def ListsToTuples( l ): # Since lists are not hashable, we need to (recursively) convert lists to tuples in data that is to be added to BetterListCtrl
    # Non-sequence leaves pass through untouched; lists and tuples are
    # rebuilt as tuples all the way down.
    if not isinstance( l, ( list, tuple ) ):
        return l
    return tuple( ListsToTuples( item ) for item in l )
# Qt event filter that emulates wx-style EVT_* callback registration on a
# widget. A registered callback returning a false-y value "kills" (consumes)
# the event.
class WidgetEventFilter ( QC.QObject ):
    # Event names that need widget-level features turned on at registration.
    _mouse_tracking_required = { 'EVT_MOUSE_EVENTS' }
    _strong_focus_required = { 'EVT_KEY_DOWN' }
    def __init__( self, parent_widget ):
        self._parent_widget = parent_widget
        QC.QObject.__init__( self, parent_widget )
        parent_widget.installEventFilter( self )
        # Maps event name -> list of callbacks.
        self._callback_map = defaultdict( list )
        self._user_moved_window = False # There is no EVT_MOVE_END in Qt so some trickery is required.
    def _ExecuteCallbacks( self, event_name, event ):
        # Run every callback for event_name; returns True-ish if any callback
        # consumed the event (returns None when nothing was registered).
        if not event_name in self._callback_map: return
        event_killed = False
        for callback in self._callback_map[ event_name ]:
            if not callback( event ): event_killed = True
        return event_killed
    def eventFilter( self, watched, event ):
        # Once somehow this got called with no _parent_widget set - which is probably fixed now but leaving the check just in case, wew
        # Might be worth debugging this later if it still occurs - the only way I found to reproduce it is to run the help > debug > initialize server command
        if not hasattr( self, '_parent_widget') or not isValid( self._parent_widget ): return False
        type = event.type()
        event_killed = False
        if type == QC.QEvent.KeyPress:
            event_killed = event_killed or self._ExecuteCallbacks( 'EVT_KEY_DOWN', event )
        elif type == QC.QEvent.Close:
            event_killed = event_killed or self._ExecuteCallbacks( 'EVT_CLOSE', event )
        elif type == QC.QEvent.WindowStateChange:
            # Fire iconize/maximize both on entering and on leaving the state.
            if isValid( self._parent_widget ):
                if self._parent_widget.isMinimized() or (event.oldState() & QC.Qt.WindowMinimized): event_killed = event_killed or self._ExecuteCallbacks( 'EVT_ICONIZE', event )
                if self._parent_widget.isMaximized() or (event.oldState() & QC.Qt.WindowMaximized): event_killed = event_killed or self._ExecuteCallbacks( 'EVT_MAXIMIZE', event )
        elif type == QC.QEvent.MouseMove:
            event_killed = event_killed or self._ExecuteCallbacks( 'EVT_MOUSE_EVENTS', event )
        elif type == QC.QEvent.MouseButtonDblClick:
            if event.button() == QC.Qt.LeftButton:
                event_killed = event_killed or self._ExecuteCallbacks( 'EVT_LEFT_DCLICK', event )
            elif event.button() == QC.Qt.RightButton:
                event_killed = event_killed or self._ExecuteCallbacks( 'EVT_RIGHT_DCLICK', event )
            # Double-clicks also count as generic mouse events.
            event_killed = event_killed or self._ExecuteCallbacks( 'EVT_MOUSE_EVENTS', event )
        elif type == QC.QEvent.MouseButtonPress:
            if event.buttons() & QC.Qt.LeftButton: event_killed = event_killed or self._ExecuteCallbacks( 'EVT_LEFT_DOWN', event )
            if event.buttons() & QC.Qt.MiddleButton: event_killed = event_killed or self._ExecuteCallbacks( 'EVT_MIDDLE_DOWN', event )
            if event.buttons() & QC.Qt.RightButton: event_killed = event_killed or self._ExecuteCallbacks( 'EVT_RIGHT_DOWN', event )
            event_killed = event_killed or self._ExecuteCallbacks( 'EVT_MOUSE_EVENTS', event )
        elif type == QC.QEvent.MouseButtonRelease:
            if event.buttons() & QC.Qt.LeftButton: event_killed = event_killed or self._ExecuteCallbacks( 'EVT_LEFT_UP', event )
            event_killed = event_killed or self._ExecuteCallbacks( 'EVT_MOUSE_EVENTS', event )
        elif type == QC.QEvent.Wheel:
            event_killed = event_killed or self._ExecuteCallbacks( 'EVT_MOUSEWHEEL', event )
            event_killed = event_killed or self._ExecuteCallbacks( 'EVT_MOUSE_EVENTS', event )
        elif type == QC.QEvent.Scroll:
            event_killed = event_killed or self._ExecuteCallbacks( 'EVT_SCROLLWIN', event )
        elif type == QC.QEvent.Move:
            event_killed = event_killed or self._ExecuteCallbacks( 'EVT_MOVE', event )
            if isValid( self._parent_widget ) and self._parent_widget.isVisible():
                # A visible-window move means the user may be dragging it.
                self._user_moved_window = True
        elif type == QC.QEvent.Resize:
            event_killed = event_killed or self._ExecuteCallbacks( 'EVT_SIZE', event )
        elif type == QC.QEvent.NonClientAreaMouseButtonPress:
            # Titlebar press starts a potential drag; reset the flag.
            self._user_moved_window = False
        elif type == QC.QEvent.NonClientAreaMouseButtonRelease:
            # Titlebar release after a Move event = synthesised EVT_MOVE_END.
            if self._user_moved_window:
                event_killed = event_killed or self._ExecuteCallbacks( 'EVT_MOVE_END', event )
                self._user_moved_window = False
        if event_killed:
            event.accept()
            return True
        return False
    def _AddCallback( self, evt_name, callback ):
        # Some events need the widget configured before they will be delivered.
        if evt_name in self._mouse_tracking_required:
            self._parent_widget.setMouseTracking( True )
        if evt_name in self._strong_focus_required:
            self._parent_widget.setFocusPolicy( QC.Qt.StrongFocus )
        self._callback_map[ evt_name ].append( callback )
    def EVT_CLOSE( self, callback ):
        self._AddCallback( 'EVT_CLOSE', callback )
    def EVT_ICONIZE( self, callback ):
        self._AddCallback( 'EVT_ICONIZE', callback )
    def EVT_KEY_DOWN( self, callback ):
        self._AddCallback( 'EVT_KEY_DOWN', callback )
    def EVT_LEFT_DCLICK( self, callback ):
        self._AddCallback( 'EVT_LEFT_DCLICK', callback )
    def EVT_RIGHT_DCLICK( self, callback ):
        self._AddCallback( 'EVT_RIGHT_DCLICK', callback )
    def EVT_LEFT_DOWN( self, callback ):
        self._AddCallback( 'EVT_LEFT_DOWN', callback )
    def EVT_LEFT_UP( self, callback ):
        self._AddCallback( 'EVT_LEFT_UP', callback )
    def EVT_MAXIMIZE( self, callback ):
        self._AddCallback( 'EVT_MAXIMIZE', callback )
    def EVT_MIDDLE_DOWN( self, callback ):
        self._AddCallback( 'EVT_MIDDLE_DOWN', callback )
    def EVT_MOUSE_EVENTS( self, callback ):
        self._AddCallback( 'EVT_MOUSE_EVENTS', callback )
    def EVT_MOUSEWHEEL( self, callback ):
        self._AddCallback( 'EVT_MOUSEWHEEL', callback )
    def EVT_MOVE( self, callback ):
        self._AddCallback( 'EVT_MOVE', callback )
    def EVT_MOVE_END( self, callback ):
        self._AddCallback( 'EVT_MOVE_END', callback )
    def EVT_RIGHT_DOWN( self, callback ):
        self._AddCallback( 'EVT_RIGHT_DOWN', callback )
    def EVT_SCROLLWIN( self, callback ):
        self._AddCallback( 'EVT_SCROLLWIN', callback )
    def EVT_SIZE( self, callback ):
        self._AddCallback( 'EVT_SIZE', callback )
# wew lad
# https://stackoverflow.com/questions/46456238/checkbox-not-visible-inside-combobox
class CheckBoxDelegate(QW.QStyledItemDelegate):
    # Delegate used to make checkboxes render inside a QComboBox's item view
    # (see the stackoverflow link above).
    # NOTE(review): createEditor intentionally returns None - constructing the
    # QCheckBox is the whole workaround; confirm this is still needed on
    # current Qt before "fixing" it.
    def __init__(self, parent=None):
        super( CheckBoxDelegate, self ).__init__(parent)
    def createEditor( self, parent, op, idx ):
        self.editor = QW.QCheckBox( parent )
# A combobox of checkable items (namespaces + rating services) used to choose
# how media should be collected. Displays a custom summary string instead of
# the current item's text.
class CollectComboCtrl( QW.QComboBox ):
    itemChanged = QC.Signal()
    def __init__( self, parent, media_collect ):
        QW.QComboBox.__init__( self, parent )
        # Toggle check state on click instead of selecting the item.
        self.view().pressed.connect( self._HandleItemPressed )
        # this was previously 'if Fusion style only', but as it works for normal styles too, it is more helpful to have it always on
        self.setItemDelegate( CheckBoxDelegate() )
        self.setModel( QG.QStandardItemModel( self ) )
        # Collect the set of namespaces used by the default namespace sorts.
        text_and_data_tuples = set()
        for media_sort in HG.client_controller.new_options.GetDefaultNamespaceSorts():
            namespaces = media_sort.GetNamespaces()
            try:
                text_and_data_tuples.update( namespaces )
            except:
                HydrusData.DebugPrint( 'Bad namespaces: {}'.format( namespaces ) )
                HydrusData.ShowText( 'Hey, your namespace-based sorts are likely damaged. Details have been written to the log, please let hydev know!' )
        # Each item carries ( display text, ( collect_type, collect_data ) ).
        text_and_data_tuples = sorted( ( ( namespace, ( 'namespace', namespace ) ) for namespace in text_and_data_tuples ) )
        ratings_services = HG.client_controller.services_manager.GetServices( ( HC.LOCAL_RATING_LIKE, HC.LOCAL_RATING_NUMERICAL ) )
        for ratings_service in ratings_services:
            text_and_data_tuples.append( ( ratings_service.GetName(), ('rating', ratings_service.GetServiceKey() ) ) )
        for ( text, data ) in text_and_data_tuples:
            self.Append( text, data )
        # Trick to display custom text
        self._cached_text = ''
        if media_collect.DoesACollect():
            CallAfter( self.SetCollectByValue, media_collect )
    def paintEvent( self, e ):
        # Draw the combobox ourselves so the label shows the cached summary
        # text rather than the current item's text.
        painter = QW.QStylePainter( self )
        painter.setPen( self.palette().color( QG.QPalette.Text ) )
        opt = QW.QStyleOptionComboBox()
        self.initStyleOption( opt )
        opt.currentText = self._cached_text
        painter.drawComplexControl( QW.QStyle.CC_ComboBox, opt )
        painter.drawControl( QW.QStyle.CE_ComboBoxLabel, opt )
    def GetValues( self ):
        # Returns ( namespaces, rating_service_keys, human description ).
        namespaces = []
        rating_service_keys = []
        for index in self.GetCheckedIndices():
            (collect_type, collect_data) = self.itemData( index, QC.Qt.UserRole )
            if collect_type == 'namespace':
                namespaces.append( collect_data )
            elif collect_type == 'rating':
                rating_service_keys.append( collect_data )
        collect_strings = self.GetCheckedStrings()
        if len( collect_strings ) > 0:
            description = 'collect by ' + '-'.join( collect_strings )
        else:
            description = 'no collections'
        return ( namespaces, rating_service_keys, description )
    def hidePopup(self):
        # Keep the popup open while the mouse is over it, so several items can
        # be checked in one go.
        if not self.view().underMouse():
            QW.QComboBox.hidePopup( self )
    def SetValue( self, text ):
        self._cached_text = text
        self.setCurrentText( text )
    def SetCollectByValue( self, media_collect ):
        # Check every item matching the given MediaCollect's namespaces or
        # rating service keys.
        try:
            indices_to_check = []
            for index in range( self.count() ):
                ( collect_type, collect_data ) = self.itemData( index, QC.Qt.UserRole )
                p1 = collect_type == 'namespace' and collect_data in media_collect.namespaces
                p2 = collect_type == 'rating' and collect_data in media_collect.rating_service_keys
                if p1 or p2:
                    indices_to_check.append( index )
            self.SetCheckedIndices( indices_to_check )
            self.itemChanged.emit()
        except Exception as e:
            HydrusData.ShowText( 'Failed to set a collect-by value!' )
            HydrusData.ShowException( e )
    def SetCheckedIndices( self, indices_to_check ):
        for idx in range( self.count() ):
            item = self.model().item( idx )
            if idx in indices_to_check:
                item.setCheckState( QC.Qt.Checked )
            else:
                item.setCheckState( QC.Qt.Unchecked )
    def GetCheckedIndices( self ):
        indices = []
        for idx in range( self.count() ):
            item = self.model().item( idx )
            if item.checkState() == QC.Qt.Checked: indices.append( idx )
        return indices
    def GetCheckedStrings( self ):
        strings = [ ]
        for idx in range( self.count() ):
            item = self.model().item( idx )
            if item.checkState() == QC.Qt.Checked: strings.append( item.text() )
        return strings
    def Append( self, str, data ):
        # Add an unchecked, checkable item carrying `data` as UserRole payload.
        self.addItem( str, userData = data )
        item = self.model().item( self.count() - 1, 0 )
        item.setCheckState( QC.Qt.Unchecked )
    def _HandleItemPressed( self, index ):
        # Clicking an item toggles its check state; the displayed summary text
        # is refreshed and listeners are notified.
        item = self.model().itemFromIndex( index )
        if item.checkState() == QC.Qt.Checked:
            item.setCheckState( QC.Qt.Unchecked )
        else:
            item.setCheckState( QC.Qt.Checked )
        self.SetValue( self._cached_text )
        self.itemChanged.emit()
| 27.354631 | 226 | 0.531312 |
0795baa0013fdf8fe20c1fc4e8f1156afb059328 | 223 | py | Python | code.py | hiteshvvr/SiPINEfficiency_PENELOPE | ce213799fd622318b6aa4b8700e871773af7bd62 | [
"MIT"
] | null | null | null | code.py | hiteshvvr/SiPINEfficiency_PENELOPE | ce213799fd622318b6aa4b8700e871773af7bd62 | [
"MIT"
] | null | null | null | code.py | hiteshvvr/SiPINEfficiency_PENELOPE | ce213799fd622318b6aa4b8700e871773af7bd62 | [
"MIT"
] | null | null | null | fhand=open('answer.txt')
output=open('graphdatauvw.dat','w')
for line in fhand:
word=line[59:72]
energy=float(word)
output.write(word)
next='\n'
output.write(next)
print (word)
fhand.close()
output.close()
| 18.583333 | 36 | 0.672646 |
7a581b9432edcef76954c40ab6fef3901cd4f91d | 4,257 | py | Python | muddery/web/website/views.py | noahzaozao/muddery | 294da6fb73cb04c62e5ba6eefe49b595ca76832a | [
"BSD-3-Clause"
] | null | null | null | muddery/web/website/views.py | noahzaozao/muddery | 294da6fb73cb04c62e5ba6eefe49b595ca76832a | [
"BSD-3-Clause"
] | null | null | null | muddery/web/website/views.py | noahzaozao/muddery | 294da6fb73cb04c62e5ba6eefe49b595ca76832a | [
"BSD-3-Clause"
] | null | null | null |
"""
This file contains the generic, assorted views that don't fall under one of
the other applications. Views are django's way of processing e.g. html
templates on the fly.
"""
from django.contrib.admin.sites import site
from django.conf import settings
from django.contrib.admin.views.decorators import staff_member_required
from django.shortcuts import render
from evennia import SESSION_HANDLER
from evennia.objects.models import ObjectDB
from evennia.accounts.models import AccountDB
from evennia.utils import logger
from django.contrib.auth import login
_BASE_CHAR_TYPECLASS = settings.BASE_CHARACTER_TYPECLASS
def _shared_login(request):
    """
    Handle the shared login between website and webclient.
    """
    csession = request.session
    account = request.user
    # Per-interface login markers stored on the shared django session.
    website_uid = csession.get("website_authenticated_uid", None)
    webclient_uid = csession.get("webclient_authenticated_uid", None)
    if not csession.session_key:
        # this is necessary to build the sessid key
        csession.save()
    # NOTE(review): is_authenticated is a property on modern Django; the call
    # form only works on older versions - confirm the Django pinned here.
    if account.is_authenticated():
        # Logged into website
        if not website_uid:
            # fresh website login (just from login page)
            csession["website_authenticated_uid"] = account.id
        if webclient_uid is None:
            # auto-login web client
            csession["webclient_authenticated_uid"] = account.id
    elif webclient_uid:
        # Not logged into website, but logged into webclient
        if not website_uid:
            csession["website_authenticated_uid"] = account.id
        account = AccountDB.objects.get(id=webclient_uid)
        try:
            # calls our custom authenticate, in web/utils/backend.py
            # NOTE(review): `authenticate` is not imported in this module -
            # this looks like it would raise NameError; verify the import.
            authenticate(autologin=account)
            login(request, account)
        except AttributeError:
            logger.log_trace()
def _gamestats():
    # Gather usage/object statistics for the front-page template context.
    # Some misc. configurable stuff.
    # TODO: Move this to either SQL or settings.py based configuration.
    fpage_account_limit = 4
    # A QuerySet of the most recently connected accounts.
    recent_users = AccountDB.objects.get_recently_connected_accounts()[:fpage_account_limit]
    nplyrs_conn_recent = len(recent_users)
    nplyrs = AccountDB.objects.num_total_accounts()
    nplyrs_reg_recent = len(AccountDB.objects.get_recently_created_accounts())
    nsess = SESSION_HANDLER.account_count()
    # nsess = len(AccountDB.objects.get_connected_accounts()) or "no one"
    nobjs = ObjectDB.objects.all().count()
    # Rooms: location-less objects that are not characters.
    nrooms = ObjectDB.objects.filter(db_location__isnull=True).exclude(db_typeclass_path=_BASE_CHAR_TYPECLASS).count()
    # Exits: objects with both a location and a destination.
    nexits = ObjectDB.objects.filter(db_location__isnull=False, db_destination__isnull=False).count()
    nchars = ObjectDB.objects.filter(db_typeclass_path=_BASE_CHAR_TYPECLASS).count()
    nothers = nobjs - nrooms - nchars - nexits
    pagevars = {
        "page_title": "Front Page",
        "players_connected_recent": recent_users,
        "num_players_connected": nsess,
        "num_players_registered": nplyrs,
        "num_players_connected_recent": nplyrs_conn_recent,
        "num_players_registered_recent": nplyrs_reg_recent,
        "num_rooms": nrooms,
        "num_exits": nexits,
        "num_objects": nobjs,
        "num_characters": nchars,
        "num_others": nothers
    }
    return pagevars
def page_index(request):
    """
    Main root page.

    Syncs the website/webclient login state, then renders index.html with
    the game statistics gathered by _gamestats().
    """
    # handle webclient-website shared login
    _shared_login(request)
    # get game db stats
    pagevars = _gamestats()
    return render(request, 'index.html', pagevars)
def to_be_implemented(request):
    """
    A notice letting the user know that this particular feature hasn't been
    implemented yet.
    """
    context = {"page_title": "To Be Implemented..."}
    return render(request, 'tbi.html', context)
@staff_member_required
def evennia_admin(request):
    """
    Helpful Evennia-specific admin page.

    Access is restricted to staff members by the decorator above.
    """
    return render(
        request, 'evennia_admin.html', {
            'accountdb': AccountDB})
def admin_wrapper(request):
    """
    Wrapper that allows us to properly use the base Django admin site, if needed.

    Applies the staff-member check to the stock admin index view at call time.
    """
    return staff_member_required(site.index)(request)
| 31.301471 | 118 | 0.693916 |
554245a8804a01f7d6f838a7f91b43b18d8f52e8 | 1,361 | py | Python | ml/rl/core/registry_meta.py | badrinarayan/ReAgent | d49b02dce53d9a5d5ee077cea7efded507677641 | [
"BSD-3-Clause"
] | null | null | null | ml/rl/core/registry_meta.py | badrinarayan/ReAgent | d49b02dce53d9a5d5ee077cea7efded507677641 | [
"BSD-3-Clause"
] | null | null | null | ml/rl/core/registry_meta.py | badrinarayan/ReAgent | d49b02dce53d9a5d5ee077cea7efded507677641 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import abc
import logging
from typing import Dict, Type
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class RegistryMeta(abc.ABCMeta):
    """Metaclass that auto-registers every concrete subclass into a shared
    REGISTRY owned by the first class created with this metaclass."""

    def __init__(cls, name, bases, attrs):
        if not hasattr(cls, "REGISTRY"):
            # First class in the hierarchy: it owns the registry dict.
            logger.info("Adding REGISTRY to type {}".format(name))
            cls.REGISTRY: Dict[str, Type] = {}
            cls.REGISTRY_NAME = name
        if cls.__abstractmethods__:
            # Abstract classes are skipped; only concrete ones register.
            logger.info(
                f"Not Registering {name} to {cls.REGISTRY_NAME}. Abstract "
                f"method {list(cls.__abstractmethods__)} are not implemented."
            )
        else:
            logger.info("Registering {} to {}".format(name, cls.REGISTRY_NAME))
            cls.REGISTRY[name] = cls
        return super().__init__(name, bases, attrs)

    def create(cls, name: str, config):
        """Instantiate the registered class *name*, passing *config* to it."""
        assert name in cls.REGISTRY, "{} is not registered; use one of {}".format(
            name, cls.REGISTRY.keys()
        )
        logger.info("Creating instance of {} from config: {}".format(name, config))
        return cls.REGISTRY[name](config)

    def create_from_union(cls, union):
        """Dispatch on a tagged union: selected_field names the registered class."""
        return cls.create(union.selected_field, union.value)
| 34.897436 | 83 | 0.618663 |
5491d61e1d977c0e9e2fe51a1a0835e3e3a5555e | 1,734 | py | Python | data/somethingv1.py | ZhangHerman/R-S-R | d6a9260e078c49bb94ce7dbf794d61ab13731dd1 | [
"Apache-2.0"
] | null | null | null | data/somethingv1.py | ZhangHerman/R-S-R | d6a9260e078c49bb94ce7dbf794d61ab13731dd1 | [
"Apache-2.0"
] | null | null | null | data/somethingv1.py | ZhangHerman/R-S-R | d6a9260e078c49bb94ce7dbf794d61ab13731dd1 | [
"Apache-2.0"
] | null | null | null | import os
if __name__ == '__main__':
    # Build annotation files for Something-Something-v1:
    #   category.txt           - sorted category names, one per line
    #   {val,train}_videofolder.txt - "<frame-dir> <num-frames> <label-idx>" rows
    dataset_name = '/raid/ZYK-something-v1/something-v1/something-something-v1'  # 'jester-v1'
    with open('%s-labels.csv' % dataset_name) as f:
        # Sort the label names so indices are deterministic.
        categories = sorted(line.rstrip() for line in f)
    with open('category.txt', 'w') as f:
        f.write('\n'.join(categories))
    # Map each category name to its sorted index.
    dict_categories = {category: i for i, category in enumerate(categories)}
    files_input = ['%s-validation.csv' % dataset_name, '%s-train.csv' % dataset_name]
    files_output = ['val_videofolder.txt', 'train_videofolder.txt']
    for (filename_input, filename_output) in zip(files_input, files_output):
        folders = []
        idx_categories = []
        with open(filename_input) as f:
            for line in f:
                # Each row is "<video-id>;<category name>".
                items = line.rstrip().split(';')
                folders.append(items[0])
                idx_categories.append(dict_categories[items[1]])
        output = []
        # enumerate+zip replaces the old range(len(...)) indexing.
        for i, (curFolder, curIDX) in enumerate(zip(folders, idx_categories)):
            # counting the number of frames in each video folders
            dir_files = os.listdir(os.path.join('/raid/ZYK-something-v1/something-v1/20bn-something-something-v1', curFolder))
            output.append('%s %d %d' % ('/raid/ZYK-something-v1/something-v1/20bn-something-something-v1/' + curFolder, len(dir_files), curIDX))
            print('%d/%d' % (i, len(folders)))
        with open(filename_output, 'w') as f:
            f.write('\n'.join(output))
7c2b00af366ac16f0579764bb79782a4264f7ce7 | 47,630 | py | Python | manim/mobject/types/vectorized_mobject.py | naveen521kk/manimce-deprecated | 52a0cb0e49f79cb48f78b51c724f049d522fc465 | [
"MIT"
] | 1 | 2021-10-17T16:28:16.000Z | 2021-10-17T16:28:16.000Z | manim/mobject/types/vectorized_mobject.py | naveen521kk/manimce-deprecated | 52a0cb0e49f79cb48f78b51c724f049d522fc465 | [
"MIT"
] | 1 | 2020-11-01T03:27:09.000Z | 2020-11-01T03:27:09.000Z | manim/mobject/types/vectorized_mobject.py | naveen521kk/manimce-deprecated | 52a0cb0e49f79cb48f78b51c724f049d522fc465 | [
"MIT"
] | null | null | null | """Mobjects that use vector graphics."""
__all__ = [
"VMobject",
"VGroup",
"VDict",
"VectorizedPoint",
"CurvesAsSubmobjects",
"DashedVMobject",
]
import itertools as it
import sys
import colour
from ...constants import *
from ...mobject.mobject import Mobject
from ...mobject.three_d_utils import get_3d_vmob_gradient_start_and_end_points
from ...utils.bezier import bezier
from ...utils.bezier import get_smooth_handle_points
from ...utils.bezier import interpolate
from ...utils.bezier import integer_interpolate
from ...utils.bezier import partial_bezier_points
from ...utils.color import color_to_rgba, BLACK, WHITE
from ...utils.iterables import make_even
from ...utils.iterables import stretch_array_to_length
from ...utils.iterables import tuplify
from ...utils.simple_functions import clip_in_place
from ...utils.space_ops import rotate_vector
from ...utils.space_ops import get_norm
from ...utils.space_ops import shoelace_direction
# TODO
# - Change cubic curve groups to have 4 points instead of 3
# - Change sub_path idea accordingly
# - No more mark_paths_closed, instead have the camera test
# if last point in close to first point
# - Think about length of self.points. Always 0 or 1 mod 4?
# That's kind of weird.
class VMobject(Mobject):
CONFIG = {
"fill_color": None,
"fill_opacity": 0.0,
"stroke_color": None,
"stroke_opacity": 1.0,
"stroke_width": DEFAULT_STROKE_WIDTH,
# The purpose of background stroke is to have
# something that won't overlap the fill, e.g.
# For text against some textured background
"background_stroke_color": BLACK,
"background_stroke_opacity": 1.0,
"background_stroke_width": 0,
# When a color c is set, there will be a second color
# computed based on interpolating c to WHITE by with
# sheen_factor, and the display will gradient to this
# secondary color in the direction of sheen_direction.
"sheen_factor": 0.0,
"sheen_direction": UL,
# Indicates that it will not be displayed, but
# that it should count in parent mobject's path
"close_new_points": False,
"pre_function_handle_to_anchor_scale_factor": 0.01,
"make_smooth_after_applying_functions": False,
"background_image_file": None,
"shade_in_3d": False,
# This is within a pixel
# TODO, do we care about accounting for
# varying zoom levels?
"tolerance_for_point_equality": 1e-6,
"n_points_per_cubic_curve": 4,
}
def get_group_class(self):
return VGroup
# Colors
def init_colors(self):
self.set_fill(
color=self.fill_color or self.color,
opacity=self.fill_opacity,
)
self.set_stroke(
color=self.stroke_color or self.color,
width=self.stroke_width,
opacity=self.stroke_opacity,
)
self.set_background_stroke(
color=self.background_stroke_color,
width=self.background_stroke_width,
opacity=self.background_stroke_opacity,
)
self.set_sheen(
factor=self.sheen_factor,
direction=self.sheen_direction,
)
return self
def generate_rgbas_array(self, color, opacity):
"""
First arg can be either a color, or a tuple/list of colors.
Likewise, opacity can either be a float, or a tuple of floats.
If self.sheen_factor is not zero, and only
one color was passed in, a second slightly light color
will automatically be added for the gradient
"""
colors = list(tuplify(color))
opacities = list(tuplify(opacity))
rgbas = np.array(
[color_to_rgba(c, o) for c, o in zip(*make_even(colors, opacities))]
)
sheen_factor = self.get_sheen_factor()
if sheen_factor != 0 and len(rgbas) == 1:
light_rgbas = np.array(rgbas)
light_rgbas[:, :3] += sheen_factor
clip_in_place(light_rgbas, 0, 1)
rgbas = np.append(rgbas, light_rgbas, axis=0)
return rgbas
def update_rgbas_array(self, array_name, color=None, opacity=None):
passed_color = color if (color is not None) else BLACK
passed_opacity = opacity if (opacity is not None) else 0
rgbas = self.generate_rgbas_array(passed_color, passed_opacity)
if not hasattr(self, array_name):
setattr(self, array_name, rgbas)
return self
# Match up current rgbas array with the newly calculated
# one. 99% of the time they'll be the same.
curr_rgbas = getattr(self, array_name)
if len(curr_rgbas) < len(rgbas):
curr_rgbas = stretch_array_to_length(curr_rgbas, len(rgbas))
setattr(self, array_name, curr_rgbas)
elif len(rgbas) < len(curr_rgbas):
rgbas = stretch_array_to_length(rgbas, len(curr_rgbas))
# Only update rgb if color was not None, and only
# update alpha channel if opacity was passed in
if color is not None:
curr_rgbas[:, :3] = rgbas[:, :3]
if opacity is not None:
curr_rgbas[:, 3] = rgbas[:, 3]
return self
def set_fill(self, color=None, opacity=None, family=True):
if family:
for submobject in self.submobjects:
submobject.set_fill(color, opacity, family)
self.update_rgbas_array("fill_rgbas", color, opacity)
return self
def set_stroke(
self, color=None, width=None, opacity=None, background=False, family=True
):
if family:
for submobject in self.submobjects:
submobject.set_stroke(color, width, opacity, background, family)
if background:
array_name = "background_stroke_rgbas"
width_name = "background_stroke_width"
else:
array_name = "stroke_rgbas"
width_name = "stroke_width"
self.update_rgbas_array(array_name, color, opacity)
if width is not None:
setattr(self, width_name, width)
return self
def set_background_stroke(self, **kwargs):
kwargs["background"] = True
self.set_stroke(**kwargs)
return self
def set_style(
self,
fill_color=None,
fill_opacity=None,
stroke_color=None,
stroke_width=None,
stroke_opacity=None,
background_stroke_color=None,
background_stroke_width=None,
background_stroke_opacity=None,
sheen_factor=None,
sheen_direction=None,
background_image_file=None,
family=True,
):
self.set_fill(color=fill_color, opacity=fill_opacity, family=family)
self.set_stroke(
color=stroke_color,
width=stroke_width,
opacity=stroke_opacity,
family=family,
)
self.set_background_stroke(
color=background_stroke_color,
width=background_stroke_width,
opacity=background_stroke_opacity,
family=family,
)
if sheen_factor:
self.set_sheen(
factor=sheen_factor,
direction=sheen_direction,
family=family,
)
if background_image_file:
self.color_using_background_image(background_image_file)
return self
def get_style(self, simple=False):
ret = {
"stroke_opacity": self.get_stroke_opacity(),
"stroke_width": self.get_stroke_width(),
}
if simple:
ret["fill_color"] = colour.rgb2hex(self.get_fill_color().get_rgb())
ret["fill_opacity"] = self.get_fill_opacity()
ret["stroke_color"] = colour.rgb2hex(self.get_stroke_color().get_rgb())
else:
ret["fill_color"] = self.get_fill_colors()
ret["fill_opacity"] = self.get_fill_opacities()
ret["stroke_color"] = self.get_stroke_colors()
ret["background_stroke_color"] = self.get_stroke_colors(background=True)
ret["background_stroke_width"] = self.get_stroke_width(background=True)
ret["background_stroke_opacity"] = self.get_stroke_opacity(background=True)
ret["sheen_factor"] = self.get_sheen_factor()
ret["sheen_direction"] = self.get_sheen_direction()
ret["background_image_file"] = self.get_background_image_file()
return ret
def match_style(self, vmobject, family=True):
self.set_style(**vmobject.get_style(), family=False)
if family:
# Does its best to match up submobject lists, and
# match styles accordingly
submobs1, submobs2 = self.submobjects, vmobject.submobjects
if len(submobs1) == 0:
return self
elif len(submobs2) == 0:
submobs2 = [vmobject]
for sm1, sm2 in zip(*make_even(submobs1, submobs2)):
sm1.match_style(sm2)
return self
def set_color(self, color, family=True):
self.set_fill(color, family=family)
self.set_stroke(color, family=family)
self.color = colour.Color(color)
return self
def set_opacity(self, opacity, family=True):
self.set_fill(opacity=opacity, family=family)
self.set_stroke(opacity=opacity, family=family)
self.set_stroke(opacity=opacity, family=family, background=True)
return self
def fade(self, darkness=0.5, family=True):
factor = 1.0 - darkness
self.set_fill(
opacity=factor * self.get_fill_opacity(),
family=False,
)
self.set_stroke(
opacity=factor * self.get_stroke_opacity(),
family=False,
)
self.set_background_stroke(
opacity=factor * self.get_stroke_opacity(background=True),
family=False,
)
super().fade(darkness, family)
return self
def get_fill_rgbas(self):
try:
return self.fill_rgbas
except AttributeError:
return np.zeros((1, 4))
def get_fill_color(self):
"""
If there are multiple colors (for gradient)
this returns the first one
"""
return self.get_fill_colors()[0]
def get_fill_opacity(self):
"""
If there are multiple opacities, this returns the
first
"""
return self.get_fill_opacities()[0]
def get_fill_colors(self):
return [colour.Color(rgb=rgba[:3]) for rgba in self.get_fill_rgbas()]
def get_fill_opacities(self):
return self.get_fill_rgbas()[:, 3]
def get_stroke_rgbas(self, background=False):
try:
if background:
rgbas = self.background_stroke_rgbas
else:
rgbas = self.stroke_rgbas
return rgbas
except AttributeError:
return np.zeros((1, 4))
def get_stroke_color(self, background=False):
return self.get_stroke_colors(background)[0]
def get_stroke_width(self, background=False):
if background:
width = self.background_stroke_width
else:
width = self.stroke_width
return max(0, width)
def get_stroke_opacity(self, background=False):
return self.get_stroke_opacities(background)[0]
def get_stroke_colors(self, background=False):
return [
colour.Color(rgb=rgba[:3]) for rgba in self.get_stroke_rgbas(background)
]
def get_stroke_opacities(self, background=False):
return self.get_stroke_rgbas(background)[:, 3]
def get_color(self):
if np.all(self.get_fill_opacities() == 0):
return self.get_stroke_color()
return self.get_fill_color()
def set_sheen_direction(self, direction, family=True):
direction = np.array(direction)
if family:
for submob in self.get_family():
submob.sheen_direction = direction
else:
self.sheen_direction = direction
return self
def set_sheen(self, factor, direction=None, family=True):
if family:
for submob in self.submobjects:
submob.set_sheen(factor, direction, family)
self.sheen_factor = factor
if direction is not None:
# family set to false because recursion will
# already be handled above
self.set_sheen_direction(direction, family=False)
# Reset color to put sheen_factor into effect
if factor != 0:
self.set_stroke(self.get_stroke_color(), family=family)
self.set_fill(self.get_fill_color(), family=family)
return self
def get_sheen_direction(self):
return np.array(self.sheen_direction)
def get_sheen_factor(self):
return self.sheen_factor
def get_gradient_start_and_end_points(self):
if self.shade_in_3d:
return get_3d_vmob_gradient_start_and_end_points(self)
else:
direction = self.get_sheen_direction()
c = self.get_center()
bases = np.array(
[self.get_edge_center(vect) - c for vect in [RIGHT, UP, OUT]]
).transpose()
offset = np.dot(bases, direction)
return (c - offset, c + offset)
def color_using_background_image(self, background_image_file):
self.background_image_file = background_image_file
self.set_color(WHITE)
for submob in self.submobjects:
submob.color_using_background_image(background_image_file)
return self
def get_background_image_file(self):
return self.background_image_file
def match_background_image_file(self, vmobject):
self.color_using_background_image(vmobject.get_background_image_file())
return self
def set_shade_in_3d(self, value=True, z_index_as_group=False):
for submob in self.get_family():
submob.shade_in_3d = value
if z_index_as_group:
submob.z_index_group = self
return self
# Points
def set_points(self, points):
self.points = np.array(points)
return self
def get_points(self):
return np.array(self.points)
def set_anchors_and_handles(self, anchors1, handles1, handles2, anchors2):
assert len(anchors1) == len(handles1) == len(handles2) == len(anchors2)
nppcc = self.n_points_per_cubic_curve # 4
total_len = nppcc * len(anchors1)
self.points = np.zeros((total_len, self.dim))
arrays = [anchors1, handles1, handles2, anchors2]
for index, array in enumerate(arrays):
self.points[index::nppcc] = array
return self
def clear_points(self):
self.points = np.zeros((0, self.dim))
def append_points(self, new_points):
# TODO, check that number new points is a multiple of 4?
# or else that if len(self.points) % 4 == 1, then
# len(new_points) % 4 == 3?
self.points = np.append(self.points, new_points, axis=0)
return self
def start_new_path(self, point):
# TODO, make sure that len(self.points) % 4 == 0?
self.append_points([point])
return self
def add_cubic_bezier_curve(self, anchor1, handle1, handle2, anchor2):
# TODO, check the len(self.points) % 4 == 0?
self.append_points([anchor1, handle1, handle2, anchor2])
def add_cubic_bezier_curve_to(self, handle1, handle2, anchor):
"""
Add cubic bezier curve to the path.
"""
self.throw_error_if_no_points()
new_points = [handle1, handle2, anchor]
if self.has_new_path_started():
self.append_points(new_points)
else:
self.append_points([self.get_last_point()] + new_points)
def add_line_to(self, point):
nppcc = self.n_points_per_cubic_curve
self.add_cubic_bezier_curve_to(
*[
interpolate(self.get_last_point(), point, a)
for a in np.linspace(0, 1, nppcc)[1:]
]
)
return self
def add_smooth_curve_to(self, *points):
"""
If two points are passed in, the first is intepretted
as a handle, the second as an anchor
"""
if len(points) == 1:
handle2 = None
new_anchor = points[0]
elif len(points) == 2:
handle2, new_anchor = points
else:
name = sys._getframe(0).f_code.co_name
raise ValueError("Only call {} with 1 or 2 points".format(name))
if self.has_new_path_started():
self.add_line_to(new_anchor)
else:
self.throw_error_if_no_points()
last_h2, last_a2 = self.points[-2:]
last_tangent = last_a2 - last_h2
handle1 = last_a2 + last_tangent
if handle2 is None:
to_anchor_vect = new_anchor - last_a2
new_tangent = rotate_vector(last_tangent, PI, axis=to_anchor_vect)
handle2 = new_anchor - new_tangent
self.append_points([last_a2, handle1, handle2, new_anchor])
return self
def has_new_path_started(self):
nppcc = self.n_points_per_cubic_curve # 4
return len(self.points) % nppcc == 1
def get_last_point(self):
return self.points[-1]
def is_closed(self):
return self.consider_points_equals(self.points[0], self.points[-1])
def add_points_as_corners(self, points):
for point in points:
self.add_line_to(point)
return points
def set_points_as_corners(self, points):
nppcc = self.n_points_per_cubic_curve
points = np.array(points)
self.set_anchors_and_handles(
*[interpolate(points[:-1], points[1:], a) for a in np.linspace(0, 1, nppcc)]
)
return self
def set_points_smoothly(self, points):
self.set_points_as_corners(points)
self.make_smooth()
return self
    def change_anchor_mode(self, mode):
        """Rebuild every subpath with either smooth or jagged bezier handles.

        Parameters
        ----------
        mode : :class:`str`
            Either ``"smooth"`` (handles chosen for C1 continuity) or
            ``"jagged"`` (handles placed at 1/3 and 2/3 along each chord,
            i.e. straight-line segments).
        """
        assert mode in ["jagged", "smooth"]
        nppcc = self.n_points_per_cubic_curve
        for submob in self.family_members_with_points():
            # Snapshot the subpaths before clearing, since the points are
            # rebuilt in place from those snapshots below.
            subpaths = submob.get_subpaths()
            submob.clear_points()
            for subpath in subpaths:
                # Anchors are every nppcc-th point; the final end anchor is
                # appended explicitly so consecutive anchors pair up.
                anchors = np.append(subpath[::nppcc], subpath[-1:], 0)
                if mode == "smooth":
                    h1, h2 = get_smooth_handle_points(anchors)
                elif mode == "jagged":
                    a1 = anchors[:-1]
                    a2 = anchors[1:]
                    h1 = interpolate(a1, a2, 1.0 / 3)
                    h2 = interpolate(a1, a2, 2.0 / 3)
                # Keep the anchors, overwrite only the two handle slots of
                # each cubic (offsets 1 and 2 within every group of nppcc).
                new_subpath = np.array(subpath)
                new_subpath[1::nppcc] = h1
                new_subpath[2::nppcc] = h2
                submob.append_points(new_subpath)
        return self
def make_smooth(self):
return self.change_anchor_mode("smooth")
def make_jagged(self):
return self.change_anchor_mode("jagged")
def add_subpath(self, points):
assert len(points) % 4 == 0
self.points = np.append(self.points, points, axis=0)
return self
def append_vectorized_mobject(self, vectorized_mobject):
new_points = list(vectorized_mobject.points)
if self.has_new_path_started():
# Remove last point, which is starting
# a new path
self.points = self.points[:-1]
self.append_points(new_points)
def apply_function(self, function):
factor = self.pre_function_handle_to_anchor_scale_factor
self.scale_handle_to_anchor_distances(factor)
Mobject.apply_function(self, function)
self.scale_handle_to_anchor_distances(1.0 / factor)
if self.make_smooth_after_applying_functions:
self.make_smooth()
return self
def scale_handle_to_anchor_distances(self, factor):
"""
If the distance between a given handle point H and its associated
anchor point A is d, then it changes H to be a distances factor*d
away from A, but so that the line from A to H doesn't change.
This is mostly useful in the context of applying a (differentiable)
function, to preserve tangency properties. One would pull all the
handles closer to their anchors, apply the function then push them out
again.
"""
for submob in self.family_members_with_points():
if len(submob.points) < self.n_points_per_cubic_curve:
continue
a1, h1, h2, a2 = submob.get_anchors_and_handles()
a1_to_h1 = h1 - a1
a2_to_h2 = h2 - a2
new_h1 = a1 + factor * a1_to_h1
new_h2 = a2 + factor * a2_to_h2
submob.set_anchors_and_handles(a1, new_h1, new_h2, a2)
return self
#
def consider_points_equals(self, p0, p1):
return np.allclose(p0, p1, atol=self.tolerance_for_point_equality)
def consider_points_equals_2d(self, p0, p1):
"""
Determine if two points are close enough to be considered equal.
This uses the algorithm from np.isclose(), but expanded here for the
2D point case. NumPy is overkill for such a small question.
"""
rtol = 1.0e-5 # default from np.isclose()
atol = self.tolerance_for_point_equality
if abs(p0[0] - p1[0]) > atol + rtol * abs(p1[0]):
return False
if abs(p0[1] - p1[1]) > atol + rtol * abs(p1[1]):
return False
return True
# Information about line
def get_cubic_bezier_tuples_from_points(self, points):
return np.array(list(self.gen_cubic_bezier_tuples_from_points(points)))
def gen_cubic_bezier_tuples_from_points(self, points):
"""
Get a generator for the cubic bezier tuples of this object.
Generator to not materialize a list or np.array needlessly.
"""
nppcc = VMobject.CONFIG["n_points_per_cubic_curve"]
remainder = len(points) % nppcc
points = points[: len(points) - remainder]
return (points[i : i + nppcc] for i in range(0, len(points), nppcc))
def get_cubic_bezier_tuples(self):
return self.get_cubic_bezier_tuples_from_points(self.get_points())
def _gen_subpaths_from_points(self, points, filter_func):
nppcc = self.n_points_per_cubic_curve
split_indices = filter(filter_func, range(nppcc, len(points), nppcc))
split_indices = [0] + list(split_indices) + [len(points)]
return (
points[i1:i2]
for i1, i2 in zip(split_indices, split_indices[1:])
if (i2 - i1) >= nppcc
)
def get_subpaths_from_points(self, points):
return list(
self._gen_subpaths_from_points(
points,
lambda n: not self.consider_points_equals(points[n - 1], points[n]),
)
)
def gen_subpaths_from_points_2d(self, points):
return self._gen_subpaths_from_points(
points,
lambda n: not self.consider_points_equals_2d(points[n - 1], points[n]),
)
def get_subpaths(self):
return self.get_subpaths_from_points(self.get_points())
def get_nth_curve_points(self, n):
assert n < self.get_num_curves()
nppcc = self.n_points_per_cubic_curve
return self.points[nppcc * n : nppcc * (n + 1)]
def get_nth_curve_function(self, n):
return bezier(self.get_nth_curve_points(n))
def get_num_curves(self):
nppcc = self.n_points_per_cubic_curve
return len(self.points) // nppcc
def point_from_proportion(self, alpha):
num_cubics = self.get_num_curves()
n, residue = integer_interpolate(0, num_cubics, alpha)
curve = self.get_nth_curve_function(n)
return curve(residue)
def get_anchors_and_handles(self):
"""
returns anchors1, handles1, handles2, anchors2,
where (anchors1[i], handles1[i], handles2[i], anchors2[i])
will be four points defining a cubic bezier curve
for any i in range(0, len(anchors1))
"""
nppcc = self.n_points_per_cubic_curve
return [self.points[i::nppcc] for i in range(nppcc)]
def get_start_anchors(self):
return self.points[0 :: self.n_points_per_cubic_curve]
def get_end_anchors(self):
nppcc = self.n_points_per_cubic_curve
return self.points[nppcc - 1 :: nppcc]
def get_anchors(self):
if self.points.shape[0] == 1:
return self.points
return np.array(
list(
it.chain(
*zip(
self.get_start_anchors(),
self.get_end_anchors(),
)
)
)
)
def get_points_defining_boundary(self):
return np.array(list(it.chain(*[sm.get_anchors() for sm in self.get_family()])))
def get_arc_length(self, n_sample_points=None):
if n_sample_points is None:
n_sample_points = 4 * self.get_num_curves() + 1
points = np.array(
[self.point_from_proportion(a) for a in np.linspace(0, 1, n_sample_points)]
)
diffs = points[1:] - points[:-1]
norms = np.apply_along_axis(get_norm, 1, diffs)
return np.sum(norms)
# Alignment
def align_points(self, vmobject):
self.align_rgbas(vmobject)
if self.get_num_points() == vmobject.get_num_points():
return
for mob in self, vmobject:
# If there are no points, add one to
# whereever the "center" is
if mob.has_no_points():
mob.start_new_path(mob.get_center())
# If there's only one point, turn it into
# a null curve
if mob.has_new_path_started():
mob.add_line_to(mob.get_last_point())
# Figure out what the subpaths are, and align
subpaths1 = self.get_subpaths()
subpaths2 = vmobject.get_subpaths()
n_subpaths = max(len(subpaths1), len(subpaths2))
# Start building new ones
new_path1 = np.zeros((0, self.dim))
new_path2 = np.zeros((0, self.dim))
nppcc = self.n_points_per_cubic_curve
def get_nth_subpath(path_list, n):
if n >= len(path_list):
# Create a null path at the very end
return [path_list[-1][-1]] * nppcc
return path_list[n]
for n in range(n_subpaths):
sp1 = get_nth_subpath(subpaths1, n)
sp2 = get_nth_subpath(subpaths2, n)
diff1 = max(0, (len(sp2) - len(sp1)) // nppcc)
diff2 = max(0, (len(sp1) - len(sp2)) // nppcc)
sp1 = self.insert_n_curves_to_point_list(diff1, sp1)
sp2 = self.insert_n_curves_to_point_list(diff2, sp2)
new_path1 = np.append(new_path1, sp1, axis=0)
new_path2 = np.append(new_path2, sp2, axis=0)
self.set_points(new_path1)
vmobject.set_points(new_path2)
return self
def insert_n_curves(self, n):
new_path_point = None
if self.has_new_path_started():
new_path_point = self.get_last_point()
new_points = self.insert_n_curves_to_point_list(n, self.get_points())
self.set_points(new_points)
if new_path_point is not None:
self.append_points([new_path_point])
return self
    def insert_n_curves_to_point_list(self, n, points):
        """Return a copy of ``points`` subdivided so it has ``n`` more cubics.

        The extra curves are spread as evenly as possible over the existing
        ones, so the traced path is geometrically unchanged — only the number
        of (anchor, handle, handle, anchor) quadruples grows by ``n``.
        """
        if len(points) == 1:
            # Degenerate single-point path: just repeat the point enough
            # times to account for n null curves.
            nppcc = self.n_points_per_cubic_curve
            return np.repeat(points, nppcc * n, 0)
        bezier_quads = self.get_cubic_bezier_tuples_from_points(points)
        curr_num = len(bezier_quads)
        target_num = curr_num + n
        # This is an array with values ranging from 0
        # up to curr_num, with repeats such that
        # it's total length is target_num. For example,
        # with curr_num = 10, target_num = 15, this would
        # be [0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9]
        repeat_indices = (np.arange(target_num) * curr_num) // target_num
        # If the nth term of this list is k, it means
        # that the nth curve of our path should be split
        # into k pieces. In the above example, this would
        # be [2, 1, 2, 1, 2, 1, 2, 1, 2, 1]
        split_factors = [sum(repeat_indices == i) for i in range(curr_num)]
        new_points = np.zeros((0, self.dim))
        for quad, sf in zip(bezier_quads, split_factors):
            # What was once a single cubic curve defined
            # by "quad" will now be broken into sf
            # smaller cubic curves
            alphas = np.linspace(0, 1, sf + 1)
            for a1, a2 in zip(alphas, alphas[1:]):
                new_points = np.append(
                    new_points, partial_bezier_points(quad, a1, a2), axis=0
                )
        return new_points
def align_rgbas(self, vmobject):
attrs = ["fill_rgbas", "stroke_rgbas", "background_stroke_rgbas"]
for attr in attrs:
a1 = getattr(self, attr)
a2 = getattr(vmobject, attr)
if len(a1) > len(a2):
new_a2 = stretch_array_to_length(a2, len(a1))
setattr(vmobject, attr, new_a2)
elif len(a2) > len(a1):
new_a1 = stretch_array_to_length(a1, len(a2))
setattr(self, attr, new_a1)
return self
def get_point_mobject(self, center=None):
if center is None:
center = self.get_center()
point = VectorizedPoint(center)
point.match_style(self)
return point
def interpolate_color(self, mobject1, mobject2, alpha):
attrs = [
"fill_rgbas",
"stroke_rgbas",
"background_stroke_rgbas",
"stroke_width",
"background_stroke_width",
"sheen_direction",
"sheen_factor",
]
for attr in attrs:
setattr(
self,
attr,
interpolate(getattr(mobject1, attr), getattr(mobject2, attr), alpha),
)
if alpha == 1.0:
setattr(self, attr, getattr(mobject2, attr))
def pointwise_become_partial(self, vmobject, a, b):
assert isinstance(vmobject, VMobject)
# Partial curve includes three portions:
# - A middle section, which matches the curve exactly
# - A start, which is some ending portion of an inner cubic
# - An end, which is the starting portion of a later inner cubic
if a <= 0 and b >= 1:
self.set_points(vmobject.points)
return self
bezier_quads = vmobject.get_cubic_bezier_tuples()
num_cubics = len(bezier_quads)
lower_index, lower_residue = integer_interpolate(0, num_cubics, a)
upper_index, upper_residue = integer_interpolate(0, num_cubics, b)
self.clear_points()
if num_cubics == 0:
return self
if lower_index == upper_index:
self.append_points(
partial_bezier_points(
bezier_quads[lower_index], lower_residue, upper_residue
)
)
else:
self.append_points(
partial_bezier_points(bezier_quads[lower_index], lower_residue, 1)
)
for quad in bezier_quads[lower_index + 1 : upper_index]:
self.append_points(quad)
self.append_points(
partial_bezier_points(bezier_quads[upper_index], 0, upper_residue)
)
return self
def get_subcurve(self, a, b):
vmob = self.copy()
vmob.pointwise_become_partial(self, a, b)
return vmob
def get_direction(self):
"""Uses :func:`~.space_ops.shoelace_direction` to calculate the direction.
The direction of points determines in which direction the
object is drawn, clockwise or counterclockwise.
Examples
--------
The default direction of a :class:`~.Circle` is counterclockwise::
>>> from manim import Circle
>>> Circle().get_direction()
'CCW'
Returns
-------
:class:`str`
Either ``"CW"`` or ``"CCW"``.
"""
return shoelace_direction(self.get_start_anchors())
def reverse_direction(self):
"""Reverts the point direction by inverting the point order.
Returns
-------
:class:`VMobject`
Returns self.
Examples
--------
.. manim:: ChangeOfDirection
class ChangeOfDirection(Scene):
def construct(self):
ccw = RegularPolygon(5)
ccw.shift(LEFT).rotate
cw = RegularPolygon(5)
cw.shift(RIGHT).reverse_direction()
self.play(ShowCreation(ccw), ShowCreation(cw),
run_time=4)
"""
self.points = self.points[::-1]
return self
def force_direction(self, target_direction):
"""Makes sure that points are either directed clockwise or
counterclockwise.
Parameters
----------
target_direction : :class:`str`
Either ``"CW"`` or ``"CCW"``.
"""
if target_direction not in ("CW", "CCW"):
raise ValueError('Invalid input for force_direction. Use "CW" or "CCW"')
if self.get_direction() != target_direction:
# Since we already assured the input is CW or CCW,
# and the directions don't match, we just reverse
self.reverse_direction()
return self
class VGroup(VMobject):
"""A group of vectorized mobjects.
This can be used to group multiple :class:`~.VMobject` instances together
in order to scale, move, ... them together.
Examples
--------
.. manim:: ArcShapeIris
:save_last_frame:
class ArcShapeIris(Scene):
def construct(self):
colors = [DARK_BLUE, DARK_BROWN, BLUE_E, BLUE_D, BLUE_A, TEAL_B, GREEN_B, YELLOW_E]
radius = [1 + rad * 0.1 for rad in range(len(colors))]
circles_group = VGroup()
# zip(radius, color) makes the iterator [(radius[i], color[i]) for i in range(radius)]
circles_group.add(*[Circle(radius=rad, stroke_width=10, color=col)
for rad, col in zip(radius, colors)])
self.add(circles_group)
"""
def __init__(self, *vmobjects, **kwargs):
VMobject.__init__(self, **kwargs)
self.add(*vmobjects)
def __repr__(self):
return (
self.__class__.__name__
+ "("
+ ", ".join(str(mob) for mob in self.submobjects)
+ ")"
)
def add(self, *vmobjects):
"""Checks if all passed elements are an instance of VMobject and then add them to submobjects
Parameters
----------
vmobjects : :class:`~.VMobject`
List of VMobject to add
Returns
-------
None
Raises
------
TypeError
If one element of the list is not an instance of VMobject
"""
if not all(isinstance(m, VMobject) for m in vmobjects):
raise TypeError("All submobjects must be of type VMobject")
super().add(*vmobjects)
class VDict(VMobject):
"""A VGroup-like class, also offering submobject access by
key, like a python dict
Parameters
----------
mapping_or_iterable : Union[:class:`Mapping`, Iterable[Tuple[Hashable, :class:`~.VMobject`]]], optional
The parameter specifying the key-value mapping of keys and mobjects.
show_keys : :class:`bool`, optional
Whether to also display the key associated with
the mobject. This might be useful when debugging,
especially when there are a lot of mobjects in the
:class:`VDict`. Defaults to False.
kwargs : Any
Other arguments to be passed to `Mobject` or the CONFIG.
Attributes
----------
show_keys : :class:`bool`
Whether to also display the key associated with
the mobject. This might be useful when debugging,
especially when there are a lot of mobjects in the
:class:`VDict`. When displayed, the key is towards
the left of the mobject.
Defaults to False.
submob_dict : :class:`dict`
Is the actual python dictionary that is used to bind
the keys to the mobjects.
Examples
--------
.. manim:: ShapesWithVDict
class ShapesWithVDict(Scene):
def construct(self):
square = Square().set_color(RED)
circle = Circle().set_color(YELLOW).next_to(square, UP)
# create dict from list of tuples each having key-mobject pair
pairs = [("s", square), ("c", circle)]
my_dict = VDict(pairs, show_keys=True)
# display it just like a VGroup
self.play(ShowCreation(my_dict))
self.wait()
text = Tex("Some text").set_color(GREEN).next_to(square, DOWN)
# add a key-value pair by wrapping it in a single-element list of tuple
# after attrs branch is merged, it will be easier like `.add(t=text)`
my_dict.add([("t", text)])
self.wait()
rect = Rectangle().next_to(text, DOWN)
# can also do key assignment like a python dict
my_dict["r"] = rect
# access submobjects like a python dict
my_dict["t"].set_color(PURPLE)
self.play(my_dict["t"].scale, 3)
self.wait()
# also supports python dict styled reassignment
my_dict["t"] = Tex("Some other text").set_color(BLUE)
self.wait()
# remove submoject by key
my_dict.remove("t")
self.wait()
self.play(Uncreate(my_dict["s"]))
self.wait()
self.play(FadeOut(my_dict["c"]))
self.wait()
self.play(FadeOutAndShift(my_dict["r"], DOWN))
self.wait()
# you can also make a VDict from an existing dict of mobjects
plain_dict = {
1: Integer(1).shift(DOWN),
2: Integer(2).shift(2 * DOWN),
3: Integer(3).shift(3 * DOWN),
}
vdict_from_plain_dict = VDict(plain_dict)
vdict_from_plain_dict.shift(1.5 * (UP + LEFT))
self.play(ShowCreation(vdict_from_plain_dict))
# you can even use zip
vdict_using_zip = VDict(zip(["s", "c", "r"], [Square(), Circle(), Rectangle()]))
vdict_using_zip.shift(1.5 * RIGHT)
self.play(ShowCreation(vdict_using_zip))
self.wait()
"""
def __init__(self, mapping_or_iterable={}, show_keys=False, **kwargs):
VMobject.__init__(self, **kwargs)
self.show_keys = show_keys
self.submob_dict = {}
self.add(mapping_or_iterable)
def __repr__(self):
return __class__.__name__ + "(" + repr(self.submob_dict) + ")"
def add(self, mapping_or_iterable):
"""Adds the key-value pairs to the :class:`VDict` object.
Also, it internally adds the value to the `submobjects` :class:`list`
of :class:`~.Mobject`, which is responsible for actual on-screen display.
Parameters
---------
mapping_or_iterable : Union[:class:`Mapping`, Iterable[Tuple[Hashable, :class:`~.VMobject`]]], optional
The parameter specifying the key-value mapping of keys and mobjects.
Returns
-------
:class:`VDict`
Returns the :class:`VDict` object on which this method was called.
Examples
--------
Normal usage::
square_obj = Square()
my_dict.add([('s', square_obj)])
"""
for key, value in dict(mapping_or_iterable).items():
self.add_key_value_pair(key, value)
return self
def remove(self, key):
"""Removes the mobject from the :class:`VDict` object having the key `key`
Also, it internally removes the mobject from the `submobjects` :class:`list`
of :class:`~.Mobject`, (which is responsible for removing it from the screen)
Parameters
----------
key : :class:`typing.Hashable`
The key of the submoject to be removed.
Returns
-------
:class:`VDict`
Returns the :class:`VDict` object on which this method was called.
Examples
--------
Normal usage::
my_dict.remove('square')
"""
if key not in self.submob_dict:
raise KeyError("The given key '%s' is not present in the VDict" % str(key))
super().remove(self.submob_dict[key])
del self.submob_dict[key]
return self
def __getitem__(self, key):
"""Override the [] operator for item retrieval.
Parameters
----------
key : :class:`typing.Hashable`
The key of the submoject to be accessed
Returns
-------
:class:`VMobject`
The submobject corresponding to the key `key`
Examples
--------
Normal usage::
self.play(ShowCreation(my_dict['s']))
"""
submob = self.submob_dict[key]
return submob
def __setitem__(self, key, value):
"""Override the [] operator for item assignment.
Parameters
----------
key : :class:`typing.Hashable`
The key of the submoject to be assigned
value : :class:`VMobject`
The submobject to bind the key to
Returns
-------
None
Examples
--------
Normal usage::
square_obj = Square()
my_dict['sq'] = square_obj
"""
if key in self.submob_dict:
self.remove(key)
self.add([(key, value)])
def __delitem__(self, key):
"""Override the del operator for deleting an item.
Parameters
----------
key : :class:`typing.Hashable`
The key of the submoject to be deleted
Returns
-------
None
Examples
--------
::
>>> from manim import *
>>> my_dict = VDict({'sq': Square()})
>>> 'sq' in my_dict
True
>>> del my_dict['sq']
>>> 'sq' in my_dict
False
Notes
-----
Removing an item from a VDict does not remove that item from any Scene
that the VDict is part of.
"""
del self.submob_dict[key]
def __contains__(self, key):
"""Override the in operator.
Parameters
----------
key : :class:`typing.Hashable`
The key to check membership of.
Returns
-------
:class:`bool`
Examples
--------
::
>>> from manim import *
>>> my_dict = VDict({'sq': Square()})
>>> 'sq' in my_dict
True
"""
return key in self.submob_dict
def get_all_submobjects(self):
"""To get all the submobjects associated with a particular :class:`VDict` object
Returns
-------
:class:`dict_values`
All the submobjects associated with the :class:`VDict` object
Examples
--------
Normal usage::
for submob in my_dict.get_all_submobjects():
self.play(ShowCreation(submob))
"""
submobjects = self.submob_dict.values()
return submobjects
def add_key_value_pair(self, key, value):
"""A utility function used by :meth:`add` to add the key-value pair
to :attr:`submob_dict`. Not really meant to be used externally.
Parameters
----------
key : :class:`typing.Hashable`
The key of the submobject to be added.
value : :class:`~.VMobject`
The mobject associated with the key
Returns
-------
None
Raises
------
TypeError
If the value is not an instance of VMobject
Examples
--------
Normal usage::
square_obj = Square()
self.add_key_value_pair('s', square_obj)
"""
if not isinstance(value, VMobject):
raise TypeError("All submobjects must be of type VMobject")
mob = value
if self.show_keys:
# This import is here and not at the top to avoid circular import
from ...mobject.svg.tex_mobject import Tex
key_text = Tex(str(key)).next_to(value, LEFT)
mob.add(key_text)
self.submob_dict[key] = mob
super().add(value)
class VectorizedPoint(VMobject):
    """An invisible, (nearly) zero-size VMobject wrapping a single point."""
    CONFIG = {
        "color": BLACK,
        "fill_opacity": 0,
        "stroke_width": 0,
        # Reported instead of the true zero extent of a single point.
        "artificial_width": 0.01,
        "artificial_height": 0.01,
    }
    def __init__(self, location=ORIGIN, **kwargs):
        VMobject.__init__(self, **kwargs)
        self.set_points(np.array([location]))
    def get_width(self):
        # Override: a lone point has zero width, so report the artificial one.
        return self.artificial_width
    def get_height(self):
        return self.artificial_height
    def get_location(self):
        # Copy, so callers cannot mutate the internal points array.
        return np.array(self.points[0])
    def set_location(self, new_loc):
        self.set_points(np.array([new_loc]))
class CurvesAsSubmobjects(VGroup):
    """A VGroup holding one submobject per cubic Bézier segment of ``vmobject``."""
    def __init__(self, vmobject, **kwargs):
        VGroup.__init__(self, **kwargs)
        for bezier_tuple in vmobject.get_cubic_bezier_tuples():
            segment = VMobject()
            segment.set_points(bezier_tuple)
            segment.match_style(vmobject)
            self.add(segment)
class DashedVMobject(VMobject):
    """A copy of ``vmobject`` drawn as a sequence of evenly spaced dashes."""
    CONFIG = {"num_dashes": 15, "positive_space_ratio": 0.5, "color": WHITE}
    def __init__(self, vmobject, **kwargs):
        VMobject.__init__(self, **kwargs)
        dashes = self.num_dashes
        ratio = self.positive_space_ratio
        if dashes > 0:
            # Division points of the unit interval, one per dash boundary.
            alphas = np.linspace(0, 1, dashes + 1)
            # Fraction of the curve taken by one full dash period...
            period = 1.0 / dashes
            # ...and by its visible ("positive space") part.
            dash_length = period * ratio
            # Rescale so the last dash ends exactly at the curve's endpoint.
            alphas /= 1 - period + dash_length
            self.add(
                *(
                    vmobject.get_subcurve(alpha, alpha + dash_length)
                    for alpha in alphas[:-1]
                )
            )
        # Family is already taken care of by the get_subcurve implementation.
        self.match_style(vmobject, family=False)
| 34.0701 | 111 | 0.586962 |
9eaa5daf469df91d3fb440a9e36c6dd007657e59 | 1,374 | py | Python | examples/server_example.py | gcollic/ffai | bb3f6707f86d3c540dca47caf4c594a93f5eac43 | [
"Apache-2.0"
] | null | null | null | examples/server_example.py | gcollic/ffai | bb3f6707f86d3c540dca47caf4c594a93f5eac43 | [
"Apache-2.0"
] | null | null | null | examples/server_example.py | gcollic/ffai | bb3f6707f86d3c540dca47caf4c594a93f5eac43 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import ffai.web.server as server
import ffai.web.api as api
from ffai.ai.registry import make_bot
from ffai.core.model import Agent
# Import this to register bots
import examples.scripted_bot_example
import examples.grodbot
# Create some games
# Bot ("random" agent) vs. a human-controlled away team.
api.new_game(home_team_id="orc-1",
             away_team_id="human-1",
             home_agent=make_bot("random"),
             away_agent=Agent("Player 2", human=True))
# Scripted bot vs. human.
api.new_game(home_team_id="orc-1",
             away_team_id="human-1",
             home_agent=make_bot("scripted"),
             away_agent=Agent("Player 2", human=True))
# GrodBot vs. human.
api.new_game(home_team_id="orc-1",
             away_team_id="human-1",
             home_agent=make_bot("grodbot"),
             away_agent=Agent("Player 2", human=True))
# Human vs. human.
api.new_game(home_team_id="human-1",
             away_team_id="human-2",
             home_agent=Agent("Player 1", human=True),
             away_agent=Agent("Player 2", human=True))
# Bot vs. bot matchups (scripted and GrodBot mirror games).
api.new_game(home_team_id="human-1",
             away_team_id="human-2",
             home_agent=make_bot("scripted"),
             away_agent=make_bot("scripted"))
# NOTE(review): "GrodBot" is capitalized here but "grodbot" above --
# presumably the bot registry is case-insensitive; confirm.
api.new_game(home_team_id="human-1",
             away_team_id="human-2",
             home_agent=make_bot("GrodBot"),
             away_agent=make_bot("GrodBot"))
# Run server
server.start_server(debug=True, use_reloader=False) | 31.227273 | 54 | 0.639738 |
ed0c554ac9aaee8eda59f6c806f6a05792190b97 | 1,047 | py | Python | news_recommendation_service/click_log_processor_test.py | XinxinTang/News_Recommendation_System-AWS_version | 315bd5616c58a2762c01fbb20b163db5e792b5b5 | [
"Apache-2.0"
] | 4 | 2018-05-08T10:56:19.000Z | 2018-08-17T17:03:09.000Z | news_recommendation_service/click_log_processor_test.py | XinxinTang/News_Recommendation_System-AWS_version | 315bd5616c58a2762c01fbb20b163db5e792b5b5 | [
"Apache-2.0"
] | null | null | null | news_recommendation_service/click_log_processor_test.py | XinxinTang/News_Recommendation_System-AWS_version | 315bd5616c58a2762c01fbb20b163db5e792b5b5 | [
"Apache-2.0"
] | 2 | 2018-09-18T10:27:56.000Z | 2020-07-28T14:42:01.000Z | from news_recommendation_service import click_log_processor
import os
import sys
from datetime import datetime
# from sets import Set
import parameters
# import common package in parent directory
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'common'))
from common import AWS_mongodb_client
MONGODB_PREFERENCE_MODEL_TABLE_NAME = parameters.MONGODB_PREFERENCE_MODEL_TABLE_NAME
NEWS_TABLE_NAME = "newCollection"
NUM_OF_CLASSES = 17
# Start MongoDB before running following tests.
def test_basic():
    """Smoke-test handle_message: one click event should produce a
    preference model with NUM_OF_CLASSES entries for the test user.
    Requires a reachable MongoDB instance."""
    database = AWS_mongodb_client.get_db()
    collection = database[MONGODB_PREFERENCE_MODEL_TABLE_NAME]
    # Start from a clean slate for the test user.
    collection.delete_many({"userId": "test_user"})
    click_event = {
        "userId": "test_user",
        "newsId": "test_news",
        "timestamp": str(datetime.utcnow()),
    }
    click_log_processor.handle_message(click_event)
    model = collection.find_one({'userId': 'test_user'})
    assert model is not None
    assert len(model['preference']) == NUM_OF_CLASSES
    print('test_basic passed!')
if __name__ == "__main__":
    test_basic()
| 26.175 | 84 | 0.748806 |
92520ee9c05fe7c9adfca4755217156ca6340dd3 | 32,868 | py | Python | s2e_env/execution_trace/trace_entries.py | shijunjing/s2e-env | ba72dac30a6db65f87fea13f275003791fcf4052 | [
"BSD-3-Clause"
] | 1 | 2022-01-27T15:27:30.000Z | 2022-01-27T15:27:30.000Z | s2e_env/execution_trace/trace_entries.py | weizhou-chaojixx/s2e-env | ba72dac30a6db65f87fea13f275003791fcf4052 | [
"BSD-3-Clause"
] | null | null | null | s2e_env/execution_trace/trace_entries.py | weizhou-chaojixx/s2e-env | ba72dac30a6db65f87fea13f275003791fcf4052 | [
"BSD-3-Clause"
] | null | null | null | """
Copyright (c) 2017 Dependable Systems Laboratory, EPFL
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from abc import ABCMeta, abstractmethod
import binascii
import logging
import re
import struct
from enum import Enum
logger = logging.getLogger('trace_entries')
# Not much we can do about these for now
# pylint: disable=too-many-arguments
# pylint: disable=too-many-lines
# pylint: disable=too-many-instance-attributes
#
# The following code is a Python adaption of the C++ code in
# libs2eplugins/src/s2e/Plugins/ExecutionTracers/TraceEntries.h
#
class TraceEntryType(Enum):
    """
    The different types of trace entries that can be logged.
    """
    # NOTE: these values mirror the C++ enum in
    # libs2eplugins/src/s2e/Plugins/ExecutionTracers/TraceEntries.h (see the
    # module header) and are part of the on-disk trace format -- do not
    # renumber them.
    TRACE_MOD_LOAD = 0
    TRACE_MOD_UNLOAD = 1
    TRACE_PROC_UNLOAD = 2
    TRACE_CALL = 3
    TRACE_RET = 4
    TRACE_TB_START = 5
    TRACE_TB_END = 6
    TRACE_MODULE_DESC = 7
    TRACE_FORK = 8
    TRACE_CACHESIM = 9
    TRACE_TESTCASE = 10
    TRACE_BRANCHCOV = 11
    TRACE_MEMORY = 12
    TRACE_PAGEFAULT = 13
    TRACE_TLBMISS = 14
    TRACE_ICOUNT = 15
    TRACE_MEM_CHECKER = 16
    TRACE_EXCEPTION = 17
    TRACE_STATE_SWITCH = 18
    TRACE_TB_START_X64 = 19
    TRACE_TB_END_X64 = 20
    TRACE_BLOCK = 21
    TRACE_OSINFO = 22
    # Sentinel: number of entry types, not a real entry.
    TRACE_MAX = 23
class TraceEntryError(Exception):
    """Raised when a trace entry cannot be serialized or deserialized."""
class TraceEntry(object):
    """
    Abstract trace entry class.
    Defines how a particular trace entry is serialized when logged and
    deserialized when read from a log.
    Depending on the trace entry type (as defined by the ``TraceEntryType``
    enum), the format of a trace entry may be static or dynamic. If a trace
    entry format is static, then the corresponding trace entry will have a
    consistent size and format whenever it is serialized/deserialized. If a
    trace entry is not static, then the entry probably contains a variable
    number of elements that can only be determined at run-time.
    The static trace entry format is defined in the ``FORMAT`` class attribute
    and its size can be determined using the ``static_size`` class method.
    The dynamic trace entry format is defined in the ``_struct`` attribute
    and its size can be determined using the ``len`` function.
    Note that ``TraceEntry``'s default ``deserialize`` method will only work if
    the trace entry's format can be determined statically. Otherwise the user
    must implement their own deserialize routine (e.g. as is done in the
    ``TraceFork`` class). When calling a custom ``deserialize`` method the
    run-time size of the item **must** be provided.
    """
    # Abstract method
    # Python 2 style abstract-base-class declaration (this module uses
    # Python 2 idioms throughout, e.g. ``iteritems`` below).
    __metaclass__ = ABCMeta
    # struct format string for statically sized subclasses; None (or a
    # template containing '%d') when the layout is only known at run time.
    FORMAT = None
    def __init__(self, fmt=''):
        # Pre-compiled struct for this instance's concrete layout.
        self._struct = struct.Struct(fmt)
    def __len__(self):
        """
        The length of the object when serialized.
        """
        return self._struct.size
    def __nonzero__(self): # pylint: disable=no-self-use
        """
        Allows using tests like "if not item".
        __len__ may return False for some types of objects.
        """
        return True
    def as_dict(self): # pylint: disable=no-self-use
        """
        Get a dictionary representation of the trace entry.
        This method should be overwritten.
        """
        return {}
    def as_json_dict(self):
        """
        Get a dictionary suitable for JSON serialization.
        All binary data should be serialized to a JSON-compatible format.
        This method should be overwritten if as_dict may return binary data.
        """
        return self.as_dict()
    def __str__(self):
        # NOTE: ``iteritems`` is Python 2 only.
        return '%s(%s)' % (self.__class__.__name__,
                           ', '.join('%s=%s' % (key, value) for key, value in self.as_dict().iteritems()))
    @classmethod
    def static_size(cls):
        # Size of the statically-formatted entry; raises for dynamic formats
        # (whose FORMAT templates are not valid struct strings).
        try:
            return struct.calcsize(cls.FORMAT)
        except struct.error:
            raise TraceEntryError('Cannot statically determine the size of %s' % cls.__name__)
    @classmethod
    def deserialize(cls, data, size=None): # pylint: disable=unused-argument
        # Default deserializer: only valid for statically-formatted entries;
        # dynamic subclasses override this and use ``size``.
        try:
            unpacked_data = struct.unpack(cls.FORMAT, data)
            return cls(*unpacked_data)
        except struct.error:
            raise TraceEntryError('Cannot deserialize %s data' % cls.__name__)
    @abstractmethod
    def serialize(self):
        """
        Serializes the object using the given ``_struct`` property. The user
        must specify the order that elements are serialized.
        E.g.
        ```
        def serialize(self):
            return self._struct.pack(self._elems['foo'], self._elems['bar'])
        ```
        """
        raise NotImplementedError('Subclasses of TraceEntry must provide a '
                                  'serialize method')
class TraceItemHeader(TraceEntry):
    """Fixed-size header that precedes every entry in the trace log."""
    FORMAT = '<IIQQQQI'
    def __init__(self, type_, state_id, timestamp, address_space, pid, pc, size):
        super(TraceItemHeader, self).__init__(TraceItemHeader.FORMAT)
        # Stored as the enum member; use ``.value`` wherever the raw int is
        # needed (e.g. when packing).
        self._type = TraceEntryType(type_)
        self._state_id = state_id
        self._timestamp = timestamp
        self._address_space = address_space
        self._pid = pid
        self._pc = pc
        self._size = size
    def serialize(self):
        # BUG FIX: ``self._type`` is a TraceEntryType member; struct.pack
        # requires the integer value, not the enum object itself.
        return self._struct.pack(self._type.value,
                                 self._state_id,
                                 self._timestamp,
                                 self._address_space,
                                 self._pid,
                                 self._pc,
                                 self._size)
    def as_dict(self):
        # NOTE: 'address_space' is snake_case while the other keys are
        # camelCase; kept as-is for compatibility with existing consumers.
        return {
            'type': self.type,
            'stateId': self.state_id,
            'timestamp': self.timestamp,
            'address_space': self.address_space,
            'pid': self.pid,
            'pc': self.pc,
            'size': self.size,
        }
    @property
    def type(self):
        return self._type
    @property
    def state_id(self):
        return self._state_id
    @property
    def timestamp(self):
        return self._timestamp
    @property
    def address_space(self):
        return self._address_space
    @property
    def pid(self):
        return self._pid
    @property
    def pc(self):
        return self._pc
    @property
    def size(self):
        return self._size
class TraceModuleLoad(TraceEntry):
    """Trace entry recording that a binary module was loaded."""
    FORMAT = '<32s256sQQQQQ'
    def __init__(self, name, path, load_base, native_base, size, address_space,
                 pid):
        super(TraceModuleLoad, self).__init__(TraceModuleLoad.FORMAT)
        self._name = name
        self._path = path
        self._load_base = load_base
        self._native_base = native_base
        self._size = size
        self._address_space = address_space
        self._pid = pid
    def serialize(self):
        fields = (self._name, self._path, self._load_base, self._native_base,
                  self._size, self._address_space, self._pid)
        return self._struct.pack(*fields)
    def as_dict(self):
        return {
            'name': self.name,
            'path': self.path,
            'loadBase': self.load_base,
            'nativeBase': self.native_base,
            'size': self.size,
            'addressSpace': self.address_space,
            'pid': self.pid,
        }
    @property
    def name(self):
        # Fixed-width field: strip the NUL padding.
        return self._name.rstrip('\0')
    @property
    def path(self):
        return self._path.rstrip('\0')
    @property
    def load_base(self):
        return self._load_base
    @property
    def native_base(self):
        return self._native_base
    @property
    def size(self):
        return self._size
    @property
    def address_space(self):
        return self._address_space
    @property
    def pid(self):
        return self._pid
class TraceModuleUnload(TraceEntry):
    """Trace entry recording that a module was unloaded."""
    FORMAT = '<QQQ'
    def __init__(self, load_base, address_space, pid):
        super(TraceModuleUnload, self).__init__(TraceModuleUnload.FORMAT)
        self._load_base = load_base
        self._address_space = address_space
        self._pid = pid
    def serialize(self):
        # BUG FIX: FORMAT declares three 64-bit fields but only load_base
        # was being packed, which raised struct.error at runtime.
        return self._struct.pack(self._load_base,
                                 self._address_space,
                                 self._pid)
    def as_dict(self):
        return {
            'load_base': self.load_base,
            'address_space': self.address_space,
            'pid': self.pid,
        }
    @property
    def load_base(self):
        return self._load_base
    @property
    def address_space(self):
        return self._address_space
    @property
    def pid(self):
        return self._pid
class TraceProcessUnload(TraceEntry):
    """Trace entry recording a process exit and its return code."""
    FORMAT = '<Q'
    def __init__(self, return_code):
        super(TraceProcessUnload, self).__init__(TraceProcessUnload.FORMAT)
        self._return_code = return_code
    def serialize(self):
        return self._struct.pack(self._return_code)
    def as_dict(self):
        return {'returnCode': self.return_code}
    @property
    def return_code(self):
        return self._return_code
class TraceCall(TraceEntry):
    """Trace entry for a function call (source pc -> target pc)."""
    FORMAT = '<QQ'
    def __init__(self, source, target):
        super(TraceCall, self).__init__(TraceCall.FORMAT)
        self._source = source
        self._target = target
    def serialize(self):
        return self._struct.pack(self._source, self._target)
    def as_dict(self):
        return {'source': self._source, 'target': self._target}
    @property
    def source(self):
        return self._source
    @property
    def target(self):
        return self._target
class TraceReturn(TraceEntry):
    """Trace entry for a function return (source pc -> target pc)."""
    FORMAT = '<QQ'
    def __init__(self, source, target):
        super(TraceReturn, self).__init__(TraceReturn.FORMAT)
        self._source = source
        self._target = target
    def serialize(self):
        return self._struct.pack(self._source, self._target)
    def as_dict(self):
        return {'source': self._source, 'target': self._target}
    @property
    def source(self):
        return self._source
    @property
    def target(self):
        return self._target
class TraceFork(TraceEntry):
    """Trace entry listing the state ids produced by a state fork.

    The on-disk layout is a 32-bit child count followed by that many
    32-bit child state ids, so the format is only known at run time.
    """
    FORMAT = '<I%dI'
    def __init__(self, children):
        super(TraceFork, self).__init__(TraceFork.FORMAT % len(children))
        self._children = children
    @classmethod
    def deserialize(cls, data, size=None):
        if not size:
            raise TraceEntryError('A size must be provided when deserializing '
                                  'a ``TraceFork`` item')
        uint_size = struct.calcsize('<I')
        num_children = (size - uint_size) / uint_size
        fields = struct.unpack(TraceFork.FORMAT % num_children, data)
        # fields[0] is the child count; the remainder are the state ids.
        return TraceFork(fields[1:])
    def serialize(self):
        return self._struct.pack(len(self._children), *self._children)
    def as_dict(self):
        return {'children': self.children}
    @property
    def children(self):
        return self._children
class TraceBranchCoverage(TraceEntry):
    """Trace entry recording a covered branch (pc -> dest_pc)."""
    FORMAT = '<QQ'
    def __init__(self, pc, dest_pc):
        super(TraceBranchCoverage, self).__init__(TraceBranchCoverage.FORMAT)
        self._pc = pc
        self._dest_pc = dest_pc
    def serialize(self):
        return self._struct.pack(self._pc, self._dest_pc)
    def as_dict(self):
        return {'pc': self._pc, 'destPc': self._dest_pc}
    @property
    def pc(self):
        return self._pc
    @property
    def dest_pc(self):
        return self._dest_pc
class TraceCacheSimType(Enum):
    # Discriminator for the three kinds of cache-simulator trace records.
    CACHE_PARAMS = 0
    CACHE_NAME = 1
    CACHE_ENTRY = 2
class TraceCacheSimParams(TraceEntry):
    """Cache-simulator record describing one cache's geometry."""
    FORMAT = '<BIIIII'
    def __init__(self, type_, cache_id, size, line_size, associativity,
                 upper_cache_id):
        super(TraceCacheSimParams, self).__init__(TraceCacheSimParams.FORMAT)
        self._type = type_
        self._cache_id = cache_id
        self._size = size
        self._line_size = line_size
        self._associativity = associativity
        self._upper_cache_id = upper_cache_id
    def serialize(self):
        fields = (self._type, self._cache_id, self._size, self._line_size,
                  self._associativity, self._upper_cache_id)
        return self._struct.pack(*fields)
    def as_dict(self):
        return {
            'type': self.type,
            'cacheId': self.cache_id,
            'size': self.size,
            'lineSize': self.line_size,
            'associativity': self.associativity,
            'upperCacheId': self.upper_cache_id,
        }
    @property
    def type(self):
        # Expose the raw byte as its enum member.
        return TraceCacheSimType(self._type)
    @property
    def cache_id(self):
        return self._cache_id
    @property
    def size(self):
        return self._size
    @property
    def line_size(self):
        return self._line_size
    @property
    def associativity(self):
        return self._associativity
    @property
    def upper_cache_id(self):
        return self._upper_cache_id
class TraceCacheSimName(TraceEntry):
    """Cache-simulator record binding a cache id to its name.

    Layout: type byte, 32-bit id, 32-bit name length, then the name bytes,
    so the format is only known at run time.
    """
    FORMAT = '<BII%ds'
    def __init__(self, type_, id_, name):
        super(TraceCacheSimName, self).__init__(TraceCacheSimName.FORMAT % len(name))
        self._type = type_
        self._id = id_
        self._name = name
    @classmethod
    def deserialize(cls, data, size=None):
        if not size:
            raise TraceEntryError('A size must be provided when deserializing '
                                  'a ``TraceCacheSimName`` item')
        length = (size - struct.calcsize('<BII')) / struct.calcsize('<c')
        unpacked_data = struct.unpack(TraceCacheSimName.FORMAT % length, data)
        # BUG FIX: the unpacked tuple is (type, id, name_length, name) but
        # the constructor takes (type_, id_, name); passing *unpacked_data
        # raised TypeError.  Drop the redundant length field instead.
        return TraceCacheSimName(unpacked_data[0], unpacked_data[1],
                                 unpacked_data[3])
    def serialize(self):
        return self._struct.pack(self._type,
                                 self._id,
                                 len(self._name),
                                 self._name)
    def as_dict(self):
        return {
            'type': self.type,
            'id': self.id,
            'name': self.name,
        }
    @property
    def type(self):
        return TraceCacheSimType(self._type)
    @property
    def id(self):
        return self._id
    @property
    def name(self):
        return self._name
class TraceCacheSimEntry(TraceEntry):
    """Cache-simulator record for a single memory access."""
    FORMAT = '<BBQQBBBB'
    def __init__(self, type_, cache_id, pc, address, size, is_write, is_code,
                 miss_count):
        super(TraceCacheSimEntry, self).__init__(TraceCacheSimEntry.FORMAT)
        self._type = type_
        self._cache_id = cache_id
        self._pc = pc
        self._address = address
        self._size = size
        self._is_write = is_write
        self._is_code = is_code
        self._miss_count = miss_count
    def serialize(self):
        fields = (self._type, self._cache_id, self._pc, self._address,
                  self._size, self._is_write, self._is_code, self._miss_count)
        return self._struct.pack(*fields)
    def as_dict(self):
        return {
            'type': self.type,
            'cacheId': self.cache_id,
            'pc': self.pc,
            'address': self.address,
            'size': self.size,
            'isWrite': self.is_write,
            'isCode': self.is_code,
            'missCount': self.miss_count,
        }
    @property
    def type(self):
        return TraceCacheSimType(self._type)
    @property
    def cache_id(self):
        return self._cache_id
    @property
    def pc(self):
        return self._pc
    @property
    def address(self):
        return self._address
    @property
    def size(self):
        return self._size
    @property
    def is_write(self):
        return self._is_write
    @property
    def is_code(self):
        return self._is_code
    @property
    def miss_count(self):
        return self._miss_count
class TraceCache(TraceEntry):
    # Composite cache-simulator record: a type byte plus embedded
    # TraceCacheSimParams, name bytes and TraceCacheSimEntry payloads.
    FORMAT = '<B{params}s%ds{entry}s'.format(params=TraceCacheSimParams.static_size(),
                                             entry=TraceCacheSimEntry.static_size())
    def __init__(self, type_, params, name, entry):
        super(TraceCache, self).__init__(TraceCache.FORMAT % len(name))
        self._type = type_
        self._params = params
        self._name = name
        self._entry = entry
    @classmethod
    def deserialize(cls, data, size=None):
        if not size:
            raise TraceEntryError('A size must be provided when deserializing '
                                  'a ``TraceCache`` item')
        name_length = (size - struct.calcsize('<B') - TraceCacheSimParams.static_size() -
                       TraceCacheSimEntry.static_size()) / struct.calcsize('<c')
        unpacked_data = struct.unpack(TraceCache.FORMAT % name_length, data)
        params = TraceCacheSimParams.deserialize(unpacked_data[1])
        # NOTE(review): unpacked_data[2] holds only the raw name bytes, yet it
        # is fed to TraceCacheSimName.deserialize, which expects a full
        # '<BII...' record and will mis-compute the name length -- confirm
        # against the C++ trace writer.
        name = TraceCacheSimName.deserialize(unpacked_data[2], name_length)
        entry = TraceCacheSimEntry.deserialize(unpacked_data[3])
        return TraceCache(unpacked_data[0], params, name, entry)
    def serialize(self):
        # Embedded sub-entries are serialized to bytes and packed as strings.
        return self._struct.pack(self._type,
                                 self._params.serialize(),
                                 self._name.serialize(),
                                 self._entry.serialize())
    def as_dict(self):
        return {
            'type': self.type,
            'params': self.params,
            'name': self.name,
            'entry': self.entry,
        }
    @property
    def type(self):
        return self._type
    @property
    def params(self):
        return self._params
    @property
    def name(self):
        return self._name
    @property
    def entry(self):
        return self._entry
class TraceMemChecker(TraceEntry):
    """Memory-checker record describing an access-rights change on a range."""
    class Flags(Enum):
        GRANT = 1
        REVOKE = 2
        READ = 4
        WRITE = 8
        EXECUTE = 16
        RESOURCE = 32
    FORMAT = '<QIII%ds'
    def __init__(self, start, size, flags, name):
        # BUG FIX: FORMAT contains a '%ds' placeholder, so it must be
        # interpolated with the name length before building the Struct;
        # struct.Struct('<QIII%ds') raises struct.error immediately.
        super(TraceMemChecker, self).__init__(TraceMemChecker.FORMAT % len(name))
        self._start = start
        self._size = size
        self._flags = flags
        self._name = name
    @classmethod
    def deserialize(cls, data, size=None):
        if not size:
            raise TraceEntryError('A size must be provided when deserializing '
                                  'a ``TraceMemChecker`` item')
        name_length = (size - struct.calcsize('<QIII')) / struct.calcsize('<c')
        unpacked_data = struct.unpack(TraceMemChecker.FORMAT % name_length, data)
        return TraceMemChecker(*unpacked_data)
    def serialize(self):
        return self._struct.pack(self._start,
                                 self._size,
                                 self._flags,
                                 len(self._name),
                                 self._name)
    def as_dict(self):
        return {
            'start': self.start,
            'size': self.size,
            'flags': self.flags,
            'name': self.name,
        }
    @property
    def start(self):
        return self._start
    @property
    def size(self):
        return self._size
    @property
    def flags(self):
        return self._flags
    @property
    def name(self):
        return self._name
class TraceTestCase(TraceEntry):
    """
    A test case payload consists of a sequence of <header, name, data> entries,
    where header describes the length of the string name and the size of the data.
    """
    # Per-item header: 32-bit name length followed by 32-bit data size.
    HEADER_FORMAT = '<II'
    HEADER_SIZE = struct.calcsize(HEADER_FORMAT)
    """
    S2E generates one test case entry for each symbolic variable in the following format: vXXX_var_name_YYY
    XXX represents the sequence of the variable in the current state. For example, XXX=2 means that this was
    the variable associated with the 3rd call of s2e_make_symbolic.
    YYY represents the absolute sequence number since S2E start. For example, YYY=999 means that this was the
    1000th invocation of s2e_make_symbolic since S2E started. Note that this number may be pretty random depending
    on the specific schedule of paths in a given S2E run.
    """
    ENTRY_PATTERN = r'^v(\d+)_(.+?)_\d+$'
    ENTRY_REGEX = re.compile(ENTRY_PATTERN)
    def __init__(self, data):
        super(TraceTestCase, self).__init__()
        # Raw payload; consumed (and emptied) while parsing the items below.
        self._data = data
        self._testcase = {}
        self._initialize_test_case_items()
        # Re-shaped into an ordered list of (var_name, value) pairs.
        self._testcase = TraceTestCase._parse_test_case_entries(self._testcase)
    @classmethod
    def deserialize(cls, data, size=None):
        # A zero size is valid for a test case
        if size is None:
            raise TraceEntryError('A size must be provided when deserializing a ``TraceTestCase`` item')
        return TraceTestCase(data)
    def serialize(self):
        raise NotImplementedError('Unable to serialize trace test cases')
    def _read_test_case_item(self):
        # Pop one <header, name, data> record off the front of self._data.
        name_size, data_size = struct.unpack(TraceTestCase.HEADER_FORMAT, self._data[:TraceTestCase.HEADER_SIZE])
        self._data = self._data[TraceTestCase.HEADER_SIZE:]
        entry = '%ds%ds' % (name_size, data_size)
        entry_size = struct.calcsize(entry)
        tc = struct.unpack(entry, self._data[:entry_size])
        self._data = self._data[entry_size:]
        return tc
    def _initialize_test_case_items(self):
        # Consume the whole payload into a name -> value mapping.
        while self._data:
            tc = self._read_test_case_item()
            self._testcase[tc[0]] = tc[1]
    @staticmethod
    def _parse_test_case_entries(entries):
        """
        Returns an ordered array according to vXXX so that it is easier to manipulate test cases.
        """
        ret = []
        for k, v in entries.iteritems():
            result = TraceTestCase.ENTRY_REGEX.match(k)
            if not result:
                logger.warn('Invalid test case entry: %s', k)
                continue
            local_seq = int(result.group(1))
            # Grow the list with placeholders so the index assignment works.
            while local_seq >= len(ret):
                ret.append(None)
            # Ignore the absolute sequence number, it's not really useful
            var_name = result.group(2)
            ret[local_seq] = (var_name, v)
        return ret
    def as_dict(self):
        return {'testcase': self._testcase}
    def as_json_dict(self):
        # Hex-encode the binary values so the result is JSON-serializable.
        ret = [(var_name, binascii.hexlify(v)) for var_name, v in self._testcase]
        return {'testcase': ret}
    @property
    def testcase(self):
        return self._testcase
class TraceMemory(TraceEntry):
    """Trace entry for a single memory access."""
    FORMAT = '<QQQBBQQ'
    def __init__(self, pc, address, value, size, flags, host_address,
                 concrete_buffer):
        super(TraceMemory, self).__init__(TraceMemory.FORMAT)
        self._pc = pc
        self._address = address
        self._value = value
        self._size = size
        self._flags = flags
        self._host_address = host_address
        self._concrete_buffer = concrete_buffer
    def serialize(self):
        fields = (self._pc, self._address, self._value, self._size,
                  self._flags, self._host_address, self._concrete_buffer)
        return self._struct.pack(*fields)
    def as_dict(self):
        return {
            'pc': self.pc,
            'address': self.address,
            'value': self.value,
            'size': self.size,
            'flags': self.flags,
            'hostAddress': self.host_address,
            'concreteBuffer': self.concrete_buffer,
        }
    @property
    def pc(self):
        return self._pc
    @property
    def address(self):
        return self._address
    @property
    def value(self):
        return self._value
    @property
    def size(self):
        return self._size
    @property
    def flags(self):
        return self._flags
    @property
    def host_address(self):
        return self._host_address
    @property
    def concrete_buffer(self):
        return self._concrete_buffer
class TracePageFault(TraceEntry):
    """Trace entry for a page fault at ``address`` triggered from ``pc``."""
    FORMAT = '<QQB'
    def __init__(self, pc, address, is_write):
        super(TracePageFault, self).__init__(TracePageFault.FORMAT)
        self._pc = pc
        self._address = address
        self._is_write = is_write
    def serialize(self):
        return self._struct.pack(self._pc, self._address, self._is_write)
    def as_dict(self):
        return {
            'pc': self._pc,
            'address': self._address,
            'isWrite': self._is_write,
        }
    @property
    def pc(self):
        return self._pc
    @property
    def address(self):
        return self._address
    @property
    def is_write(self):
        return self._is_write
class TraceTLBMiss(TraceEntry):
    """Trace entry for a TLB miss at ``address`` triggered from ``pc``."""
    FORMAT = '<QQB'
    def __init__(self, pc, address, is_write):
        super(TraceTLBMiss, self).__init__(TraceTLBMiss.FORMAT)
        self._pc = pc
        self._address = address
        self._is_write = is_write
    def serialize(self):
        return self._struct.pack(self._pc, self._address, self._is_write)
    def as_dict(self):
        return {
            'pc': self._pc,
            'address': self._address,
            'isWrite': self._is_write,
        }
    @property
    def pc(self):
        return self._pc
    @property
    def address(self):
        return self._address
    @property
    def is_write(self):
        return self._is_write
class TraceInstructionCount(TraceEntry):
    """Trace entry carrying an executed-instruction count."""
    FORMAT = '<Q'
    def __init__(self, count):
        super(TraceInstructionCount, self).__init__(TraceInstructionCount.FORMAT)
        self._count = count
    def serialize(self):
        return self._struct.pack(self._count)
    def as_dict(self):
        return {'count': self.count}
    @property
    def count(self):
        return self._count
class TraceTranslationBlock(TraceEntry):
    # Trace entry describing one translated block, its type, flags and the
    # concrete values of the eight 32-bit general-purpose registers.
    class TranslationBlockType(Enum):
        TB_DEFAULT = 0
        TB_JMP = 1
        TB_JMP_IND = 2
        TB_COND_JMP = 3
        TB_COND_JMP_IND = 4
        TB_CALL = 5
        TB_CALL_IND = 6
        TB_REP = 7
        TB_RET = 8
    class X86Registers(Enum):
        # Index of each register within the ``registers`` tuple.
        EAX = 0
        ECX = 1
        EDX = 2
        EBX = 3
        ESP = 4
        EBP = 5
        ESI = 6
        EDI = 7
    class TranslationBlockFlags(Enum):
        RUNNING_CONCRETE = 1 << 0
        RUNNING_EXCEPTION_EMULATION_CODE = 1 << 1
    FORMAT = '<QQIBBB8Q'
    def __init__(self, pc, target_pc, size, tb_type, flags, symb_mask,
                 registers):
        super(TraceTranslationBlock, self).__init__(TraceTranslationBlock.FORMAT)
        self._pc = pc
        self._target_pc = target_pc
        self._size = size
        self._tb_type = tb_type
        self._flags = flags
        self._symb_mask = symb_mask
        self._registers = registers
    def serialize(self):
        return self._struct.pack(self._pc,
                                 self._target_pc,
                                 self._size,
                                 self._tb_type,
                                 self._flags,
                                 self._symb_mask,
                                 *self._registers)
    def as_dict(self):
        return {
            'pc': self.pc,
            'targetPc': self.target_pc,
            'size': self.size,
            'tbType': self.tb_type,
            'flags': self.flags,
            'symbMask': self.symb_mask,
            'registers': self.registers,
        }
    @property
    def pc(self):
        return self._pc
    @property
    def target_pc(self):
        return self._target_pc
    @property
    def size(self):
        return self._size
    @property
    def tb_type(self):
        return TraceTranslationBlock.TranslationBlockType(self._tb_type)
    @property
    def flags(self):
        # NOTE(review): TranslationBlockFlags is a plain Enum, so a combined
        # bitmask (e.g. 3) would raise ValueError here -- confirm whether the
        # tracer can emit combined flag values.
        return TraceTranslationBlock.TranslationBlockFlags(self._flags)
    @property
    def symb_mask(self):
        return self._symb_mask
    @property
    def registers(self):
        return self._registers
class TraceBlock(TraceEntry):
    """Trace entry describing one basic block (start/end pc and its type)."""
    class TranslationBlockType(Enum):
        TB_DEFAULT = 0
        TB_JMP = 1
        TB_JMP_IND = 2
        TB_COND_JMP = 3
        TB_COND_JMP_IND = 4
        TB_CALL = 5
        TB_CALL_IND = 6
        TB_REP = 7
        TB_RET = 8
    FORMAT = '<QQB'
    def __init__(self, start_pc, end_pc, tb_type):
        super(TraceBlock, self).__init__(TraceBlock.FORMAT)
        self._start_pc = start_pc
        self._end_pc = end_pc
        self._tb_type = tb_type
    def serialize(self):
        fields = (self._start_pc, self._end_pc, self._tb_type)
        return self._struct.pack(*fields)
    def as_dict(self):
        return {
            'startPc': self._start_pc,
            'endPc': self._end_pc,
            'tbType': self.tb_type,
        }
    @property
    def start_pc(self):
        return self._start_pc
    @property
    def end_pc(self):
        return self._end_pc
    @property
    def tb_type(self):
        # Expose the raw byte as its enum member.
        return TraceBlock.TranslationBlockType(self._tb_type)
class TraceTranslationBlock64(TraceEntry):
    """64-bit extension of a translation-block entry.

    Embeds a serialized :class:`TraceTranslationBlock` followed by a symbolic
    mask byte and the eight extended (r8-r15) 64-bit registers.
    """
    # BUG FIX: the original FORMAT was '<SB8Q', but 'S' is not a valid struct
    # format character, so struct.Struct() raised on every instantiation.
    # The first field is the serialized base entry, stored as a fixed-size
    # byte string.
    FORMAT = '<%dsB8Q' % TraceTranslationBlock.static_size()
    def __init__(self, base, symb_mask, extended_registers):
        super(TraceTranslationBlock64, self).__init__(TraceTranslationBlock64.FORMAT)
        self._base = base
        self._symb_mask = symb_mask
        self._extended_registers = extended_registers
    def serialize(self):
        return self._struct.pack(self._base.serialize(),
                                 self._symb_mask,
                                 *self._extended_registers)
    def as_dict(self):
        return {
            'base': self.base,
            'symbMask': self.symb_mask,
            'extendedRegisters': self.extended_registers,
        }
    @property
    def base(self):
        return self._base
    @property
    def symb_mask(self):
        return self._symb_mask
    @property
    def extended_registers(self):
        return self._extended_registers
class TraceException(TraceEntry):
    """Trace entry for a CPU exception (pc and exception vector)."""
    FORMAT = '<QI'
    def __init__(self, pc, vector):
        super(TraceException, self).__init__(TraceException.FORMAT)
        self._pc = pc
        self._vector = vector
    def serialize(self):
        return self._struct.pack(self._pc, self._vector)
    def as_dict(self):
        return {'pc': self._pc, 'vector': self._vector}
    @property
    def pc(self):
        return self._pc
    @property
    def vector(self):
        return self._vector
class TraceStateSwitch(TraceEntry):
    """Trace entry emitted when execution switches to another state."""

    # Single little-endian unsigned 32-bit integer: the new state id.
    FORMAT = '<I'

    def __init__(self, new_state_id):
        super(TraceStateSwitch, self).__init__(TraceStateSwitch.FORMAT)
        self._new_state_id = new_state_id

    @property
    def new_state_id(self):
        """Identifier of the state execution switched to."""
        return self._new_state_id

    def serialize(self):
        # Pack the sole field according to FORMAT.
        return self._struct.pack(self.new_state_id)

    def as_dict(self):
        return dict(newStateId=self.new_state_id)
class TraceOSInfo(TraceEntry):
    """Trace entry carrying guest-OS layout information."""

    # Single little-endian unsigned 64-bit integer: kernel start address.
    FORMAT = '<Q'

    def __init__(self, kernel_start):
        super(TraceOSInfo, self).__init__(TraceOSInfo.FORMAT)
        self._kernel_start = kernel_start

    @property
    def kernel_start(self):
        """Start address of the guest kernel."""
        return self._kernel_start

    def serialize(self):
        # Pack the sole field according to FORMAT.
        return self._struct.pack(self.kernel_start)

    def as_dict(self):
        # NOTE: key is snake_case unlike other entries' camelCase keys;
        # kept byte-identical for compatibility with existing consumers.
        return dict(kernel_start=self.kernel_start)
| 26.765472 | 114 | 0.590635 |
c1043155b28251fcef01fdd8b447b9d8d2c6ac1b | 944 | py | Python | practice/20210310.py | yanghao2013/algo | d1173f7582b64f9154acb30e852dc3f0d0d6ba5e | [
"Apache-2.0"
] | null | null | null | practice/20210310.py | yanghao2013/algo | d1173f7582b64f9154acb30e852dc3f0d0d6ba5e | [
"Apache-2.0"
] | null | null | null | practice/20210310.py | yanghao2013/algo | d1173f7582b64f9154acb30e852dc3f0d0d6ba5e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
@Time : 2021-03-10 00:00
@Author : Hao
"""
class Solution:
    """Solutions to two LeetCode string exercises."""

    def replaceSpaces(self, S: str, length: int) -> str:
        """URLify: replace every space within the first ``length``
        characters of ``S`` with ``'%20'`` and drop the trailing padding.

        https://leetcode-cn.com/problems/string-to-url-lcci
        """
        # Slice off the padding first, then let str.replace do one C-level
        # pass -- avoids the quadratic ``s += ...`` string build.
        return S[:length].replace(' ', '%20')

    def restoreString(self, s: str, indices: list[int]) -> str:
        """Shuffle ``s`` so that the i-th character of ``s`` ends up at
        position ``indices[i]``; return the rearranged string.

        https://leetcode-cn.com/problems/shuffle-string
        """
        # Place each character directly into its target slot, then join once.
        shuffled = [''] * len(s)
        for ch, pos in zip(s, indices):
            shuffled[pos] = ch
        return ''.join(shuffled)
2142198902ade5f3c5567e1e06e42c646acf667e | 434 | py | Python | ex2/admin.py | pjdufour/ex2 | d1ef05a2e0b59c76266dc0a809be74af2618a154 | [
"BSD-3-Clause"
] | null | null | null | ex2/admin.py | pjdufour/ex2 | d1ef05a2e0b59c76266dc0a809be74af2618a154 | [
"BSD-3-Clause"
] | null | null | null | ex2/admin.py | pjdufour/ex2 | d1ef05a2e0b59c76266dc0a809be74af2618a154 | [
"BSD-3-Clause"
] | null | null | null | from django.contrib import admin
from .models import Country, HotSpot
class CountryAdmin(admin.ModelAdmin):
model = Country
list_display_links = ('id',)
list_display = ('id', 'name', 'iso2', 'iso3', 'pop2005')
class HotSpotAdmin(admin.ModelAdmin):
model = HotSpot
list_display_links = ('id', )
list_display = ('id', )
# Register both models with their custom admin classes so they appear
# in the Django admin site.
admin.site.register(Country, CountryAdmin)
admin.site.register(HotSpot, HotSpotAdmin)
| 22.842105 | 60 | 0.702765 |
5fe6b8b2f37d03e29245f1688021e694d98d340f | 3,055 | py | Python | tests/test_wrap.py | dhirschfeld/pyfilesystem2 | b2c0d96f55d4dfe777b4f9476676b77d01f36bf7 | [
"MIT"
] | null | null | null | tests/test_wrap.py | dhirschfeld/pyfilesystem2 | b2c0d96f55d4dfe777b4f9476676b77d01f36bf7 | [
"MIT"
] | null | null | null | tests/test_wrap.py | dhirschfeld/pyfilesystem2 | b2c0d96f55d4dfe777b4f9476676b77d01f36bf7 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
import unittest
from fs import errors
from fs import open_fs
from fs import wrap
class TestWrap(unittest.TestCase):
def test_readonly(self):
mem_fs = open_fs("mem://")
fs = wrap.read_only(mem_fs)
with self.assertRaises(errors.ResourceReadOnly):
fs.open("foo", "w")
with self.assertRaises(errors.ResourceReadOnly):
fs.appendtext("foo", "bar")
with self.assertRaises(errors.ResourceReadOnly):
fs.appendbytes("foo", b"bar")
with self.assertRaises(errors.ResourceReadOnly):
fs.makedir("foo")
with self.assertRaises(errors.ResourceReadOnly):
fs.move("foo", "bar")
with self.assertRaises(errors.ResourceReadOnly):
fs.openbin("foo", "w")
with self.assertRaises(errors.ResourceReadOnly):
fs.remove("foo")
with self.assertRaises(errors.ResourceReadOnly):
fs.removedir("foo")
with self.assertRaises(errors.ResourceReadOnly):
fs.setinfo("foo", {})
with self.assertRaises(errors.ResourceReadOnly):
fs.settimes("foo", {})
with self.assertRaises(errors.ResourceReadOnly):
fs.copy("foo", "bar")
with self.assertRaises(errors.ResourceReadOnly):
fs.create("foo")
with self.assertRaises(errors.ResourceReadOnly):
fs.settext("foo", "bar")
with self.assertRaises(errors.ResourceReadOnly):
fs.setbytes("foo", b"bar")
with self.assertRaises(errors.ResourceReadOnly):
fs.makedirs("foo/bar")
with self.assertRaises(errors.ResourceReadOnly):
fs.touch("foo")
with self.assertRaises(errors.ResourceReadOnly):
fs.setbinfile("foo", None)
with self.assertRaises(errors.ResourceReadOnly):
fs.setfile("foo", None)
self.assertTrue(mem_fs.isempty("/"))
mem_fs.setbytes("file", b"read me")
with fs.openbin("file") as read_file:
self.assertEqual(read_file.read(), b"read me")
with fs.open("file", "rb") as read_file:
self.assertEqual(read_file.read(), b"read me")
def test_cachedir(self):
mem_fs = open_fs("mem://")
mem_fs.makedirs("foo/bar/baz")
mem_fs.touch("egg")
fs = wrap.cache_directory(mem_fs)
self.assertEqual(sorted(fs.listdir("/")), ["egg", "foo"])
self.assertEqual(sorted(fs.listdir("/")), ["egg", "foo"])
self.assertTrue(fs.isdir("foo"))
self.assertTrue(fs.isdir("foo"))
self.assertTrue(fs.isfile("egg"))
self.assertTrue(fs.isfile("egg"))
self.assertEqual(fs.getinfo("foo"), mem_fs.getinfo("foo"))
self.assertEqual(fs.getinfo("foo"), mem_fs.getinfo("foo"))
self.assertEqual(fs.getinfo("/"), mem_fs.getinfo("/"))
self.assertEqual(fs.getinfo("/"), mem_fs.getinfo("/"))
with self.assertRaises(errors.ResourceNotFound):
fs.getinfo("/foofoo")
| 31.173469 | 66 | 0.608838 |
1e1cee0b187e7b1fd449bbfeec90105ed7d19af6 | 38,019 | py | Python | plexlibrary/recipe.py | mza921/python-plexlibrary | f111966cd4532755d91e3c6f181114ee46639309 | [
"BSD-3-Clause"
] | 5 | 2020-02-03T16:22:29.000Z | 2021-05-22T01:21:12.000Z | plexlibrary/recipe.py | mza921/python-plexlibrary | f111966cd4532755d91e3c6f181114ee46639309 | [
"BSD-3-Clause"
] | 5 | 2020-10-26T01:44:36.000Z | 2021-02-12T10:08:42.000Z | plexlibrary/recipe.py | mza921/python-plexlibrary | f111966cd4532755d91e3c6f181114ee46639309 | [
"BSD-3-Clause"
] | 2 | 2020-01-16T18:34:31.000Z | 2020-02-07T17:41:28.000Z | # -*- coding: utf-8 -*-
"""recipe
"""
import datetime
import errno
import os
import random
import subprocess
import sys
import time
import logs
import plexapi
import plexutils
import tmdb
import traktutils
import imdbutils
import tvdb
from config import ConfigParser
from recipes import RecipeParser
from utils import Colors, add_years
class Recipe(object):
plex = None
trakt = None
tmdb = None
tvdb = None
    def __init__(self, recipe_name, sort_only=False, config_file=None, use_playlists=False):
        """Load and validate config + recipe, then wire up service clients.

        Creates the Plex client unconditionally; Trakt/TMDb/TVDb clients
        only when their credentials are present in the config. Raises a
        plain Exception on validation failure or an unknown library type.
        NOTE(review): ``sort_only`` is accepted but never used here.
        """
        self.recipe_name = recipe_name
        self.use_playlists = use_playlists
        self.config = ConfigParser(config_file)
        self.recipe = RecipeParser(recipe_name)
        if not self.config.validate():
            raise Exception("Error(s) in config")
        if not self.recipe.validate(use_playlists=use_playlists):
            raise Exception("Error(s) in recipe")
        # Prefix match so e.g. 'movies' or 'tv shows' are accepted.
        if self.recipe['library_type'].lower().startswith('movie'):
            self.library_type = 'movie'
        elif self.recipe['library_type'].lower().startswith('tv'):
            self.library_type = 'tv'
        else:
            raise Exception("Library type should be 'movie' or 'tv'")
        self.source_library_config = self.recipe['source_libraries']
        self.plex = plexutils.Plex(self.config['plex']['baseurl'],
                                   self.config['plex']['token'])
        if self.config['trakt']['username']:
            self.trakt = traktutils.Trakt(
                self.config['trakt']['username'],
                client_id=self.config['trakt']['client_id'],
                client_secret=self.config['trakt']['client_secret'],
                oauth_token=self.config['trakt'].get('oauth_token', ''),
                oauth=self.recipe.get('trakt_oauth', False),
                config=self.config)
            # Persist a refreshed OAuth token back into the config object.
            if self.trakt.oauth_token:
                self.config['trakt']['oauth_token'] = self.trakt.oauth_token
        if self.config['tmdb']['api_key']:
            self.tmdb = tmdb.TMDb(
                self.config['tmdb']['api_key'],
                cache_file=self.config['tmdb']['cache_file'])
        if self.config['tvdb']['username']:
            self.tvdb = tvdb.TheTVDB(self.config['tvdb']['username'],
                                     self.config['tvdb']['api_key'],
                                     self.config['tvdb']['user_key'])
        # IMDb helper resolves ids via TMDb/TVDb (either may be None here,
        # falling back to the class-level defaults).
        self.imdb = imdbutils.IMDb(self.tmdb, self.tvdb)
def _get_trakt_lists(self):
item_list = [] # TODO Replace with dict, scrap item_ids?
item_ids = []
for url in self.recipe['source_list_urls']:
max_age = (self.recipe['new_playlist'].get('max_age', 0) if self.use_playlists
else self.recipe['new_library'].get('max_age', 0))
if 'api.trakt.tv' in url:
(item_list, item_ids) = self.trakt.add_items(
self.library_type, url, item_list, item_ids,
max_age or 0)
elif 'imdb.com/chart' in url:
(item_list, item_ids) = self.imdb.add_items(
self.library_type, url, item_list, item_ids,
max_age or 0)
else:
raise Exception("Unsupported source list: {url}".format(
url=url))
if self.recipe['weighted_sorting']['enabled']:
if self.config['tmdb']['api_key']:
logs.info(u"Getting data from TMDb to add weighted sorting...")
item_list = self.weighted_sorting(item_list)
else:
logs.warning(u"Warning: TMDd API key is required "
u"for weighted sorting")
return item_list, item_ids
def _get_plex_libraries(self):
source_libraries = []
for library_config in self.source_library_config:
logs.info(u"Trying to match with items from the '{}' library ".format(
library_config['name']))
try:
source_library = self.plex.server.library.section(
library_config['name'])
except: # FIXME
raise Exception("The '{}' library does not exist".format(
library_config['name']))
# FIXME: Hack until a new plexapi version is released. 3.0.4?
if 'guid' not in source_library.ALLOWED_FILTERS:
source_library.ALLOWED_FILTERS += ('guid',)
source_libraries.append(source_library)
return source_libraries
    def _get_matching_items(self, source_libraries, item_list):
        """Match source-list items against the Plex source libraries by guid.

        Searches each library by imdb/tmdb/tvdb guid in that order, stopping
        once a matching item is found. When not using playlists and absolute
        sort titles are disabled, non-matching entries are deleted from
        ``item_list`` in place.

        Returns:
            tuple: (matching_items, missing_items, matching_total,
                    nonmatching_idx, max_count)
        """
        matching_items = []
        missing_items = []
        matching_total = 0
        nonmatching_idx = []
        max_count = (self.recipe['new_playlist'].get('max_count', 0) if self.use_playlists
                     else self.recipe['new_library'].get('max_count', 0))
        for i, item in enumerate(item_list):
            match = False
            # Once max_count matches are found, the rest are skipped.
            if 0 < max_count <= matching_total:
                nonmatching_idx.append(i)
                continue
            res = []
            for source_library in source_libraries:
                lres = source_library.search(guid='imdb://' + str(item['id']))
                if not lres and item.get('tmdb_id'):
                    lres += source_library.search(
                        guid='themoviedb://' + str(item['tmdb_id']))
                if not lres and item.get('tvdb_id'):
                    lres += source_library.search(
                        guid='thetvdb://' + str(item['tvdb_id']))
                if lres:
                    res += lres
            if not res:
                missing_items.append((i, item))
                nonmatching_idx.append(i)
                continue
            for r in res:
                # Parse the matched item's guid back into one id kind.
                imdb_id = None
                tmdb_id = None
                tvdb_id = None
                if r.guid is not None and 'imdb://' in r.guid:
                    imdb_id = r.guid.split('imdb://')[1].split('?')[0]
                elif r.guid is not None and 'themoviedb://' in r.guid:
                    tmdb_id = r.guid.split('themoviedb://')[1].split('?')[0]
                elif r.guid is not None and 'thetvdb://' in r.guid:
                    tvdb_id = (r.guid.split('thetvdb://')[1]
                               .split('?')[0]
                               .split('/')[0])
                # NOTE(review): item['tmdb_id'] / item['tvdb_id'] are indexed
                # without .get() here -- presumably always present when the
                # corresponding guid kind matches; TODO confirm.
                if ((imdb_id and imdb_id == str(item['id']))
                        or (tmdb_id and tmdb_id == str(item['tmdb_id']))
                        or (tvdb_id and tvdb_id == str(item['tvdb_id']))):
                    if not match:
                        match = True
                        matching_total += 1
                        matching_items.append(r)
            if match:
                if not self.use_playlists and self.recipe['new_library']['sort_title']['absolute']:
                    logs.info(u"{} {} ({})".format(
                        i + 1, item['title'], item['year']))
                else:
                    logs.info(u"{} {} ({})".format(
                        matching_total, item['title'], item['year']))
            else:
                missing_items.append((i, item))
                nonmatching_idx.append(i)
        # Reverse order so earlier deletions don't shift later indices.
        if not self.use_playlists and not self.recipe['new_library']['sort_title']['absolute']:
            for i in reversed(nonmatching_idx):
                del item_list[i]
        return matching_items, missing_items, matching_total, nonmatching_idx, max_count
    def _create_symbolic_links(self, matching_items, matching_total):
        """Create symlinks in the new library folder for every matched item.

        Movies are linked per media part (file or containing folder); TV
        shows are linked once per show at the show folder level. On Windows
        ``mklink`` is shelled out; elsewhere ``os.symlink`` is used.
        Returns 0 only on early exit when the library folder can't be made.
        """
        logs.info(u"Creating symlinks for {count} matching items in the "
                  u"library...".format(count=matching_total))
        try:
            if not os.path.exists(self.recipe['new_library']['folder']):
                os.mkdir(self.recipe['new_library']['folder'])
        # NOTE(review): bare except masks the real failure cause (and would
        # even swallow KeyboardInterrupt) -- consider narrowing to OSError.
        except:
            logs.error(u"Unable to create the new library folder "
                       u"'{folder}'.".format(folder=self.recipe['new_library']['folder']))
            logs.info(u"Exiting script.")
            return 0
        count = 0
        updated_paths = []
        new_items = []
        if self.library_type == 'movie':
            for movie in matching_items:
                for part in movie.iterParts():
                    old_path_file = part.file
                    old_path, file_name = os.path.split(old_path_file)
                    folder_name = ''
                    # Find which configured source library root contains
                    # this part; folder_name becomes the path relative to it.
                    for library_config in self.source_library_config:
                        for f in self.plex.get_library_paths(library_name=library_config['name']):
                            f = os.path.abspath(f)
                            if old_path.lower().startswith(f.lower()):
                                folder_name = os.path.relpath(old_path, f)
                                break
                        else:
                            continue
                    # '.' means the file lives directly in the library root:
                    # link the file itself; otherwise link its folder.
                    # NOTE: 'dir' shadows the builtin of the same name.
                    if folder_name == '.':
                        new_path = os.path.join(
                            self.recipe['new_library']['folder'],
                            file_name)
                        dir = False
                    else:
                        new_path = os.path.join(
                            self.recipe['new_library']['folder'],
                            folder_name)
                        dir = True
                    parent_path = os.path.dirname(
                        os.path.abspath(new_path))
                    if not os.path.exists(parent_path):
                        try:
                            os.makedirs(parent_path)
                        except OSError as e:
                            # Ignore a benign race where the directory was
                            # created concurrently.
                            if e.errno == errno.EEXIST \
                                    and os.path.isdir(parent_path):
                                pass
                            else:
                                raise
                    # Clean up old, empty directories
                    if os.path.exists(new_path) \
                            and not os.listdir(new_path):
                        os.rmdir(new_path)
                    if (dir and not os.path.exists(new_path)) \
                            or not dir and not os.path.isfile(new_path):
                        try:
                            if os.name == 'nt':
                                if dir:
                                    subprocess.call(['mklink', '/D',
                                                     new_path, old_path],
                                                    shell=True)
                                else:
                                    subprocess.call(['mklink', new_path,
                                                     old_path_file],
                                                    shell=True)
                            else:
                                if dir:
                                    os.symlink(old_path, new_path)
                                else:
                                    os.symlink(old_path_file, new_path)
                            count += 1
                            new_items.append(movie)
                            updated_paths.append(new_path)
                        except Exception as e:
                            logs.error(u"Symlink failed for {path}: {e}".format(
                                path=new_path, e=e))
        else:
            for tv_show in matching_items:
                # 'done' ensures at most one link is created per show.
                done = False
                if done:
                    continue
                for episode in tv_show.episodes():
                    if done:
                        break
                    for part in episode.iterParts():
                        old_path_file = part.file
                        old_path, file_name = os.path.split(old_path_file)
                        folder_name = ''
                        for library_config in self.source_library_config:
                            for f in self.plex.get_library_paths(library_name=library_config['name']):
                                if old_path.lower().startswith(f.lower()):
                                    # Reduce to the show's top-level folder
                                    # directly under the library root.
                                    old_path = os.path.join(f,
                                                            old_path.replace(
                                                                f, '').strip(
                                                                os.sep).split(
                                                                os.sep)[0])
                                    folder_name = os.path.relpath(old_path, f)
                                    break
                            else:
                                continue
                        new_path = os.path.join(
                            self.recipe['new_library']['folder'],
                            folder_name)
                        if not os.path.exists(new_path):
                            try:
                                if os.name == 'nt':
                                    subprocess.call(['mklink', '/D',
                                                     new_path, old_path],
                                                    shell=True)
                                else:
                                    os.symlink(old_path, new_path)
                                count += 1
                                new_items.append(tv_show)
                                updated_paths.append(new_path)
                                done = True
                                break
                            except Exception as e:
                                logs.error(u"Symlink failed for {path}: {e}"
                                           .format(path=new_path, e=e))
                        else:
                            done = True
                            break
        logs.info(u"Created symlinks for {count} new items:".format(count=count))
        for item in new_items:
            logs.info(u"{title} ({year})".format(title=item.title, year=item.year))
    def _verify_new_library_and_get_items(self, create_if_not_found=False):
        """Ensure the target library exists, wait for it to settle, list it.

        If the library exists it is rescanned; otherwise it is created only
        when ``create_if_not_found`` is True, else an Exception is raised.
        Blocks (polling every 5s) until the library stops refreshing.

        Returns:
            tuple: (library section, list of all items in it)
        """
        # Check if the new library exists in Plex
        try:
            new_library = self.plex.server.library.section(
                self.recipe['new_library']['name'])
            logs.warning(u"Library already exists in Plex. Scanning the library...")
            new_library.update()
        except plexapi.exceptions.NotFound:
            if create_if_not_found:
                self.plex.create_new_library(
                    self.recipe['new_library']['name'],
                    self.recipe['new_library']['folder'],
                    self.library_type)
                new_library = self.plex.server.library.section(
                    self.recipe['new_library']['name'])
            else:
                raise Exception("Library '{library}' does not exist".format(
                    library=self.recipe['new_library']['name']))
        # Wait for metadata to finish downloading before continuing
        logs.info(u"Waiting for metadata to finish downloading...")
        new_library = self.plex.server.library.section(
            self.recipe['new_library']['name'])
        while new_library.refreshing:
            time.sleep(5)
            # Re-fetch the section each poll to get a fresh 'refreshing' flag.
            new_library = self.plex.server.library.section(
                self.recipe['new_library']['name'])
        # Retrieve a list of items from the new library
        logs.info(u"Retrieving a list of items from the '{library}' library in "
                  u"Plex...".format(library=self.recipe['new_library']['name']))
        return new_library, new_library.all()
    def _get_imdb_dict(self, media_items, item_ids, force_match=False):
        """Build a mapping from source-list ids to Plex media items.

        Keys are the imdb id, ``'tmdb<ID>'``, or ``'tvdb<ID>'`` when the
        item's guid matches an id in ``item_ids``; otherwise the Plex
        ratingKey is used as a fallback key. With ``force_match`` True,
        tmdb/tvdb guids are additionally resolved to imdb ids via the
        TMDb/TVDb clients before falling back.
        """
        imdb_map = {}
        for m in media_items:
            imdb_id = None
            tmdb_id = None
            tvdb_id = None
            # Extract exactly one id kind from the Plex agent guid.
            if m.guid is not None and 'imdb://' in m.guid:
                imdb_id = m.guid.split('imdb://')[1].split('?')[0]
            elif m.guid is not None and 'themoviedb://' in m.guid:
                tmdb_id = m.guid.split('themoviedb://')[1].split('?')[0]
            elif m.guid is not None and 'thetvdb://' in m.guid:
                tvdb_id = (m.guid.split('thetvdb://')[1]
                           .split('?')[0]
                           .split('/')[0])
            else:
                imdb_id = None
            if imdb_id and str(imdb_id) in item_ids:
                imdb_map[imdb_id] = m
            elif tmdb_id and ('tmdb' + str(tmdb_id)) in item_ids:
                imdb_map['tmdb' + str(tmdb_id)] = m
            elif tvdb_id and ('tvdb' + str(tvdb_id)) in item_ids:
                imdb_map['tvdb' + str(tvdb_id)] = m
            elif force_match:
                # Only IMDB ID found for some items
                if tmdb_id:
                    imdb_id = self.tmdb.get_imdb_id(tmdb_id)
                elif tvdb_id:
                    imdb_id = self.tvdb.get_imdb_id(tvdb_id)
                if imdb_id and str(imdb_id) in item_ids:
                    imdb_map[imdb_id] = m
                else:
                    imdb_map[m.ratingKey] = m
            else:
                imdb_map[m.ratingKey] = m
        return imdb_map
def _modify_sort_titles_and_cleanup(self, item_list, imdb_map, new_library, sort_only=False):
if self.recipe['new_library']['sort']:
logs.info(u"Setting the sort titles for the '{}' library...".format(
self.recipe['new_library']['name']))
if self.recipe['new_library']['sort_title']['absolute']:
for i, m in enumerate(item_list):
item = imdb_map.pop(m['id'], None)
if not item:
item = imdb_map.pop('tmdb' + str(m.get('tmdb_id', '')),
None)
if not item:
item = imdb_map.pop('tvdb' + str(m.get('tvdb_id', '')),
None)
if item and self.recipe['new_library']['sort']:
self.plex.set_sort_title(
new_library.key, item.ratingKey, i + 1, m['title'],
self.library_type,
self.recipe['new_library']['sort_title']['format'],
self.recipe['new_library']['sort_title']['visible']
)
else:
i = 0
for m in item_list:
item = imdb_map.pop(m['id'], None)
if not item:
item = imdb_map.pop('tmdb' + str(m.get('tmdb_id', '')),
None)
if not item:
item = imdb_map.pop('tvdb' + str(m.get('tvdb_id', '')),
None)
if item and self.recipe['new_library']['sort']:
i += 1
self.plex.set_sort_title(
new_library.key, item.ratingKey, i, m['title'],
self.library_type,
self.recipe['new_library']['sort_title']['format'],
self.recipe['new_library']['sort_title']['visible']
)
if not sort_only and (
self.recipe['new_library']['remove_from_library'] or
self.recipe['new_library'].get('remove_old', False)):
# Remove old items that no longer qualify
self._remove_old_items_from_library(imdb_map=imdb_map)
elif sort_only:
return True
all_new_items = self._cleanup_new_library(new_library=new_library)
while imdb_map:
imdb_id, item = imdb_map.popitem()
i += 1
logs.info(u"{} {} ({})".format(i, item.title, item.year))
self.plex.set_sort_title(
new_library.key, item.ratingKey, i, item.title,
self.library_type,
self.recipe['new_library']['sort_title']['format'],
self.recipe['new_library']['sort_title']['visible'])
return all_new_items
    def _remove_old_items_from_library(self, imdb_map):
        """Remove symlinks for library items that no longer qualify.

        ``imdb_map`` at this point holds only items NOT matched to the
        current source lists. Movies are unlinked per part; TV shows once
        per show folder. When 'remove_from_library' is off, only movies
        older than max_age are removed.
        NOTE(review): the first log call passes a 'library' format arg but
        the string has no placeholder; 'exclude' is never populated, so the
        pop loop after the movie branch is a no-op -- presumably leftovers.
        """
        logs.info(u"Removing symlinks for items "
                  "which no longer qualify ".format(library=self.recipe['new_library']['name']))
        count = 0
        updated_paths = []
        deleted_items = []
        # Cutoff date: today minus max_age years.
        max_date = add_years(
            (self.recipe['new_library']['max_age'] or 0) * -1)
        if self.library_type == 'movie':
            exclude = []
            for mid, movie in imdb_map.items():
                if not self.recipe['new_library']['remove_from_library']:
                    # Only remove older than max_age
                    if not self.recipe['new_library']['max_age'] \
                            or (movie.originallyAvailableAt and
                                max_date < movie.originallyAvailableAt):
                        continue
                for part in movie.iterParts():
                    old_path_file = part.file
                    old_path, file_name = os.path.split(old_path_file)
                    folder_name = os.path.relpath(
                        old_path, self.recipe['new_library']['folder'])
                    # '.' => file linked directly in the library root;
                    # otherwise the movie's folder was linked.
                    if folder_name == '.':
                        new_path = os.path.join(
                            self.recipe['new_library']['folder'],
                            file_name)
                        dir = False
                    else:
                        new_path = os.path.join(
                            self.recipe['new_library']['folder'],
                            folder_name)
                        dir = True
                    if (dir and os.path.exists(new_path)) or (
                            not dir and os.path.isfile(new_path)):
                        try:
                            if os.name == 'nt':
                                # Python 3.2+ only
                                if sys.version_info < (3, 2):
                                    assert os.path.islink(new_path)
                                if dir:
                                    os.rmdir(new_path)
                                else:
                                    os.remove(new_path)
                            else:
                                # Safety: never delete a real file/folder.
                                assert os.path.islink(new_path)
                                os.unlink(new_path)
                            count += 1
                            deleted_items.append(movie)
                            updated_paths.append(new_path)
                        except Exception as e:
                            logs.error(u"Remove symlink failed for "
                                       "{path}: {e}".format(path=new_path, e=e))
            for mid in exclude:
                imdb_map.pop(mid, None)
        else:
            for tv_show in imdb_map.values():
                # 'done' ensures at most one unlink per show.
                done = False
                if done:
                    continue
                for episode in tv_show.episodes():
                    if done:
                        break
                    for part in episode.iterParts():
                        if done:
                            break
                        old_path_file = part.file
                        old_path, file_name = os.path.split(old_path_file)
                        folder_name = ''
                        new_library_folder = \
                            self.recipe['new_library']['folder']
                        # Reduce to the show's top-level folder under the
                        # new library root.
                        old_path = os.path.join(
                            new_library_folder,
                            old_path.replace(new_library_folder, '').strip(
                                os.sep).split(os.sep)[0])
                        folder_name = os.path.relpath(old_path,
                                                      new_library_folder)
                        new_path = os.path.join(
                            self.recipe['new_library']['folder'],
                            folder_name)
                        if os.path.exists(new_path):
                            try:
                                if os.name == 'nt':
                                    # Python 3.2+ only
                                    if sys.version_info < (3, 2):
                                        assert os.path.islink(new_path)
                                    os.rmdir(new_path)
                                else:
                                    assert os.path.islink(new_path)
                                    os.unlink(new_path)
                                count += 1
                                deleted_items.append(tv_show)
                                updated_paths.append(new_path)
                                done = True
                                break
                            except Exception as e:
                                logs.error(u"Remove symlink failed for "
                                           "{path}: {e}".format(path=new_path,
                                                                e=e))
                        else:
                            done = True
                            break
        logs.info(u"Removed symlinks for {count} items.".format(count=count))
        for item in deleted_items:
            logs.info(u"{title} ({year})".format(title=item.title,
                                                 year=item.year))
    def _cleanup_new_library(self, new_library):
        """Rescan the new library, wait for it to settle, and empty trash.

        Returns the library's full item list after the cleanup.
        """
        # Scan the library to clean up the deleted items
        logs.info(u"Scanning the '{library}' library...".format(
            library=self.recipe['new_library']['name']))
        new_library.update()
        # Give the scan a head start, then poll until refreshing stops.
        time.sleep(10)
        new_library = self.plex.server.library.section(
            self.recipe['new_library']['name'])
        while new_library.refreshing:
            time.sleep(5)
            new_library = self.plex.server.library.section(
                self.recipe['new_library']['name'])
        new_library.emptyTrash()
        return new_library.all()
    def _run(self, share_playlist_to_all=False):
        """Execute the full recipe (playlist mode or library mode).

        Returns:
            tuple: (missing_items, item count of the resulting
            playlist/library).
        """
        # Get the trakt lists
        item_list, item_ids = self._get_trakt_lists()
        force_imdb_id_match = False
        # Get list of items from the Plex server
        source_libraries = self._get_plex_libraries()
        # Create a list of matching items
        matching_items, missing_items, matching_total, nonmatching_idx, max_count = self._get_matching_items(
            source_libraries=source_libraries, item_list=item_list)
        if self.use_playlists:
            # Start playlist process
            if self.recipe['new_playlist']['remove_from_playlist'] or self.recipe['new_playlist'].get('remove_old',
                                                                                                      False):
                # Start playlist over again
                self.plex.reset_playlist(playlist_name=self.recipe['new_playlist']['name'], new_items=matching_items,
                                         user_names=self.recipe['new_playlist'].get('share_to_users', []),
                                         all_users=(share_playlist_to_all if share_playlist_to_all else
                                                    self.recipe['new_playlist'].get('share_to_all', False)))
            else:
                # Keep existing items
                self.plex.add_to_playlist_for_users(playlist_name=self.recipe['new_playlist']['name'],
                                                    items=matching_items,
                                                    user_names=self.recipe['new_playlist'].get('share_to_users', []),
                                                    all_users=(share_playlist_to_all if share_playlist_to_all else
                                                               self.recipe['new_playlist'].get('share_to_all', False)))
            playlist_items = self.plex.get_playlist_items(playlist_name=self.recipe['new_playlist']['name'])
            return missing_items, (len(playlist_items) if playlist_items else 0)
        else:
            # Start library process
            # Create symlinks for all items in your library on the trakt watched
            self._create_symbolic_links(matching_items=matching_items, matching_total=matching_total)
            # Post-process new library
            logs.info(u"Creating the '{}' library in Plex...".format(
                self.recipe['new_library']['name']))
            new_library, all_new_items = self._verify_new_library_and_get_items(create_if_not_found=True)
            # Create a dictionary of {imdb_id: item}
            imdb_map = self._get_imdb_dict(media_items=all_new_items, item_ids=item_ids,
                                           force_match=force_imdb_id_match)
            # Modify the sort titles
            all_new_items = self._modify_sort_titles_and_cleanup(item_list=item_list, imdb_map=imdb_map,
                                                                 new_library=new_library, sort_only=False)
            return missing_items, len(all_new_items)
def _run_sort_only(self):
item_list, item_ids = self._get_trakt_lists()
force_imdb_id_match = False
# Get existing library and its items
new_library, all_new_items = self._verify_new_library_and_get_items(create_if_not_found=False)
# Create a dictionary of {imdb_id: item}
imdb_map = self._get_imdb_dict(media_items=all_new_items, item_ids=item_ids, force_match=force_imdb_id_match)
# Modify the sort titles
_ = self._modify_sort_titles_and_cleanup(item_list=item_list, imdb_map=imdb_map, new_library=new_library,
sort_only=True)
return len(all_new_items)
    def run(self, sort_only=False, share_playlist_to_all=False):
        """Entry point: run the recipe and log a summary.

        With ``sort_only`` True only sort titles are refreshed; otherwise
        the full sync runs and every missing item is logged with its source
        position, release date, and IMDb id.
        """
        if sort_only:
            logs.info(u"Running the recipe '{}', sorting only".format(
                self.recipe_name))
            list_count = self._run_sort_only()
            logs.info(u"Number of items in the new {library_or_playlist}: {count}".format(
                count=list_count, library_or_playlist=('playlist' if self.use_playlists else 'library')))
        else:
            logs.info(u"Running the recipe '{}'".format(self.recipe_name))
            missing_items, list_count = self._run(share_playlist_to_all=share_playlist_to_all)
            logs.info(u"Number of items in the new {library_or_playlist}: {count}".format(
                count=list_count, library_or_playlist=('playlist' if self.use_playlists else 'library')))
            logs.info(u"Number of missing items: {count}".format(
                count=len(missing_items)))
            for idx, item in missing_items:
                logs.info(u"{idx}\t{release}\t{imdb_id}\t{title} ({year})".format(
                    idx=idx + 1, release=item.get('release_date', ''),
                    imdb_id=item['id'], title=item['title'],
                    year=item['year']))
    def weighted_sorting(self, item_list):
        """Re-order ``item_list`` by a combined weight and return it.

        The weight combines list position, TMDb vote rank, item age, and an
        optional random component (each scaled by the recipe's weights),
        multiplied by per-genre bias factors. Items without TMDb data keep
        only the position weight. Sorts ``item_list`` in place (descending
        weight) and logs the movement of each item.
        """
        def _get_non_theatrical_release(release_dates):
            # Returns earliest release date that is not theatrical
            # TODO PREDB
            types = {}
            for country in release_dates.get('results', []):
                # FIXME Look at others too?
                if country['iso_3166_1'] != 'US':
                    continue
                for d in country['release_dates']:
                    if d['type'] in (4, 5, 6):
                        # 4: Digital, 5: Physical, 6: TV
                        types[str(d['type'])] = datetime.datetime.strptime(
                            d['release_date'], '%Y-%m-%dT%H:%M:%S.%fZ').date()
                # NOTE(review): break sits at the country-loop level, so
                # only the first US entry is considered.
                break
            release_date = None
            for t, d in types.items():
                if not release_date or d < release_date:
                    release_date = d
            return release_date
        def _get_age_weight(days):
            # Linear falloff from 1 (younger than min_days) to 0 (older
            # than max_days); thresholds differ for movies vs TV.
            if self.library_type == 'movie':
                # Everything younger than this will get 1
                min_days = 180
                # Everything older than this will get 0
                max_days = (float(self.recipe['new_library']['max_age'])
                            / 4.0 * 365.25 or 360)
            else:
                min_days = 14
                max_days = (float(self.recipe['new_library']['max_age'])
                            / 4.0 * 365.25 or 180)
            if days <= min_days:
                return 1
            elif days >= max_days:
                return 0
            else:
                return 1 - (days - min_days) / (max_days - min_days)
        total_items = len(item_list)
        weights = self.recipe['weighted_sorting']['weights']
        # TMDB details
        today = datetime.date.today()
        total_tmdb_vote = 0.0
        tmdb_votes = []
        for i, m in enumerate(item_list):
            m['original_idx'] = i + 1
            details = self.tmdb.get_details(m['tmdb_id'], self.library_type)
            if not details:
                logs.warning(u"Warning: No TMDb data for {}".format(m['title']))
                continue
            m['tmdb_popularity'] = float(details['popularity'])
            m['tmdb_vote'] = float(details['vote_average'])
            m['tmdb_vote_count'] = int(details['vote_count'])
            if self.library_type == 'movie':
                if self.recipe['weighted_sorting']['better_release_date']:
                    # Prefer the digital/physical/TV date, falling back to
                    # the theatrical one.
                    m['release_date'] = _get_non_theatrical_release(
                        details['release_dates']) or \
                                        datetime.datetime.strptime(
                                            details['release_date'],
                                            '%Y-%m-%d').date()
                else:
                    m['release_date'] = datetime.datetime.strptime(
                        details['release_date'], '%Y-%m-%d').date()
                item_age_td = today - m['release_date']
            elif self.library_type == 'tv':
                try:
                    m['last_air_date'] = datetime.datetime.strptime(
                        details['last_air_date'], '%Y-%m-%d').date()
                except TypeError:
                    # last_air_date can be None; treat the show as current.
                    m['last_air_date'] = today
                item_age_td = today - m['last_air_date']
            m['genres'] = [g['name'].lower() for g in details['genres']]
            m['age'] = item_age_td.days
            # Only "established" items take part in the vote ranking.
            if (self.library_type == 'tv' or m['tmdb_vote_count'] > 150 or
                    m['age'] > 50):
                tmdb_votes.append(m['tmdb_vote'])
            total_tmdb_vote += m['tmdb_vote']
            item_list[i] = m
        tmdb_votes.sort()
        for i, m in enumerate(item_list):
            # Distribute all weights evenly from 0 to 1 (times global factor)
            # More weight means it'll go higher in the final list
            index_weight = float(total_items - i) / float(total_items)
            m['index_weight'] = index_weight * weights['index']
            if m.get('tmdb_popularity'):
                if (self.library_type == 'tv' or
                        m.get('tmdb_vote_count') > 150 or m['age'] > 50):
                    vote_weight = ((tmdb_votes.index(m['tmdb_vote']) + 1)
                                   / float(len(tmdb_votes)))
                else:
                    # Assume below average rating for new/less voted items
                    vote_weight = 0.25
                age_weight = _get_age_weight(float(m['age']))
                if weights.get('random'):
                    random_weight = random.random()
                    m['random_weight'] = random_weight * weights['random']
                else:
                    m['random_weight'] = 0.0
                m['vote_weight'] = vote_weight * weights['vote']
                m['age_weight'] = age_weight * weights['age']
                weight = (m['index_weight'] + m['vote_weight']
                          + m['age_weight'] + m['random_weight'])
                # Genre biases multiply the combined weight.
                for genre, value in weights['genre_bias'].items():
                    if genre.lower() in m['genres']:
                        weight *= value
                m['weight'] = weight
            else:
                # No TMDb data: fall back to pure list position.
                m['vote_weight'] = 0.0
                m['age_weight'] = 0.0
                m['weight'] = index_weight
            item_list[i] = m
        item_list.sort(key=lambda m: m['weight'], reverse=True)
        for i, m in enumerate(item_list):
            # Arrow + distance showing how far each item moved.
            if (i + 1) < m['original_idx']:
                net = Colors.GREEN + u'↑'
            elif (i + 1) > m['original_idx']:
                net = Colors.RED + u'↓'
            else:
                net = u' '
            net += str(abs(i + 1 - m['original_idx'])).rjust(3)
            try:
                # TODO
                logs.info(u"{} {:>3}: trnd:{:>3}, w_trnd:{:0<5}; vote:{}, "
                          "w_vote:{:0<5}; age:{:>4}, w_age:{:0<5}; w_rnd:{:0<5}; "
                          "w_cmb:{:0<5}; {} {}{}"
                          .format(net, i + 1, m['original_idx'],
                                  round(m['index_weight'], 3),
                                  m.get('tmdb_vote', 0.0),
                                  round(m['vote_weight'], 3), m.get('age', 0),
                                  round(m['age_weight'], 3),
                                  round(m.get('random_weight', 0), 3),
                                  round(m['weight'], 3), str(m['title']),
                                  str(m['year']), Colors.RESET))
            except UnicodeEncodeError:
                # Arrow glyphs may not be encodable on some consoles.
                pass
        return item_list
| 46.64908 | 119 | 0.467082 |
3710574f81ec7fa29060ad195b3e399695809658 | 641 | py | Python | math/Gaussian_Elimination.py | vincentclaes/algorithms | 91626190c3b26c7eb41742a58f96c4bc371ddea9 | [
"MIT"
] | 2 | 2021-07-27T19:20:40.000Z | 2021-08-22T03:39:21.000Z | math/Gaussian_Elimination.py | UKVeteran/algorithms | 6b664bdb8b672143e9906fc7ec7870eac1fd4ba6 | [
"MIT"
] | null | null | null | math/Gaussian_Elimination.py | UKVeteran/algorithms | 6b664bdb8b672143e9906fc7ec7870eac1fd4ba6 | [
"MIT"
] | 1 | 2021-07-13T20:38:45.000Z | 2021-07-13T20:38:45.000Z | from numpy import array, column_stack, zeros
from sys import exit
n=3
X= array((3,2,1), dtype="f")
Y= array((2,3,2), dtype="f")
Z= array((1,1,3), dtype="f")
C= array((90,77,46), dtype="f")
a=column_stack((X,Y,Z,C))
x=zeros(n)
for i in range(n):
if a[i][i]==0:
sys.exit("No Solution")
for j in range(i+1, n):
ratio= a[j][i]/a[i][i]
for k in range(n+1):
a[j][k] -=ratio*a[i][k]
x[n-1]=a[n-1][n]/a[n-1][n-1]
for i in range(n-2,-1,-1):
x[i]=a[i][n]
for j in range(i+1,n):
x[i]-=a[i][j]*x[j]
x[i]= x[i]/a[i][i]
| 19.424242 | 45 | 0.447738 |
10247675fd22b89643a6f01d546bf6ff5c4c47f9 | 5,099 | py | Python | test/jpypetest/test_jmethod.py | fuz-woo/jpype | 3ffb1e7a75402545c1d669f4bc5836b08b76b6ae | [
"Apache-2.0"
] | 1 | 2020-01-03T06:03:14.000Z | 2020-01-03T06:03:14.000Z | test/jpypetest/test_jmethod.py | fuz-woo/jpype | 3ffb1e7a75402545c1d669f4bc5836b08b76b6ae | [
"Apache-2.0"
] | null | null | null | test/jpypetest/test_jmethod.py | fuz-woo/jpype | 3ffb1e7a75402545c1d669f4bc5836b08b76b6ae | [
"Apache-2.0"
] | null | null | null | # *****************************************************************************
# Copyright 2017 Karl Einar Nelson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# *****************************************************************************
import sys
import jpype
import common
import types
import functools
import inspect
# Code from stackoverflow
# Reference http://stackoverflow.com/questions/13503079/how-to-create-a-copy-of-a-python-function
def copy_func(f):
    """Based on http://stackoverflow.com/a/6528148/190597 (Glenn Maynard).

    Build a fresh function object sharing *f*'s code, globals, defaults
    and closure, then copy the wrapper metadata (__doc__, __dict__, ...).
    """
    if sys.version_info[0] < 3:
        clone = types.FunctionType(f.func_code, f.func_globals,
                                   name=f.func_name,
                                   argdefs=f.func_defaults,
                                   closure=f.func_closure)
    else:
        clone = types.FunctionType(f.__code__, f.__globals__,
                                   name=f.__name__,
                                   argdefs=f.__defaults__,
                                   closure=f.__closure__)
        # Keyword-only defaults exist only on Python 3 functions.
        clone.__kwdefaults__ = f.__kwdefaults__
    return functools.update_wrapper(clone, f)
class JMethodTestCase(common.JPypeTestCase):
    """ Test for methods of JMethod (_jpype.PyJPMethod)
    This should test how well the object matchs a Python3 function.
    * __self__: should appear on a bound, None otherwise.
    * __name__: should be set
    * __qualname__: should be set
    * __doc__: should be set
    * __annotations__: should give return type
    * __defaults__, __kwdefaults__, __code__, __globals__, __closure__: should
    be enough to "clone" the method.
    It should also be callable as a method, class method.
    Further inspect should work
    * inspect.getdoc() should match __doc__
    * inspect.signature() should work
    * inspect.isroutine() should be True
    We are not going to try to pretend to be Python2.
    """
    def setUp(self):
        common.JPypeTestCase.setUp(self)
        # java.lang.String gives both an unbound JMethod (on the class) and
        # a bound one (on the instance) to exercise in every test below.
        self.cls = jpype.JClass('java.lang.String')
        self.obj = self.cls('foo')
    # --- function-protocol attributes ---
    def testMethodSelf(self):
        self.assertEqual(self.cls.substring.__self__, None)
        self.assertEqual(self.obj.substring.__self__, self.obj)
    def testMethodName(self):
        self.assertEqual(self.cls.substring.__name__, "substring")
        self.assertEqual(self.obj.substring.__name__, "substring")
    @common.unittest.skipIf(sys.version_info[0] < 3, "skip on Python2")
    def testMethodQualName(self):
        self.assertEqual(self.cls.substring.__qualname__, "java.lang.String.substring")
        self.assertEqual(self.obj.substring.__qualname__, "java.lang.String.substring")
    def testMethodDoc(self):
        self.assertIsInstance(self.cls.substring.__doc__, str)
        self.assertIsInstance(self.obj.substring.__doc__, str)
    def testMethodInspectDoc(self):
        self.assertIsInstance(inspect.getdoc(self.cls.substring), str)
        self.assertIsInstance(inspect.getdoc(self.obj.substring), str)
    def testMethodAnnotations(self):
        self.assertIsInstance(self.cls.substring.__annotations__, dict)
        self.assertIsNotNone(self.obj.substring.__annotations__, dict)
        # This one will need to change in Python 3.8
        self.assertEqual(self.cls.substring.__annotations__["return"], self.cls)
    # --- inspect-module integration ---
    @common.unittest.skipIf(sys.version_info[0] < 3, "skip on Python2")
    def testMethodInspectSignature(self):
        self.assertIsInstance(inspect.signature(self.cls.substring), inspect.Signature)
        self.assertIsInstance(inspect.signature(self.obj.substring), inspect.Signature)
        self.assertEqual(inspect.signature(self.obj.substring).return_annotation, self.cls)
    def testMethodInspectFunction(self):
        self.assertTrue(inspect.isfunction(self.cls.substring))
        self.assertTrue(inspect.isfunction(self.obj.substring))
    def testMethodInspectRoutine(self):
        self.assertTrue(inspect.isroutine(self.cls.substring))
        self.assertTrue(inspect.isroutine(self.obj.substring))
    # --- calling conventions ---
    def testMethodClassCall(self):
        self.assertEqual(self.cls.substring(self.obj, 1), "oo")
    def testMethodClassCallWierd(self):
        # A plain Python str is accepted where the Java receiver is expected.
        self.assertEqual(self.cls.substring("foo", 1), "oo")
    def testMethodClassCallFail(self):
        with self.assertRaises(TypeError):
            self.cls.substring(1, 1)
    def testMethodCall(self):
        self.assertEqual(self.obj.substring(1), "oo")
    def testMethodClone(self):
        # Cloned JMethods (via copy_func above) must stay callable both ways.
        a = copy_func(self.cls.substring)
        self.assertEqual(a(self.obj, 1), "oo")
        a = copy_func(self.obj.substring)
        self.assertEqual(a(1), "oo")
| 39.527132 | 97 | 0.669739 |
1ece54baf6c04d628124f3878a28c50766d0559d | 12,867 | py | Python | simulator1.py | vyshnavigutta369/minitictactoebot | 1037b08cafcfc2bb70cbfddf98a17be6f21568e7 | [
"MIT"
] | null | null | null | simulator1.py | vyshnavigutta369/minitictactoebot | 1037b08cafcfc2bb70cbfddf98a17be6f21568e7 | [
"MIT"
] | null | null | null | simulator1.py | vyshnavigutta369/minitictactoebot | 1037b08cafcfc2bb70cbfddf98a17be6f21568e7 | [
"MIT"
] | null | null | null | '''
This is the engine for the Ultimate TicTacToe Tournament. The code in this file is not for reproduction.
@author: Devansh Shah
The structure of the code is as below:
1. Header Files
2. Sample implementations of your class (Player and ManualPlayer)
3. Game Logic
4. Game simulator
In case of any queries, please post on moodle.iiit.ac.in
'''
import sys
import random
import signal
import Player
from Player import Player11
def handler(signum, frame):
    # SIGALRM handler used to bound each player's thinking time.
    #print 'Signal handler called with signal', signum
    # NOTE(review): TimedOutExc is neither defined nor imported in this file
    # (only `import Player` / `from Player import Player11` appear), so if
    # the alarm fires this raises NameError instead - confirm where
    # TimedOutExc is meant to come from.
    raise TimedOutExc()
class ManualPlayer:
    # Human player: reads one move per turn from stdin (Python 2 syntax:
    # print statement + raw_input).
    def __init__(self):
        pass

    def move(self, temp_board, temp_block, old_move, flag):
        # Prompt format is "row column" (e.g. "4 4"); returned as int tuple.
        # No validation here - the simulator's check_valid_move rejects bad
        # input and the player forfeits on an invalid move.
        print 'Enter your move: <format:row column> (you\'re playing with', flag + ")"
        mvp = raw_input()
        mvp = mvp.split()
        return (int(mvp[0]), int(mvp[1]))
class Player83:
    """Random baseline player: picks uniformly among the legal cells."""

    def __init__(self):
        # Stateless player - nothing to keep between turns.
        pass

    def move(self, temp_board, temp_block, old_move, flag):
        # Blocks the rules permit, derived from the opponent's last move.
        playable = determine_blocks_allowed(old_move, temp_block)
        # Empty cells inside those blocks (falls back to the whole board
        # when every permitted block is already full).
        open_cells = get_empty_out_of(temp_board, playable, temp_block)
        # Uniformly random pick, same distribution as the original code.
        pick = random.randrange(len(open_cells))
        return open_cells[pick]
class Player2:
    # Random bot used as the default opponent.
    # NOTE(review): byte-for-byte duplicate of Player83 above - consider
    # keeping a single random-player implementation.
    def __init__(self):
        # You may initialize your object here and use any variables for storing throughout the game
        pass

    def move(self,temp_board,temp_block,old_move,flag):
        #List of permitted blocks, based on old move.
        blocks_allowed = determine_blocks_allowed(old_move, temp_block)
        #Get list of empty valid cells
        cells = get_empty_out_of(temp_board, blocks_allowed,temp_block)
        #Choose a move based on some algorithm, here it is a random move.
        return cells[random.randrange(len(cells))]
def determine_blocks_allowed(old_move, block_stat):
    """Map the previous move's cell position to the next playable blocks.

    The cell's (row % 3, col % 3) position inside its block selects which
    of the nine blocks (numbered 0..8, row-major) the next move may target.
    Only blocks still marked '-' (undecided) in block_stat are returned.
    """
    position = (old_move[0] % 3, old_move[1] % 3)
    candidates = {
        (0, 0): [1, 3],
        (0, 2): [1, 5],
        (2, 0): [3, 7],
        (2, 2): [5, 7],
        (0, 1): [0, 2],
        (1, 0): [0, 6],
        (2, 1): [6, 8],
        (1, 2): [2, 8],
        (1, 1): [4],
    }
    if position not in candidates:
        # Unreachable: every (row % 3, col % 3) combination is covered above;
        # kept to mirror the original's defensive exit.
        sys.exit(1)
    return [b for b in candidates[position] if block_stat[b] == '-']
# Initializes the game.
def get_init_board_and_blockstatus():
    """Return a fresh 9x9 board and the 9-entry block status list.

    Every cell and every block starts as '-' (empty / undecided).
    Each board row is an independent list, so cells can be set in place.
    """
    board = [['-' for _ in range(9)] for _ in range(9)]
    block_stat = ['-' for _ in range(9)]
    return board, block_stat
# Checks if player has messed with the board. Don't mess with the board that is passed to your move function.
def verification_fails_board(board_game, temp_board_state):
    # NOTE(review): despite the name, this returns True when the board was
    # NOT tampered with (callers wrap it in `not (...)`); the name cannot be
    # changed without touching every call site in simulate().
    return board_game == temp_board_state

# Checks if player has messed with the block. Don't mess with the block array that is passed to your move function.
def verification_fails_block(block_stat, temp_block_stat):
    # Same misnomer as verification_fails_board: True means "unchanged".
    return block_stat == temp_block_stat
#Gets empty cells from the list of possible blocks. Hence gets valid moves.
# NOTE: Python 2 code - `idb/3` below relies on integer division.
def get_empty_out_of(gameb, blal, block_stat):
    cells = []  # it will be list of tuples (row, col)
    #Iterate over possible blocks and get empty cells
    for idb in blal:
        id1 = idb/3
        id2 = idb%3
        for i in range(id1*3,id1*3+3):
            for j in range(id2*3,id2*3+3):
                if gameb[i][j] == '-':
                    cells.append((i,j))
    # If all the possible blocks are full, you can move anywhere
    # (in any block still marked '-').
    if cells == []:
        new_blal = []
        all_blal = [0,1,2,3,4,5,6,7,8]
        for i in all_blal:
            if block_stat[i]=='-':
                new_blal.append(i)
        for idb in new_blal:
            id1 = idb/3
            id2 = idb%3
            for i in range(id1*3,id1*3+3):
                for j in range(id2*3,id2*3+3):
                    if gameb[i][j] == '-':
                        cells.append((i,j))
        return cells
    # NOTE(review): the branch below is dead code - when the first
    # `if cells == []` was false, `cells` is non-empty here, and the empty
    # case already returned above.
    if cells == []:
        for i in range(9):
            for j in range(9):
                no = (i/3)*3
                no += (j/3)
                if gameb[i][j] == '-' and block_stat[no] == '-':
                    cells.append((i,j))
    return cells
# Returns True if move is valid.
def check_valid_move(game_board, block_stat, current_move, old_move):
    """Validate current_move: shape, bounds, then the block rules.

    old_move is guaranteed well-formed by the caller; (-1, -1) marks the
    opening move, for which any in-bounds cell is legal.
    """
    # Shape check: must be exactly a 2-tuple of plain ints.
    if type(current_move) is not tuple:
        return False
    if len(current_move) != 2:
        return False
    row, col = current_move
    if type(row) is not int or type(col) is not int:
        return False
    # Bounds check on the 9x9 board.
    if row < 0 or row > 8 or col < 0 or col > 8:
        return False
    # Special case at the start of the game: any in-bounds move is okay.
    if old_move[0] == -1 and old_move[1] == -1:
        return True
    # Otherwise the move must be one of the empty cells inside the blocks
    # the previous move points to (or anywhere, if those blocks are full).
    permitted = determine_blocks_allowed(old_move, block_stat)
    return current_move in get_empty_out_of(game_board, permitted, block_stat)
def update_lists(game_board, block_stat, move_ret, fl):
    # Apply move_ret = (row, col) for player flag `fl`, then refresh the
    # status of the affected 3x3 block. Returns 1 if this move won the
    # block, else 0 (the caller adds the result to the player's score).
    # NOTE: Python 2 code - `/` below is integer division.
    game_board[move_ret[0]][move_ret[1]] = fl
    block_no = (move_ret[0]/3)*3 + move_ret[1]/3
    id1 = block_no/3
    id2 = block_no%3
    mflg = 0
    flag = 0
    # flag stays 0 only when the block is now completely filled.
    for i in range(id1*3,id1*3+3):
        for j in range(id2*3,id2*3+3):
            if game_board[i][j] == '-':
                flag = 1
    if flag == 0:
        block_stat[block_no] = 'D'
    # Win detection: both diagonals first, then columns, then rows.
    # NOTE(review): the 'D' above is assigned before win detection, so a
    # move that simultaneously fills and wins the block is scored as a
    # draw - confirm this precedence is intended.
    if block_stat[block_no] == '-':
        if game_board[id1*3][id2*3] == game_board[id1*3+1][id2*3+1] and game_board[id1*3+1][id2*3+1] == game_board[id1*3+2][id2*3+2] and game_board[id1*3+1][id2*3+1] != '-' and game_board[id1*3+1][id2*3+1] != 'D':
            mflg=1
        if game_board[id1*3+2][id2*3] == game_board[id1*3+1][id2*3+1] and game_board[id1*3+1][id2*3+1] == game_board[id1*3][id2*3 + 2] and game_board[id1*3+1][id2*3+1] != '-' and game_board[id1*3+1][id2*3+1] != 'D':
            mflg=1
        if mflg != 1:
            for i in range(id2*3,id2*3+3):
                if game_board[id1*3][i]==game_board[id1*3+1][i] and game_board[id1*3+1][i] == game_board[id1*3+2][i] and game_board[id1*3][i] != '-' and game_board[id1*3][i] != 'D':
                    mflg = 1
                    break
        if mflg != 1:
            for i in range(id1*3,id1*3+3):
                if game_board[i][id2*3]==game_board[i][id2*3+1] and game_board[i][id2*3+1] == game_board[i][id2*3+2] and game_board[i][id2*3] != '-' and game_board[i][id2*3] != 'D':
                    mflg = 1
                    break
    if mflg == 1:
        block_stat[block_no] = fl
    return mflg
# Check whether the game has ended.
def terminal_state_reached(game_board, block_stat, point1, point2):
    """Decide whether the game is over, based on the 3x3 block status list.

    Returns (done, status): status is 'W' when a line of blocks is won by
    one player, 'Continue' while undecided blocks remain, and otherwise
    'P1'/'P2'/'D' by comparing the players' block counts.
    (game_board is unused; kept for interface compatibility.)
    """
    bs = block_stat
    # The eight winning lines of the meta-board (rows, columns, diagonals).
    triples = ((0, 1, 2), (3, 4, 5), (6, 7, 8),
               (0, 3, 6), (1, 4, 7), (2, 5, 8),
               (0, 4, 8), (2, 4, 6))
    for p, q, r in triples:
        if bs[p] == bs[q] == bs[r] and bs[p] not in ('-', 'D'):
            return True, 'W'
    # Any undecided block left means the game goes on.
    if any(cell == '-' for cell in bs):
        return False, 'Continue'
    # Board exhausted: decide by block count.
    if point1 > point2:
        return True, 'P1'
    if point2 > point1:
        return True, 'P2'
    return True, 'D'
def decide_winner_and_get_message(player, status, message):
    """Translate a terminal status, reported for `player`, into a winner.

    player  -- 'P1' or 'P2': the player whose move ended the game.
    status  -- 'W'/'L' (that player won/lost), 'P1'/'P2' (decided by block
               count), anything else is treated as a draw.
    message -- reason string forwarded to the caller (ignored for block-count
               and draw outcomes, which use fixed messages).

    Returns a (winner, message) tuple; winner is 'P1', 'P2' or 'NONE'.
    """
    # NOTE: an unreachable bare `return` that followed this exhaustive
    # if/elif/else chain in the original has been removed.
    if status == 'P1':
        return ('P1', 'MORE BLOCKS')
    elif status == 'P2':
        return ('P2', 'MORE BLOCKS')
    elif player == 'P1' and status == 'L':
        return ('P2', message)
    elif player == 'P1' and status == 'W':
        return ('P1', message)
    elif player == 'P2' and status == 'L':
        return ('P1', message)
    elif player == 'P2' and status == 'W':
        return ('P2', message)
    else:
        return ('NONE', 'DRAW')
def print_lists(gb, bs):
    # Pretty-print the 9x9 board in 3x3 groups, then the 3x3 block-status
    # grid (Python 2 print statements; trailing commas suppress newlines).
    print '=========== Game Board ==========='
    for i in range(9):
        if i > 0 and i % 3 == 0:
            print
        for j in range(9):
            if j > 0 and j % 3 == 0:
                # Extra space separates the 3x3 block columns visually.
                print " " + gb[i][j],
            else:
                print gb[i][j],
        print
    print "=================================="
    print "=========== Block Status ========="
    for i in range(0, 9, 3):
        print bs[i] + " " + bs[i+1] + " " + bs[i+2]
    print "=================================="
    print
def simulate(obj1, obj2):
    # Run one full game: obj1 plays 'x' and moves first, obj2 plays 'o'.
    # Game board is a 9x9 list of lists & block_stat is a list of 9 elements indicating if a block has been won.
    game_board, block_stat = get_init_board_and_blockstatus()
    pl1 = obj1
    pl2 = obj2
    # Player with flag 'x' will start the game
    pl1_fl = 'x'
    pl2_fl = 'o'
    old_move = (-1, -1) # For the first move
    WINNER = ''
    MESSAGE = ''
    # Per-move budget handed to signal.alarm(), i.e. 12000 SECONDS -
    # NOTE(review): confirm this generous value is intentional.
    TIMEALLOWED = 12000
    p1_pts=0
    p2_pts=0
    print_lists(game_board, block_stat)
    while(1): # Main game loop
        # NOTE(review): [:] is a shallow copy - inner row lists are shared
        # with game_board, so the tamper check below cannot detect in-place
        # cell edits made by a player.
        temp_board_state = game_board[:]
        temp_block_stat = block_stat[:]
        signal.signal(signal.SIGALRM, handler)
        signal.alarm(TIMEALLOWED)
        # NOTE(review): unlike player 2 below, player 1's move is NOT wrapped
        # in try/except (it is commented out), so a timeout here propagates
        # instead of forfeiting the game for P1.
        ret_move_pl1 = pl1.move(temp_board_state, temp_block_stat, old_move, pl1_fl)
        # try:
        #     ret_move_pl1 = pl1.move(temp_board_state, temp_block_stat, old_move, pl1_fl)
        # except:
        #     WINNER, MESSAGE = decide_winner_and_get_message('P1', 'L', 'TIMED OUT')
        #     print MESSAGE
        #     break
        signal.alarm(0)
        # Check if list is tampered.
        if not (verification_fails_board(game_board, temp_board_state) and verification_fails_block(block_stat, temp_block_stat)):
            WINNER, MESSAGE = decide_winner_and_get_message('P1', 'L', 'MODIFIED CONTENTS OF LISTS')
            break
        # Check if the returned move is valid
        if not check_valid_move(game_board, block_stat, ret_move_pl1, old_move):
            WINNER, MESSAGE = decide_winner_and_get_message('P1', 'L', 'MADE AN INVALID MOVE')
            break
        print "Player 1 made the move:", ret_move_pl1, 'with', pl1_fl
        # Update the 'game_board' and 'block_stat' move
        p1_pts += update_lists(game_board, block_stat, ret_move_pl1, pl1_fl)
        gamestatus, mesg = terminal_state_reached(game_board, block_stat,p1_pts,p2_pts)
        if gamestatus == True:
            print_lists(game_board, block_stat)
            WINNER, MESSAGE = decide_winner_and_get_message('P1', mesg, 'COMPLETE')
            break
        old_move = ret_move_pl1
        print_lists(game_board, block_stat)
        # --- player 2's turn (symmetric, but with timeout enforcement) ---
        temp_board_state = game_board[:]
        temp_block_stat = block_stat[:]
        signal.signal(signal.SIGALRM, handler)
        signal.alarm(TIMEALLOWED)
        try:
            ret_move_pl2 = pl2.move(temp_board_state, temp_block_stat, old_move, pl2_fl)
        except:
            WINNER, MESSAGE = decide_winner_and_get_message('P2', 'L', 'TIMED OUT')
            break
        signal.alarm(0)
        if not (verification_fails_board(game_board, temp_board_state) and verification_fails_block(block_stat, temp_block_stat)):
            WINNER, MESSAGE = decide_winner_and_get_message('P2', 'L', 'MODIFIED CONTENTS OF LISTS')
            break
        if not check_valid_move(game_board, block_stat, ret_move_pl2, old_move):
            WINNER, MESSAGE = decide_winner_and_get_message('P2', 'L', 'MADE AN INVALID MOVE')
            break
        print "Player 2 made the move:", ret_move_pl2, 'with', pl2_fl
        p2_pts += update_lists(game_board, block_stat, ret_move_pl2, pl2_fl)
        # Now check if the last move resulted in a terminal state
        gamestatus, mesg = terminal_state_reached(game_board, block_stat,p1_pts,p2_pts)
        if gamestatus == True:
            print_lists(game_board, block_stat)
            WINNER, MESSAGE = decide_winner_and_get_message('P2', mesg, 'COMPLETE' )
            break
        else:
            old_move = ret_move_pl2
        print_lists(game_board, block_stat)
    print WINNER
    print MESSAGE
if __name__ == '__main__':
    ## get game playing objects
    if len(sys.argv) != 2:
        print 'Usage: python simulator.py <option>'
        print '<option> can be 1 => Random player vs. Random player'
        print '             2 => Human vs. Random Player'
        print '             3 => Human vs. Human'
        sys.exit(1)
    obj1 = ''
    obj2 = ''
    option = sys.argv[1]
    # NOTE(review): option '1' actually pits Player11 (imported from the
    # Player module) against the random Player2, despite the usage text
    # saying "Random player vs. Random player".
    if option == '1':
        obj1 = Player11()
        obj2 = Player2()
    elif option == '2':
        obj1 = Player11()
        obj2 = ManualPlayer()
    elif option == '3':
        obj1 = ManualPlayer()
        obj2 = ManualPlayer()
    else:
        print 'Invalid option'
        sys.exit(1)
    # Randomly decide which object plays first (first argument plays 'x').
    num = random.uniform(0,1)
    if num > 0.5:
        simulate(obj2, obj1)
    else:
        simulate(obj1, obj2)
| 29.376712 | 210 | 0.642885 |
af3abc72ee85ce95b6ae25a25bd288a46daa3db3 | 3,999 | py | Python | tools/inference.py | UniBester/AGE | a9844fcbb88a268fd3743a3bc346767e8e4114fe | [
"MIT"
] | 17 | 2022-03-22T03:10:39.000Z | 2022-03-29T13:54:09.000Z | tools/inference.py | UniBester/AGE | a9844fcbb88a268fd3743a3bc346767e8e4114fe | [
"MIT"
] | 2 | 2022-03-24T14:08:34.000Z | 2022-03-26T04:25:52.000Z | tools/inference.py | UniBester/AGE | a9844fcbb88a268fd3743a3bc346767e8e4114fe | [
"MIT"
] | 2 | 2022-03-26T02:34:44.000Z | 2022-03-31T13:50:37.000Z | import torch
import os
from argparse import Namespace
from tqdm import tqdm
import numpy as np
from PIL import Image
import torch
import sys
import random
sys.path.append(".")
sys.path.append("..")
from configs import data_configs
from utils.common import tensor2im
from options.test_options import TestOptions
from models.age import AGE
def get_n_distribution(net, transform, class_embeddings, opts):
    # Fit the latent-offset ("n") distribution over the training set: encode
    # every training image relative to its category embedding, then save the
    # per-layer mean, mean-absolute-value and covariance to
    # <opts.n_distribution_path>/n_distribution.npy.
    samples=os.listdir(opts.train_data_path)
    xs=[]
    for s in tqdm(samples):
        # Category id is the filename prefix before the first '_'.
        cate=s.split('_')[0]
        av_codes=class_embeddings[cate].cuda()
        from_im = Image.open(os.path.join(opts.train_data_path,s))
        from_im = from_im.convert('RGB')
        from_im = transform(from_im)
        with torch.no_grad():
            x=net.get_code(from_im.unsqueeze(0).to("cuda").float(), av_codes.unsqueeze(0))['x']
            x=torch.stack(x)
        xs.append(x)
    # After the permute, codes appears to be (layers, samples, dim) -
    # TODO(review): confirm against net.get_code's output shape.
    codes=torch.stack(xs).squeeze(2).squeeze(2).permute(1,0,2).cpu().numpy()
    mean=np.mean(codes,axis=1)
    mean_abs=np.mean(np.abs(codes),axis=1)
    cov=[]
    for i in range(codes.shape[0]):
        cov.append(np.cov(codes[i].T))
    os.makedirs(opts.n_distribution_path, exist_ok=True)
    np.save(os.path.join(opts.n_distribution_path, 'n_distribution.npy'),{'mean':mean, 'mean_abs':mean_abs, 'cov':cov})
def sampler(outputs, dist, opts):
    # Draw one latent offset per group from the fitted Gaussian distribution
    # (see get_n_distribution) and blend it into the original style codes.
    means=dist['mean']
    means_abs=dist['mean_abs']
    covs=dist['cov']
    one = torch.ones_like(torch.from_numpy(means[0]))
    zero = torch.zeros_like(torch.from_numpy(means[0]))
    dws=[]
    # Style layers 0-5 are perturbed in two groups of three, one sampled
    # offset shared within each group.
    groups=[[0,1,2],[3,4,5]]
    for i in range(means.shape[0]):
        x=torch.from_numpy(np.random.multivariate_normal(mean=means[i], cov=covs[i], size=1)).float().cuda()
        # Zero out dimensions whose average magnitude is below opts.beta.
        mask = torch.where(torch.from_numpy(means_abs[i])>opts.beta, one, zero).cuda()
        x=x*mask
        for g in groups[i]:
            dw=torch.matmul(outputs['A'][g], x.transpose(0,1)).squeeze(-1)
            dws.append(dw)
    dws=torch.stack(dws)
    # Scale the offsets by opts.alpha and apply them to the first 6 layers
    # only; the remaining layers keep the original codes.
    codes = torch.cat(((opts.alpha*dws.unsqueeze(0)+ outputs['ocodes'][:, :6]), outputs['ocodes'][:, 6:]), dim=1)
    return codes
if __name__=='__main__':
    # Fixed seeds so the sampled latent offsets are reproducible run-to-run.
    SEED = 0
    random.seed(SEED)
    np.random.seed(SEED)
    #load model
    test_opts = TestOptions().parse()
    ckpt = torch.load(test_opts.checkpoint_path, map_location='cpu')
    opts = ckpt['opts']
    # Command-line options override the ones stored in the checkpoint.
    opts.update(vars(test_opts))
    if 'learn_in_w' not in opts:
        opts['learn_in_w'] = False
    if 'output_size' not in opts:
        opts['output_size'] = 1024
    opts = Namespace(**opts)
    net = AGE(opts)
    net.eval()
    net.cuda()
    dataset_args = data_configs.DATASETS[opts.dataset_type]
    transforms_dict = dataset_args['transforms'](opts).get_transforms()
    transform=transforms_dict['transform_inference']
    # get n distribution (only needs to be executed once)
    # class_embeddings=torch.load(os.path.join(test_opts.class_embedding_path, 'class_embeddings.pt'))
    # get_n_distribution(net, transform, class_embeddings, test_opts)
    # generate data
    # allow_pickle is required because the .npy holds a dict payload.
    dist=np.load(os.path.join(opts.n_distribution_path, 'n_distribution.npy'), allow_pickle=True).item()
    test_data_path=test_opts.test_data_path
    output_path=test_opts.output_path
    os.makedirs(output_path, exist_ok=True)
    from_ims = os.listdir(test_data_path)
    for from_im_name in from_ims:
        # Generate n_images augmented variants per input image.
        for j in tqdm(range(test_opts.n_images)):
            from_im = Image.open(os.path.join(test_data_path, from_im_name))
            from_im = from_im.convert('RGB')
            from_im = transform(from_im)
            outputs = net.get_test_code(from_im.unsqueeze(0).to("cuda").float())
            codes=sampler(outputs, dist, test_opts)
            with torch.no_grad():
                res0 = net.decode(codes, randomize_noise=False, resize=opts.resize_outputs)
            res0 = tensor2im(res0[0])
            im_save_path = os.path.join(output_path, from_im_name+'_'+str(j)+'.jpg')
            Image.fromarray(np.array(res0)).save(im_save_path)
| 34.773913 | 119 | 0.662666 |
0aed884ea99152634c395c1e2fcfe04a932262d1 | 1,198 | py | Python | agents/agent_module_dqn/features/test.py | pedMatias/matias_hfo | 6d88e1043a1455f5c1f6cc11b9380869772f4176 | [
"MIT"
] | 1 | 2021-06-03T20:03:50.000Z | 2021-06-03T20:03:50.000Z | agents/agent_module_dqn/features/test.py | pedMatias/matias_hfo | 6d88e1043a1455f5c1f6cc11b9380869772f4176 | [
"MIT"
] | null | null | null | agents/agent_module_dqn/features/test.py | pedMatias/matias_hfo | 6d88e1043a1455f5c1f6cc11b9380869772f4176 | [
"MIT"
] | 1 | 2021-03-14T01:22:33.000Z | 2021-03-14T01:22:33.000Z | #!/usr/bin/hfo_env python3
# encoding utf-8
import argparse
from agents.agent_module_dqn.deep_agent import DQNAgent
from actions_levels.action_module import DiscreteActionsModule
from agents.agent_module_dqn.features.discrete_features import \
DiscreteFeatures1Teammate
if __name__ == '__main__':
    # Evaluate a trained DQN HFO agent over a number of test episodes.
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_opponents', type=int, default=0)
    parser.add_argument('--num_teammates', type=int, default=0)
    parser.add_argument('--train_mode')
    parser.add_argument('--num_ep', type=int, default=0)
    parser.add_argument('--load_file', type=str, default=None)
    # Parse arguments:
    args = parser.parse_args()
    num_team = args.num_teammates
    num_op = args.num_opponents
    num_episodes = args.num_ep
    load_file = args.load_file
    # Action and feature spaces size the network's input/output layers.
    actions = DiscreteActionsModule()
    features = DiscreteFeatures1Teammate(num_op=num_op, num_team=num_team)
    # Start Player:
    agent = DQNAgent(features.get_num_features(), actions.get_num_actions())
    # Test Player: greedy policy (no exploration) with the saved weights.
    agent.epsilon = 0
    agent.load_model(load_file)
    # BUG FIX: the original read `player.test(num_episodes)`, but no `player`
    # variable exists in this scope (NameError); the agent constructed above
    # is what runs the evaluation. (Assumes DQNAgent exposes test() - the
    # original comment "Test Player" suggests so; confirm.)
    av_win_rate = agent.test(num_episodes)
    print("Average win rate = {}".format(av_win_rate))
| 36.30303 | 76 | 0.740401 |
2a1e3015eece50a07be45855c2adfbb99e3b84d9 | 5,840 | py | Python | third_party/learn_joint_bpe_and_vocab.py | YinghuaHuang/daguan-2019 | b0f6b36dba6467fcb1b26c92c78c6be3bd3063b1 | [
"MIT"
] | null | null | null | third_party/learn_joint_bpe_and_vocab.py | YinghuaHuang/daguan-2019 | b0f6b36dba6467fcb1b26c92c78c6be3bd3063b1 | [
"MIT"
] | null | null | null | third_party/learn_joint_bpe_and_vocab.py | YinghuaHuang/daguan-2019 | b0f6b36dba6467fcb1b26c92c78c6be3bd3063b1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Rico Sennrich
# The MIT License (MIT)
# Copyright (c) 2015 University of Edinburgh
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Use byte pair encoding (BPE) to learn a variable-length encoding of the vocabulary in a text.
This script learns BPE jointly on a concatenation of a list of texts (typically the source and target side of a parallel corpus,
applies the learned operation to each and (optionally) returns the resulting vocabulary of each text.
The vocabulary can be used in apply_bpe.py to avoid producing symbols that are rare or OOV in a training text.
Reference:
Rico Sennrich, Barry Haddow and Alexandra Birch (2016). Neural Machine Translation of Rare Words with Subword Units.
Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (ACL 2016). Berlin, Germany.
"""
from __future__ import unicode_literals
import sys
import os
import codecs
import argparse
import tempfile
from collections import Counter
import learn_bpe
import apply_bpe
# hack for python2/3 compatibility
from io import open
argparse.open = open
def create_parser():
    """Build the command-line parser for joint BPE learning options."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="learn BPE-based word segmentation")
    # Declarative option table: (flags, add_argument keyword arguments).
    options = [
        (('--input', '-i'),
         dict(type=argparse.FileType('r'), required=True, nargs='+',
              metavar='PATH',
              help="Input texts (multiple allowed).")),
        (('--output', '-o'),
         dict(type=argparse.FileType('w'), required=True,
              metavar='PATH',
              help="Output file for BPE codes.")),
        (('--symbols', '-s'),
         dict(type=int, default=10000,
              help="Create this many new symbols (each representing a character n-gram) (default: %(default)s))")),
        (('--separator',),
         dict(type=str, default='@@', metavar='STR',
              help="Separator between non-final subword units (default: '%(default)s'))")),
        (('--write-vocabulary',),
         dict(type=argparse.FileType('w'), nargs='+', default=None,
              metavar='PATH', dest='vocab',
              help='Write to these vocabulary files after applying BPE. One per input text. Used for filtering in apply_bpe.py')),
        (('--min-frequency',),
         dict(type=int, default=2, metavar='FREQ',
              help='Stop if no symbol pair has frequency >= FREQ (default: %(default)s))')),
        (('--verbose', '-v'),
         dict(action="store_true",
              help="verbose mode.")),
    ]
    for flags, kwargs in options:
        parser.add_argument(*flags, **kwargs)
    return parser
if __name__ == '__main__':
    # python 2/3 compatibility: force UTF-8 on the standard streams.
    if sys.version_info < (3, 0):
        sys.stderr = codecs.getwriter('UTF-8')(sys.stderr)
        sys.stdout = codecs.getwriter('UTF-8')(sys.stdout)
        sys.stdin = codecs.getreader('UTF-8')(sys.stdin)
    else:
        sys.stderr = codecs.getwriter('UTF-8')(sys.stderr.buffer)
        sys.stdout = codecs.getwriter('UTF-8')(sys.stdout.buffer)
        sys.stdin = codecs.getreader('UTF-8')(sys.stdin.buffer)
    parser = create_parser()
    args = parser.parse_args()
    if args.vocab and len(args.input) != len(args.vocab):
        sys.stderr.write('Error: number of input files and vocabulary files must match\n')
        sys.exit(1)
    # read/write files as UTF-8 (reopen the argparse-opened handles).
    # NOTE(review): if --write-vocabulary is omitted, args.vocab is None and
    # the list comprehension below raises TypeError - confirm the flag is
    # effectively required.
    args.input = [codecs.open(f.name, encoding='UTF-8') for f in args.input]
    args.vocab = [codecs.open(f.name, 'w', encoding='UTF-8') for f in args.vocab]
    # get combined vocabulary of all input texts
    full_vocab = Counter()
    for f in args.input:
        full_vocab += learn_bpe.get_vocabulary(f)
        f.seek(0)
    vocab_list = ['{0} {1}'.format(key, freq) for (key, freq) in full_vocab.items()]
    # learn BPE on combined vocabulary
    with codecs.open(args.output.name, 'w', encoding='UTF-8') as output:
        learn_bpe.main(vocab_list, output, args.symbols, args.min_frequency, args.verbose, is_dict=True)
    with codecs.open(args.output.name, encoding='UTF-8') as codes:
        bpe = apply_bpe.BPE(codes, separator=args.separator)
    # apply BPE to each training corpus and get vocabulary
    for train_file, vocab_file in zip(args.input, args.vocab):
        # delete=False + manual os.remove keeps this portable to platforms
        # where an open NamedTemporaryFile cannot be reopened by name.
        tmp = tempfile.NamedTemporaryFile(delete=False)
        tmp.close()
        tmpout = codecs.open(tmp.name, 'w', encoding='UTF-8')
        train_file.seek(0)
        for line in train_file:
            tmpout.write(bpe.segment(line).strip())
            tmpout.write('\n')
        tmpout.close()
        tmpin = codecs.open(tmp.name, encoding='UTF-8')
        vocab = learn_bpe.get_vocabulary(tmpin)
        tmpin.close()
        os.remove(tmp.name)
        # Write the per-corpus vocabulary, most frequent first.
        for key, freq in sorted(vocab.items(), key=lambda x: x[1], reverse=True):
            vocab_file.write("{0} {1}\n".format(key, freq))
        vocab_file.close()
769d917ac3520b44fdc9bfd610144abf7e2c272c | 1,150 | py | Python | XGBoost_201805/XGBoost_one/skitlearn_baseline.py | yishantao/DailyPractice | ee26859af3faf48e63d6c2850db1d895a8a88fb1 | [
"MIT"
] | null | null | null | XGBoost_201805/XGBoost_one/skitlearn_baseline.py | yishantao/DailyPractice | ee26859af3faf48e63d6c2850db1d895a8a88fb1 | [
"MIT"
] | null | null | null | XGBoost_201805/XGBoost_one/skitlearn_baseline.py | yishantao/DailyPractice | ee26859af3faf48e63d6c2850db1d895a8a88fb1 | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
"""This module is used to bulid the model using a mixed XGBoost and skit-learn"""
from xgboost import XGBClassifier
# 加载LibSVM格式数据模块
from sklearn.datasets import load_svmlight_file
from sklearn.metrics import accuracy_score
from matplotlib import pyplot
# read in data
my_workpath = './data/'
x_train, y_train = load_svmlight_file(my_workpath + 'agaricus.txt.train')
x_test, y_test = load_svmlight_file(my_workpath + 'agaricus.txt.test')
# print(x_train.shape)
# print(x_test.shape)
# 设置boosting迭代计算次数
num_round = 2
# bst = XGBClassifier(**params)
bst = XGBClassifier(max_depth=2, learning_rate=1, n_estimators=num_round, silent=True, objective='binary:logistic')
bst.fit(x_train, y_train)
# 查看模型在训练集上的性能
train_preds = bst.predict(x_train)
train_predictions = [round(value) for value in train_preds]
train_accuracy = accuracy_score(y_train, train_predictions)
print('Train Accuracy:%.2f%%' % (train_accuracy * 100.0))
# make prediction
preds = bst.predict(x_test)
predictions = [round(value) for value in preds]
test_accuracy = accuracy_score(y_test, predictions)
print('Test Accuracy:%.2f%%' % (test_accuracy * 100.0))
| 31.944444 | 115 | 0.770435 |
9b2c3f005071a808a38ffabe85c6c667c84569d6 | 475 | py | Python | students/migrations/0004_auto_20160916_0638.py | SoftwareSecureGroup/students_website | 79403878a0158f56e168ec8d5f43bfa6af1ec86a | [
"MIT"
] | null | null | null | students/migrations/0004_auto_20160916_0638.py | SoftwareSecureGroup/students_website | 79403878a0158f56e168ec8d5f43bfa6af1ec86a | [
"MIT"
] | 1 | 2016-09-17T15:45:35.000Z | 2016-09-17T15:45:35.000Z | students/migrations/0004_auto_20160916_0638.py | SoftwareSecureGroup/students_website | 79403878a0158f56e168ec8d5f43bfa6af1ec86a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-16 06:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: redefines Student.photo as a
    # nullable ImageField uploading to 'static/photos'.
    dependencies = [
        ('students', '0003_auto_20160916_0638'),
    ]

    operations = [
        migrations.AlterField(
            model_name='student',
            name='photo',
            field=models.ImageField(null=True, upload_to='static/photos'),
        ),
    ]
| 23.75 | 74 | 0.625263 |
81afd6a36c441c0efa4c56376b6737da2a8cc722 | 10,438 | py | Python | venv/lib/python3.6/site-packages/werkzeug/contrib/wrappers.py | aitoehigie/britecore_flask | eef1873dbe6b2cc21f770bc6dec783007ae4493b | [
"MIT"
] | null | null | null | venv/lib/python3.6/site-packages/werkzeug/contrib/wrappers.py | aitoehigie/britecore_flask | eef1873dbe6b2cc21f770bc6dec783007ae4493b | [
"MIT"
] | 1 | 2021-06-01T23:32:38.000Z | 2021-06-01T23:32:38.000Z | venv/lib/python3.6/site-packages/werkzeug/contrib/wrappers.py | aitoehigie/britecore_flask | eef1873dbe6b2cc21f770bc6dec783007ae4493b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.wrappers
~~~~~~~~~~~~~~~~~~~~~~~~~
Extra wrappers or mixins contributed by the community. These wrappers can
be mixed in into request objects to add extra functionality.
Example::
from werkzeug.wrappers import Request as RequestBase
from werkzeug.contrib.wrappers import JSONRequestMixin
class Request(RequestBase, JSONRequestMixin):
pass
Afterwards this request object provides the extra functionality of the
:class:`JSONRequestMixin`.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import codecs
try:
from simplejson import loads
except ImportError:
from json import loads
from werkzeug.exceptions import BadRequest
from werkzeug.utils import cached_property
from werkzeug.http import dump_options_header, parse_options_header
from werkzeug._compat import wsgi_decoding_dance
def is_known_charset(charset):
    """Return True if *charset* names a codec Python can look up."""
    try:
        codecs.lookup(charset)
    except LookupError:
        known = False
    else:
        known = True
    return known
class JSONRequestMixin(object):
    """Add a lazily parsed ``json`` property to a request object.

    :exc:`~werkzeug.exceptions.BadRequest` is raised when the content type
    is not JSON or when the request body cannot be parsed as JSON.
    """

    @cached_property
    def json(self):
        """Return the request body parsed with simplejson (or json)."""
        content_type = self.environ.get("CONTENT_TYPE", "")
        if "json" not in content_type:
            raise BadRequest("Not a JSON request")
        try:
            body = self.data.decode(self.charset, self.encoding_errors)
            return loads(body)
        except Exception:
            raise BadRequest("Unable to read JSON request")
class ProtobufRequestMixin(object):
    """Add protobuf parsing to a request object.

    The request body is parsed through `protobuf`_ when possible and a
    :exc:`~werkzeug.exceptions.BadRequest` is raised otherwise.
    .. _protobuf: http://code.google.com/p/protobuf/
    """

    #: by default a :exc:`~werkzeug.exceptions.BadRequest` is raised when
    #: the parsed message is missing required fields.  Set this attribute
    #: to `False` to skip that completeness check.
    protobuf_check_initialization = True

    def parse_protobuf(self, proto_type):
        """Parse the request body into a new instance of *proto_type*."""
        content_type = self.environ.get("CONTENT_TYPE", "")
        if "protobuf" not in content_type:
            raise BadRequest("Not a Protobuf request")
        message = proto_type()
        try:
            message.ParseFromString(self.data)
        except Exception:
            raise BadRequest("Unable to parse Protobuf request")
        # Reject messages whose required fields were not all supplied.
        if self.protobuf_check_initialization and not message.IsInitialized():
            raise BadRequest("Partial Protobuf request")
        return message
class RoutingArgsRequestMixin(object):
    """This request mixin adds support for the wsgiorg routing args
    `specification`_.
    .. _specification: https://wsgi.readthedocs.io/en/latest/specifications/routing_args.html
    """

    def _get_routing_args(self):
        # The spec stores (positional_args, keyword_args) in the environ;
        # a missing key falls back to an empty tuple of positionals.
        return self.environ.get("wsgiorg.routing_args", (()))[0]

    def _set_routing_args(self, value):
        # Shallow requests must not mutate the shared WSGI environ.
        if self.shallow:
            raise RuntimeError(
                "A shallow request tried to modify the WSGI "
                "environment. If you really want to do that, "
                "set `shallow` to False."
            )
        self.environ["wsgiorg.routing_args"] = (value, self.routing_vars)

    routing_args = property(
        _get_routing_args,
        _set_routing_args,
        doc="""
        The positional URL arguments as `tuple`.""",
    )
    # Remove the raw accessors so only the property remains on the class.
    del _get_routing_args, _set_routing_args

    def _get_routing_vars(self):
        rv = self.environ.get("wsgiorg.routing_args")
        if rv is not None:
            return rv[1]
        rv = {}
        # Store a fresh dict through the setter so later mutations are seen
        # by subsequent reads; shallow requests get an uncached dict.
        if not self.shallow:
            self.routing_vars = rv
        return rv

    def _set_routing_vars(self, value):
        # Same shallow-request guard as for routing_args above.
        if self.shallow:
            raise RuntimeError(
                "A shallow request tried to modify the WSGI "
                "environment. If you really want to do that, "
                "set `shallow` to False."
            )
        self.environ["wsgiorg.routing_args"] = (self.routing_args, value)

    routing_vars = property(
        _get_routing_vars,
        _set_routing_vars,
        doc="""
        The keyword URL arguments as `dict`.""",
    )
    del _get_routing_vars, _set_routing_vars
class ReverseSlashBehaviorRequestMixin(object):
    """This mixin reverses the trailing slash behavior of :attr:`script_root`
    and :attr:`path`. This makes it possible to use :func:`~urlparse.urljoin`
    directly on the paths.
    Because it changes the behavior or :class:`Request` this class has to be
    mixed in *before* the actual request class::
        class MyRequest(ReverseSlashBehaviorRequestMixin, Request):
            pass
    This example shows the differences (for an application mounted on
    `/application` and the request going to `/application/foo/bar`):
    +---------------+-------------------+---------------------+
    |               | normal behavior   | reverse behavior    |
    +===============+===================+=====================+
    | `script_root` | ``/application``  | ``/application/``   |
    +---------------+-------------------+---------------------+
    | `path`        | ``/foo/bar``      | ``foo/bar``         |
    +---------------+-------------------+---------------------+
    """

    @cached_property
    def path(self):
        """Requested path as unicode. This works a bit like the regular path
        info in the WSGI environment but will not include a leading slash.
        """
        path = wsgi_decoding_dance(
            self.environ.get("PATH_INFO") or "", self.charset, self.encoding_errors
        )
        # Drop the leading slash so urljoin() treats the path as relative.
        return path.lstrip("/")

    @cached_property
    def script_root(self):
        """The root path of the script including a trailing slash."""
        path = wsgi_decoding_dance(
            self.environ.get("SCRIPT_NAME") or "", self.charset, self.encoding_errors
        )
        # Guarantee exactly one trailing slash so urljoin() appends below it.
        return path.rstrip("/") + "/"
class DynamicCharsetRequestMixin(object):
    """If this mixin is mixed into a request class it will provide
    a dynamic `charset` attribute. This means that if the charset is
    transmitted in the content type headers it's used from there.
    Because it changes the behavior or :class:`Request` this class has
    to be mixed in *before* the actual request class::
        class MyRequest(DynamicCharsetRequestMixin, Request):
            pass
    By default the request object assumes that the URL charset is the
    same as the data charset. If the charset varies on each request
    based on the transmitted data it's not a good idea to let the URLs
    change based on that. Most browsers assume either utf-8 or latin1
    for the URLs if they have troubles figuring out. It's strongly
    recommended to set the URL charset to utf-8::
        class MyRequest(DynamicCharsetRequestMixin, Request):
            url_charset = 'utf-8'
    .. versionadded:: 0.6
    """

    #: the default charset that is assumed if the content type header
    #: is missing or does not contain a charset parameter. The default
    #: is latin1 which is what HTTP specifies as default charset.
    #: You may however want to set this to utf-8 to better support
    #: browsers that do not transmit a charset for incoming data.
    default_charset = "latin1"

    def unknown_charset(self, charset):
        """Called if a charset was provided but is not supported by
        the Python codecs module. By default latin1 is assumed then
        to not lose any information, you may override this method to
        change the behavior.
        :param charset: the charset that was not found.
        :return: the replacement charset.
        """
        return "latin1"

    @cached_property
    def charset(self):
        """The charset from the content type."""
        header = self.environ.get("CONTENT_TYPE")
        if header:
            ct, options = parse_options_header(header)
            charset = options.get("charset")
            if charset:
                if is_known_charset(charset):
                    return charset
                # Charset unknown to Python's codecs: delegate to the hook.
                return self.unknown_charset(charset)
        # No header or no usable charset parameter: fall back to the default.
        return self.default_charset
class DynamicCharsetResponseMixin(object):
    """If this mixin is mixed into a response class it will provide
    a dynamic `charset` attribute. This means that if the charset is
    looked up and stored in the `Content-Type` header and updates
    itself automatically. This also means a small performance hit but
    can be useful if you're working with different charsets on
    responses.
    Because the charset attribute is not a property at class-level, the
    default value is stored in `default_charset`.
    Because it changes the behavior or :class:`Response` this class has
    to be mixed in *before* the actual response class::
        class MyResponse(DynamicCharsetResponseMixin, Response):
            pass
    .. versionadded:: 0.6
    """

    #: the default charset.
    default_charset = "utf-8"

    def _get_charset(self):
        # Read the charset parameter straight out of the Content-Type header.
        header = self.headers.get("content-type")
        if header:
            charset = parse_options_header(header)[1].get("charset")
            if charset:
                return charset
        return self.default_charset

    def _set_charset(self, charset):
        # Rewrite the Content-Type header with the new charset parameter;
        # without an existing header there is nowhere to store it.
        header = self.headers.get("content-type")
        ct, options = parse_options_header(header)
        if not ct:
            raise TypeError("Cannot set charset if Content-Type " "header is missing.")
        options["charset"] = charset
        self.headers["Content-Type"] = dump_options_header(ct, options)

    charset = property(
        _get_charset,
        _set_charset,
        doc="""
        The charset for the response. It's stored inside the
        Content-Type header as a parameter.""",
    )
    # Remove the raw accessors so only the property remains on the class.
    del _get_charset, _set_charset
| 34.448845 | 93 | 0.635754 |
c2f24a6d41e3356251a2b31e0e67eb529c0c5bc8 | 2,667 | py | Python | desktop/core/ext-py/django_celery_results-1.0.4/django_celery_results/migrations/0001_initial.py | maulikjs/hue | 59ac879b55bb6fb26ecb4e85f4c70836fc21173f | [
"Apache-2.0"
] | 5,079 | 2015-01-01T03:39:46.000Z | 2022-03-31T07:38:22.000Z | desktop/core/ext-py/django_celery_results-1.0.4/django_celery_results/migrations/0001_initial.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 1,623 | 2015-01-01T08:06:24.000Z | 2022-03-30T19:48:52.000Z | desktop/core/ext-py/django_celery_results-1.0.4/django_celery_results/migrations/0001_initial.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 2,033 | 2015-01-04T07:18:02.000Z | 2022-03-28T19:55:47.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for django-celery-results: the ``TaskResult`` table."""

    initial = True

    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='TaskResult',
            fields=[
                ('id', models.AutoField(auto_created=True,
                                        primary_key=True,
                                        serialize=False,
                                        verbose_name='ID')),
                # Celery task UUID; max length is configurable via settings.
                ('task_id', models.CharField(
                    max_length=getattr(
                        settings,
                        'DJANGO_CELERY_RESULTS_TASK_ID_MAX_LENGTH',
                        255
                    ),
                    unique=True,
                    verbose_name='task id'
                )),
                # Celery task state machine values.
                ('status', models.CharField(choices=[('FAILURE', 'FAILURE'),
                                                     ('PENDING', 'PENDING'),
                                                     ('RECEIVED', 'RECEIVED'),
                                                     ('RETRY', 'RETRY'),
                                                     ('REVOKED', 'REVOKED'),
                                                     ('STARTED', 'STARTED'),
                                                     ('SUCCESS', 'SUCCESS')],
                                            default='PENDING',
                                            max_length=50,
                                            verbose_name='state')),
                ('content_type', models.CharField(
                    max_length=128, verbose_name='content type')),
                ('content_encoding', models.CharField(
                    max_length=64, verbose_name='content encoding')),
                ('result', models.TextField(default=None, editable=False,
                                            null=True)),
                ('date_done', models.DateTimeField(
                    auto_now=True, verbose_name='done at')),
                ('traceback', models.TextField(
                    blank=True, null=True, verbose_name='traceback')),
                ('hidden', models.BooleanField(
                    db_index=True, default=False, editable=False)),
                ('meta', models.TextField(default=None, editable=False,
                                          null=True)),
            ],
            options={
                'verbose_name': 'task result',
                'verbose_name_plural': 'task results',
            },
        ),
    ]
| 42.333333 | 78 | 0.40045 |
57414f600167f9f0c7b034cf8675f805b5389349 | 8,570 | py | Python | padthai/gpt_neo/__init__.py | titipata/padthai | 60848659cd78cf0adaf957f958ebcfc366f4447f | [
"Apache-2.0"
] | 3 | 2017-05-01T10:25:02.000Z | 2017-06-05T16:40:43.000Z | padthai/gpt_neo/__init__.py | titipata/padthai | 60848659cd78cf0adaf957f958ebcfc366f4447f | [
"Apache-2.0"
] | 3 | 2017-04-30T09:54:11.000Z | 2017-06-27T20:56:18.000Z | padthai/gpt_neo/__init__.py | titipata/padthai | 60848659cd78cf0adaf957f958ebcfc366f4447f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from typing import List
import torch
from torch.utils.data import Dataset, random_split
from transformers import (
GPT2Tokenizer,
TrainingArguments,
Trainer,
GPTNeoForCausalLM
)
import os
torch.manual_seed(42)
class ListDataset(Dataset):
    """Tokenized text dataset for causal-LM fine-tuning.

    Each text is wrapped in *bos_token* / *eos_token*, tokenized, and
    padded/truncated to *max_length*.
    Thank you code from https://link.medium.com/4FfbALWz8gb
    """

    def __init__(
        self,
        txt_list: List[str],
        tokenizer: GPT2Tokenizer,
        max_length: int,
        bos_token: str,
        eos_token: str
    ):
        self.input_ids = []
        self.attn_masks = []
        self.labels = []
        for text in txt_list:
            encoded = tokenizer(
                bos_token + text + eos_token,
                truncation=True,
                max_length=max_length,
                padding="max_length"
            )
            self.input_ids.append(torch.tensor(encoded['input_ids']))
            self.attn_masks.append(torch.tensor(encoded['attention_mask']))

    def __len__(self):
        """Number of encoded samples."""
        return len(self.input_ids)

    def __getitem__(self, idx: int):
        """Return ``(input_ids, attention_mask)`` for the given index."""
        return self.input_ids[idx], self.attn_masks[idx]
class GPTNeoFewShot:
    """Few-Shot Learning using GPT-Neo.

    Homepage: `EleutherAI/gpt-neo <https://github.com/EleutherAI/gpt-neo>`_
    Thank you code from https://link.medium.com/4FfbALWz8gb

    :param str model_dir: path of model dir
    :param str model_name: model name (thaigpt-next or gpt-neo)
    :param str device: device
    :param str size: model size

    **Options for size**
        * *125M* (default) - GPT-Neo 125M / thaigpt-next-125M
        * *1.3B* - GPT-Neo 1.3B
        * *2.7B* - GPT-Neo 2.7B

    **Options for model_name**
        * *thaigpt-next* (default) - It is fine-tune the GPT-Neo model for Thai language.
        * *gpt-neo*
    """

    def __init__(
        self,
        model_dir: str,
        model_name: str = "gpt-neo",
        device: str = "cuda",
        size: str = "125M"
    ):
        """Load an existing checkpoint from *model_dir* or download one."""
        self.device = device
        self.bos_token = '<|startoftext|>'
        self.eos_token = '<|endoftext|>'
        self.pad_token = '<|pad|>'
        self.model_dir = model_dir
        if not os.path.exists(self.model_dir):
            # No local checkpoint yet: fetch the pretrained weights.
            self._init_model(model_name, size)
        else:
            self.load_model()

    def _init_model(self, model_name: str, size: str = "125M") -> None:
        """Download tokenizer/model weights and save the tokenizer locally.

        :param str model_name: ``thaigpt-next`` or ``gpt-neo``
        :param str size: ``125M`` (default), ``1.3B`` or ``2.7B``
        :raises ValueError: for an unsupported model/size combination
        """
        if model_name == "thaigpt-next" and size == "125M":
            self.pretrained = "wannaphong/thaigpt-next-125m"
        elif model_name == "gpt-neo":
            self.pretrained = "EleutherAI/gpt-neo-" + str(size)
        else:
            raise ValueError('Not support {0}'.format(model_name + " " + size))
        self.tokenizer = GPT2Tokenizer.from_pretrained(
            self.pretrained,
            bos_token=self.bos_token,
            eos_token=self.eos_token,
            pad_token=self.pad_token
        )
        self.tokenizer.save_pretrained(self.model_dir)
        self.model = GPTNeoForCausalLM.from_pretrained(
            self.pretrained
        ).to(self.device)
        # Special tokens were added, so the embedding matrix must grow.
        self.model.resize_token_embeddings(len(self.tokenizer))

    def load_model(self):
        """Load tokenizer and model from ``self.model_dir``."""
        self.tokenizer = GPT2Tokenizer.from_pretrained(
            self.model_dir,
            bos_token=self.bos_token,
            eos_token=self.eos_token,
            pad_token=self.pad_token
        )
        self.model = GPTNeoForCausalLM.from_pretrained(
            self.model_dir
        ).to(self.device)
        self.model.resize_token_embeddings(len(self.tokenizer))

    def train(
        self,
        data: List[str],
        logging_dir: str,
        num_train_epochs: int = 10,
        train_size: float = 0.95,
        batch_size: int = 2,
        save_every_epochs: bool = True
    ):
        """Fine-tune the model on *data* and save it to ``self.model_dir``.

        :param data: list of training texts
        :param logging_dir: logging directory
        :param num_train_epochs: number of training epochs
        :param train_size: fraction of the data used for training; the
            remainder becomes the validation split
        :param batch_size: per-device batch size
        :param save_every_epochs: save a checkpoint after every epoch
        """
        if save_every_epochs:
            self.evaluation_strategy = "epoch"
        else:
            self.evaluation_strategy = "no"
        self.data = data
        # Pad/truncate every sample to the longest tokenized example.
        self.max_length = max(
            [len(self.tokenizer.encode(i)) for i in self.data]
        )
        self.dataset = ListDataset(
            self.data,
            self.tokenizer,
            max_length=self.max_length,
            bos_token=self.bos_token,
            eos_token=self.eos_token
        )
        self.train_size = int(train_size * len(self.dataset))
        _, self.val_dataset = random_split(
            self.dataset, [
                self.train_size, len(self.dataset) - self.train_size
            ]
        )
        self.training_args = TrainingArguments(
            output_dir=self.model_dir,
            do_train=True,
            do_eval=True,
            # NOTE(review): evaluation runs every epoch regardless of
            # ``save_every_epochs``; only checkpoint *saving* is optional.
            evaluation_strategy="epoch",
            num_train_epochs=num_train_epochs,
            per_device_train_batch_size=batch_size,
            logging_strategy="epoch",
            save_strategy=self.evaluation_strategy,
            per_device_eval_batch_size=batch_size,
            logging_dir=logging_dir
        )
        # Bug fix: the Trainer used to be stored as ``self.train``, which
        # clobbered this very method after the first call and made a second
        # ``train()`` invocation crash.  Store it as ``self.trainer``.
        self.trainer = Trainer(
            model=self.model,
            args=self.training_args,
            train_dataset=self.dataset,
            eval_dataset=self.val_dataset,
            # Causal-LM collator: labels are the (shifted-internally) inputs.
            data_collator=lambda data: {
                'input_ids': torch.stack([f[0] for f in data]),
                'attention_mask': torch.stack([f[1] for f in data]),
                'labels': torch.stack([f[0] for f in data])
            }
        )
        self.trainer.train()
        self.trainer.evaluate()
        self.trainer.save_model(self.model_dir)

    def remove_bos(self, txt: str) -> str:
        """Strip the begin-of-sentence token from *txt*."""
        return txt.replace(self.bos_token, '')

    def remove_eos(self, txt: str) -> str:
        """Strip the end-of-sentence token from *txt*."""
        return txt.replace(self.eos_token, '')

    def remove_bos_eos(self, txt: str) -> str:
        """Strip both special tokens from *txt*."""
        return self.remove_eos(self.remove_bos(txt))

    def gen(
        self,
        text: str,
        top_k: int = 50,
        max_length: int = 89,
        top_p: float = 0.95,
        keep_bos: bool = False,
        keep_eos: bool = False,
        temperature: int = 1,
        num_return_sequences: int = 5,
        skip_special_tokens: bool = True
    ) -> List[str]:
        """Sample continuations of *text*.

        :param str text: prompt text
        :param int top_k: top-k sampling cutoff
        :param int max_length: max length of return sequences
        :param float top_p: nucleus sampling cutoff
        :param bool keep_bos: keep the begin-of-sentence token
        :param bool keep_eos: keep the end-of-sentence token
        :param int temperature: sampling temperature
        :param int num_return_sequences: number of return sequences
        :param bool skip_special_tokens: skip special tokens when decoding
        :return: generated sequences
        :rtype: List[str]
        """
        self.generated = self.tokenizer(
            self.bos_token + text, return_tensors="pt"
        ).input_ids.to(self.device)
        self.sample_outputs = self.model.generate(
            self.generated,
            do_sample=True,
            top_k=top_k,
            max_length=max_length,
            top_p=top_p,
            temperature=temperature,
            num_return_sequences=num_return_sequences
        )
        decoded = [
            self.tokenizer.decode(
                i, skip_special_tokens=skip_special_tokens
            ) for i in self.sample_outputs
        ]
        if not keep_bos and not keep_eos:
            return [self.remove_bos_eos(i) for i in decoded]
        elif not keep_bos:
            return [self.remove_bos(i) for i in decoded]
        elif not keep_eos:
            return [self.remove_eos(i) for i in decoded]
        else:
            return decoded
| 32.097378 | 89 | 0.570595 |
51cfbb6822fb6d71d3f8a03cb25dfea0ccb5c29d | 369 | py | Python | other/dingding/dingtalk/api/rest/OapiDingtalkImpaasMessageCrossdomainReadRequest.py | hth945/pytest | 83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc | [
"Apache-2.0"
] | null | null | null | other/dingding/dingtalk/api/rest/OapiDingtalkImpaasMessageCrossdomainReadRequest.py | hth945/pytest | 83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc | [
"Apache-2.0"
] | null | null | null | other/dingding/dingtalk/api/rest/OapiDingtalkImpaasMessageCrossdomainReadRequest.py | hth945/pytest | 83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc | [
"Apache-2.0"
] | null | null | null | '''
Created by auto_sdk on 2020.09.08
'''
from dingtalk.api.base import RestApi
class OapiDingtalkImpaasMessageCrossdomainReadRequest(RestApi):
    """Request wrapper for the DingTalk cross-domain message-read endpoint."""

    def __init__(self, url=None):
        # Initialise the base REST request with an optional gateway URL.
        RestApi.__init__(self, url)
        # Payload model describing which message(s) to mark as read.
        self.message_read_model = None

    def getHttpMethod(self):
        """HTTP verb used for this call."""
        return 'POST'

    def getapiname(self):
        """Dotted API name used by the SDK dispatcher."""
        return 'dingtalk.oapi.dingtalk.impaas.message.crossdomain.read'
| 24.6 | 65 | 0.783198 |
5a870413b2697f6fd73a6644bebfeaea8876411c | 765 | py | Python | setup.py | geyang/memory | c6f13249f5427fe49ca2db42cc2b6a8574a25638 | [
"MIT"
] | 1 | 2020-11-19T06:55:14.000Z | 2020-11-19T06:55:14.000Z | setup.py | geyang/memory | c6f13249f5427fe49ca2db42cc2b6a8574a25638 | [
"MIT"
] | null | null | null | setup.py | geyang/memory | c6f13249f5427fe49ca2db42cc2b6a8574a25638 | [
"MIT"
] | null | null | null | from os import path
from setuptools import setup, find_packages
# Resolve project metadata files relative to this setup.py.
here = path.abspath(path.dirname(__file__))

with open(path.join(here, 'VERSION'), encoding='utf-8') as version_file:
    version = version_file.read()

with open(path.join(here, 'README'), encoding='utf-8') as readme_file:
    long_description = readme_file.read()

setup(
    name='memory',
    # note: only include the fetch package and children, no tests or experiments
    packages=[pkg for pkg in find_packages() if "spec" not in pkg],
    install_requires=["numpy", ],
    description="Efficient Implementation of Sparse Graphs with Numpy",
    long_description=long_description,
    author='Ge Yang',
    url='https://github.com/geyang/memory',
    author_email='ge.ike.yang@gmail.com',
    version=version)
| 38.25 | 93 | 0.695425 |
b497a8c12ddba39fc83a26695de52688de07bb7a | 1,538 | py | Python | games/migrations/0006_new_translated_fields.py | unawe/spaceawe | adac8311d6bd7d024a87cd5f977a5a0aafedd51d | [
"MIT"
] | null | null | null | games/migrations/0006_new_translated_fields.py | unawe/spaceawe | adac8311d6bd7d024a87cd5f977a5a0aafedd51d | [
"MIT"
] | 103 | 2016-04-14T05:54:48.000Z | 2022-03-11T23:13:47.000Z | games/migrations/0006_new_translated_fields.py | unawe/spaceawe | adac8311d6bd7d024a87cd5f977a5a0aafedd51d | [
"MIT"
] | 2 | 2016-11-02T15:57:31.000Z | 2017-01-06T14:03:43.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from parler.utils.context import switch_language
def copy_en_fields(apps, schema_editor):
    """Copy the untranslated Game fields onto the English translation rows."""
    Game = apps.get_model("games", "Game")
    GameTranslation = apps.get_model("games", "GameTranslation")
    for game in Game.objects.all():
        translation = GameTranslation.objects.get(master=game, language_code='en')
        translation.slug = game.slug
        translation.title = game.title
        translation.teaser = game.teaser
        translation.save()
class Migration(migrations.Migration):
    """Add translated slug/title/teaser fields to ``GameTranslation`` and
    seed the English rows from the old untranslated ``Game`` columns.
    """

    dependencies = [
        ('games', '0005_auto_20160204_0713'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='game',
            options={'ordering': ['release_date']},
        ),
        migrations.AddField(
            model_name='gametranslation',
            name='slug',
            field=models.SlugField(max_length=255, blank=True, help_text='The Slug must be unique, and closely match the title for better SEO; it is used as part of the URL.'),
        ),
        migrations.AddField(
            model_name='gametranslation',
            name='title',
            field=models.CharField(max_length=255, blank=True, help_text='Short (and commonly used) name'),
        ),
        migrations.AddField(
            model_name='gametranslation',
            name='teaser',
            field=models.CharField(max_length=255, blank=True),
        ),
        # Data migration: backfill the new translated fields (English only).
        migrations.RunPython(copy_en_fields),
    ]
| 27.963636 | 176 | 0.622887 |
08c3916548eeeef05d3787e37342bc6cea54b580 | 7,189 | py | Python | test/test_parsing_primitives.py | chenliangomc/pymap | 42581712631e9e9787e9dd094a22f5cc607f804d | [
"MIT"
] | null | null | null | test/test_parsing_primitives.py | chenliangomc/pymap | 42581712631e9e9787e9dd094a22f5cc607f804d | [
"MIT"
] | null | null | null | test/test_parsing_primitives.py | chenliangomc/pymap | 42581712631e9e9787e9dd094a22f5cc607f804d | [
"MIT"
] | null | null | null |
import unittest
from pymap.parsing import Params
from pymap.parsing.exceptions import NotParseable, RequiresContinuation
from pymap.parsing.primitives import Nil, Number, Atom, String, QuotedString, \
LiteralString, ListP
class TestNil(unittest.TestCase):
    """Parsing and serialization of the IMAP ``NIL`` primitive."""

    def test_parse(self):
        # Leading whitespace is consumed; trailing buffer is returned.
        ret, buf = Nil.parse(b' nil ', Params())
        self.assertIsInstance(ret, Nil)
        self.assertIsNone(ret.value)
        self.assertEqual(b' ', buf)

    def test_parse_failure(self):
        # Empty input and a longer atom starting with "nil" both fail.
        with self.assertRaises(NotParseable):
            Nil.parse(b'', Params())
        with self.assertRaises(NotParseable):
            Nil.parse(b'niltest', Params())

    def test_bytes(self):
        nil = Nil()
        self.assertEqual(b'NIL', bytes(nil))
class TestNumber(unittest.TestCase):
    """Parsing and serialization of the IMAP number primitive."""

    def test_parse(self):
        ret, buf = Number.parse(b' 123 ', Params())
        self.assertIsInstance(ret, Number)
        self.assertEqual(123, ret.value)
        self.assertEqual(b' ', buf)

    def test_parse_failure(self):
        # Non-digit input, and digits followed by letters, are rejected.
        with self.assertRaises(NotParseable):
            Number.parse(b'abc', Params())
        with self.assertRaises(NotParseable):
            Number.parse(b'123abc', Params())

    def test_bytes(self):
        nil = Number(456)
        self.assertEqual(b'456', bytes(nil))
class TestAtom(unittest.TestCase):
    """Parsing and serialization of the IMAP atom primitive."""

    def test_parse(self):
        # Case is preserved; parsing stops at the first space.
        ret, buf = Atom.parse(b' AtoM asdf ', Params())
        self.assertIsInstance(ret, Atom)
        self.assertEqual(b'AtoM', ret.value)
        self.assertEqual(b' asdf ', buf)

    def test_parse_failure(self):
        # Braces are atom-special characters and may not start an atom.
        with self.assertRaises(NotParseable):
            Atom.parse(b'{}', Params())

    def test_bytes(self):
        nil = Atom(b'TEST.STUFF:asdf')
        self.assertEqual(b'TEST.STUFF:asdf', bytes(nil))
class TestString(unittest.TestCase):
    """Parsing and serialization of quoted and literal IMAP strings."""

    def test_quoted_parse(self):
        # Backslash escapes inside a quoted string are unescaped on parse.
        ret, buf = String.parse(br' "one\"two\\three" ', Params())
        self.assertIsInstance(ret, QuotedString)
        self.assertEqual(br'one"two\three', ret.value)
        self.assertEqual(b' ', buf)

    def test_quoted_parse_empty(self):
        ret, buf = String.parse(br' "" ', Params())
        self.assertIsInstance(ret, QuotedString)
        self.assertEqual(br'', ret.value)
        self.assertEqual(b' ', buf)

    def test_quoted_parse_failure(self):
        # Unquoted text, embedded CRLF, bad escapes and an unterminated
        # quote are all invalid quoted strings.
        with self.assertRaises(NotParseable):
            String.parse(b'test', Params())
        with self.assertRaises(NotParseable):
            String.parse(b'"one\r\ntwo"', Params())
        with self.assertRaises(NotParseable):
            String.parse(br'"one\ two"', Params())
        with self.assertRaises(NotParseable):
            String.parse(b'"test', Params())

    def test_quoted_bytes(self):
        # Serialization re-escapes quote and backslash characters.
        qstring1 = QuotedString(b'one"two\\three')
        self.assertEqual(b'"one\\"two\\\\three"', bytes(qstring1))
        # An explicit raw form is emitted verbatim.
        qstring2 = QuotedString(b'test', b'"asdf"')
        self.assertEqual(b'"asdf"', bytes(qstring2))

    def test_literal_parse(self):
        # {N} literals take N octets from the continuation data.
        ret, buf = String.parse(
            b'{5}\r\n', Params(continuations=[b'test\x01abc']))
        self.assertIsInstance(ret, LiteralString)
        self.assertEqual(b'test\x01', ret.value)
        self.assertFalse(ret.binary)
        self.assertEqual(b'abc', buf)

    def test_literal_parse_empty(self):
        ret, buf = String.parse(
            b'{0}\r\n', Params(continuations=[b'abc']))
        self.assertIsInstance(ret, LiteralString)
        self.assertEqual(b'', ret.value)
        self.assertEqual(b'abc', buf)

    def test_literal_plus(self):
        # {N+} non-synchronizing literals carry the data inline.
        ret, buf = String.parse(b'{5+}\r\ntest\x01abc', Params())
        self.assertIsInstance(ret, LiteralString)
        self.assertEqual(b'test\x01', ret.value)
        self.assertFalse(ret.binary)
        self.assertEqual(b'abc', buf)

    def test_literal_binary(self):
        # ~{N} marks the literal as binary (RFC 3516 literal8).
        ret, buf = String.parse(
            b'~{3}\r\n', Params(continuations=[b'\x00\x01\02abc']))
        self.assertIsInstance(ret, LiteralString)
        self.assertEqual(b'\x00\x01\x02', ret.value)
        self.assertTrue(ret.binary)
        self.assertEqual(b'abc', buf)

    def test_literal_plus_binary(self):
        ret, buf = String.parse(b'~{3+}\r\n\x00\x01\02abc', Params())
        self.assertIsInstance(ret, LiteralString)
        self.assertEqual(b'\x00\x01\x02', ret.value)
        self.assertTrue(ret.binary)
        self.assertEqual(b'abc', buf)

    def test_literal_parse_failure(self):
        # Malformed headers, missing CRLF, short/overlong continuation data.
        with self.assertRaises(NotParseable):
            String.parse(b'{}\r\n', Params())
        with self.assertRaises(NotParseable):
            String.parse(b'{10}', Params())
        with self.assertRaises(NotParseable):
            String.parse(b'{10}\r\nabc', Params())
        # A synchronizing literal without continuation data asks for one.
        with self.assertRaises(RequiresContinuation):
            String.parse(b'{10}\r\n', Params())
        with self.assertRaises(NotParseable):
            String.parse(b'{10}\r\n', Params(continuations=[b'a'*9]))
        with self.assertRaises(NotParseable):
            String.parse(b'{10+}\r\n' + (b'a'*9), Params())
        # Oversized literals are rejected with the TOOBIG response code.
        with self.assertRaises(NotParseable) as raised:
            String.parse(b'{4097}\r\n', Params())
        self.assertEqual(b'[TOOBIG]', bytes(raised.exception.code))

    def test_literal_bytes(self):
        qstring1 = LiteralString(b'one\r\ntwo')
        self.assertEqual(b'{8}\r\none\r\ntwo', bytes(qstring1))
        qstring2 = LiteralString(b'')
        self.assertEqual(b'{0}\r\n', bytes(qstring2))

    def test_build_binary(self):
        # build() with binary=True produces a literal8 serialization.
        ret = String.build(b'\x00\x01', True)
        self.assertEqual(b'\x00\x01', ret.value)
        self.assertTrue(ret.binary)
        self.assertEqual(b'~{2}\r\n\x00\x01', bytes(ret))
class TestList(unittest.TestCase):
    """Parsing and serialization of parenthesized IMAP lists."""

    def test_parse(self):
        # Mixed element types are parsed recursively, in order.
        ret, buf = ListP.parse(
            b' (ONE 2 (NIL) "four" ) ',
            Params(list_expected=[Nil, Number, Atom, String, ListP]))
        self.assertIsInstance(ret, ListP)
        self.assertEqual(4, len(ret.value))
        self.assertEqual(b' ', buf)
        self.assertIsInstance(ret.value[0], Atom)
        self.assertEqual(b'ONE', ret.value[0].value)
        self.assertIsInstance(ret.value[1], Number)
        self.assertEqual(2, ret.value[1].value)
        self.assertIsInstance(ret.value[2], ListP)
        self.assertIsNone(ret.value[2].value[0].value)
        self.assertIsInstance(ret.value[3], QuotedString)
        self.assertEqual(b'four', ret.value[3].value)

    def test_parse_empty(self):
        ret, buf = ListP.parse(br' () ', Params())
        self.assertIsInstance(ret, ListP)
        self.assertEqual([], ret.value)
        self.assertEqual(b' ', buf)

    def test_parse_failure(self):
        # Braces, missing separators, and unexpected element types fail.
        with self.assertRaises(NotParseable):
            ListP.parse(b'{}', Params())
        with self.assertRaises(NotParseable):
            ListP.parse(b'("one"TWO)', Params(list_expected=[Atom, String]))
        with self.assertRaises(NotParseable):
            ListP.parse(b'(123 abc 456)', Params(list_expected=[Number]))

    def test_bytes(self):
        ret = ListP([QuotedString(b'abc'), Number(123), ListP([Nil()])])
        self.assertEqual(b'("abc" 123 (NIL))', bytes(ret))
| 36.492386 | 79 | 0.618584 |
3fc100d004f12d64ef519d51e4ae3a3169e88cec | 3,102 | py | Python | armus1/db_model/db_handle.py | lizewei-681/Test-master | feaa48d341340ba985b7f0505ca64cc0b837f123 | [
"MIT"
] | null | null | null | armus1/db_model/db_handle.py | lizewei-681/Test-master | feaa48d341340ba985b7f0505ca64cc0b837f123 | [
"MIT"
] | null | null | null | armus1/db_model/db_handle.py | lizewei-681/Test-master | feaa48d341340ba985b7f0505ca64cc0b837f123 | [
"MIT"
] | null | null | null | from sqlalchemy import *
from sqlalchemy.orm import *
from sqlalchemy.ext.declarative import *
import datetime
from datetime import timedelta
# 初始化数据库连接:
engine = create_engine('mysql+pymysql://root:12345678@localhost:3306/notice_information')
#=create_engine('sqlite:///universitys.db')
# 创建DBSession类型:
DBSession = sessionmaker(bind=engine)
Base = declarative_base()
class Notification(Base):
    """ORM mapping for the ``notifications`` table (lecture notices)."""
    __tablename__ = 'notifications'
    url = Column(String, primary_key=True)  # URL of the full notice text
    title = Column(String(100))  # lecture title
    college = Column(String)  # university hosting the lecture
    speaker = Column(String)  # lecture speaker
    venue = Column(String)  # lecture venue
    time = Column(DATETIME)  # time the lecture takes place
    notify_time = Column(DATE)  # date the notice was published
def delete():
    """Keep only security-related notices.

    Selects rows whose title mentions cryptography / information security
    (Chinese or English keywords), wipes the table, then re-inserts only
    the selected rows.
    """
    session = DBSession()
    # One query with all four patterns instead of the old two duplicated
    # query+insert loops.  This also avoids inserting the same row twice
    # (violating the ``url`` primary key) when a title matches both a
    # Chinese and an English keyword.
    keep = session.query(Notification).filter(or_(
        Notification.title.like("%密码学%"),
        Notification.title.like("%信息安全%"),
        Notification.title.like("%security%"),
        Notification.title.like("%password%"),
    )).all()
    session.query(Notification).delete()
    session.commit()
    for row in keep:
        data = row.__dict__
        session.add(Notification(
            title=data['title'], speaker=data['speaker'], time=data['time'],
            venue=data['venue'], college=data['college'], url=data['url'],
            notify_time=data['notify_time']))
    # Commit the re-inserts in one transaction and release the connection.
    session.commit()
    session.close()
def orderbyrelease():  # sort by publication date (newest first), past year only
    """Print notices published within the last 365 days, newest first."""
    session = DBSession()
    temp = session.query(Notification).filter(Notification.notify_time >= datetime.datetime.now() - timedelta(days=365)).order_by(desc(Notification.notify_time)).all()
    print("按照通知发布时间由近及远排序:")
    for t in temp:
        # Read the mapped columns through the instance __dict__ snapshot.
        t_dict = t.__dict__
        print("讲座标题:",t_dict['title'])
        print("报告人:",t_dict['speaker'])
        print("时间:",t_dict['time'])
        print("地点:",t_dict['venue'])
        print("大学:",t_dict['college'])
        print("通知全文链接:",t_dict['url'])
        print()
def orderbytime():  # sort by lecture time (latest first), past year only
    """Print notices published within the last 365 days, ordered by
    lecture time, latest first."""
    session = DBSession()
    temp = session.query(Notification).filter(Notification.notify_time >= datetime.datetime.now() - timedelta(days=365)).order_by(desc(Notification.time)).all()
    print("按照报告举行时间由近及远排序:")
    for t in temp:
        # Read the mapped columns through the instance __dict__ snapshot.
        t_dict = t.__dict__
        print("讲座标题:",t_dict['title'])
        print("报告人:",t_dict['speaker'])
        print("时间:",t_dict['time'])
        print("地点:",t_dict['venue'])
        print("大学:",t_dict['college'])
        print("通知全文链接:",t_dict['url'])
        print()
delete()
orderbytime() | 39.769231 | 217 | 0.630561 |
e18eca866cd0dbbb01bbb41e8ff5ecb05c51ace3 | 36 | py | Python | tests/proc_basic.py | farisachugthai/pyuv | 39342fc2fd688f2fb2120d3092dd9cf52f537de2 | [
"MIT"
] | 826 | 2015-01-02T15:03:20.000Z | 2022-03-28T01:32:43.000Z | tests/proc_basic.py | farisachugthai/pyuv | 39342fc2fd688f2fb2120d3092dd9cf52f537de2 | [
"MIT"
] | 70 | 2015-01-09T13:55:03.000Z | 2022-03-31T11:00:16.000Z | tests/proc_basic.py | farisachugthai/pyuv | 39342fc2fd688f2fb2120d3092dd9cf52f537de2 | [
"MIT"
] | 98 | 2015-01-27T08:30:21.000Z | 2021-12-13T08:12:51.000Z | #!/usr/bin/env python
# NOTE(review): module-level sentinel; presumably tells the test harness
# that this module defines no test callable — confirm against the runner.
test = None
| 7.2 | 21 | 0.638889 |
ea453d299385b098e914c3eaa0dcac96f41d0749 | 8,575 | py | Python | ccxtbt/ccxtstore.py | eapfel/bt-ccxt-store | 64e261ef943269d7ae0735e82f91c0f3a2f9b277 | [
"MIT"
] | null | null | null | ccxtbt/ccxtstore.py | eapfel/bt-ccxt-store | 64e261ef943269d7ae0735e82f91c0f3a2f9b277 | [
"MIT"
] | null | null | null | ccxtbt/ccxtstore.py | eapfel/bt-ccxt-store | 64e261ef943269d7ae0735e82f91c0f3a2f9b277 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2017 Ed Bartosh <bartosh@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import time
from datetime import datetime
from functools import wraps
import backtrader as bt
import ccxt
from backtrader.metabase import MetaParams
from backtrader.utils.py3 import with_metaclass
from ccxt.base.errors import NetworkError, ExchangeError, RequestTimeout, RateLimitExceeded
class MetaSingleton(MetaParams):
    """Metaclass that makes every class using it a singleton: the first
    instantiation is cached and returned by all subsequent calls."""

    def __init__(cls, name, bases, namespace):
        super(MetaSingleton, cls).__init__(name, bases, namespace)
        # Cache slot for the one-and-only instance of the metaclassed class.
        cls._singleton = None

    def __call__(cls, *args, **kwargs):
        # Instantiate lazily on first call; reuse the cached instance after.
        if cls._singleton is None:
            cls._singleton = super(MetaSingleton, cls).__call__(*args, **kwargs)
        return cls._singleton
class CCXTStore(with_metaclass(MetaSingleton, object)):
    '''API provider for CCXT feed and broker classes.

    Instantiated as a singleton (via MetaSingleton), so all feeds/brokers
    share a single exchange connection and cached balance state.

    Added a new get_wallet_balance method. This will allow manual checking of the balance.
    The method will allow setting parameters. Useful for getting margin balances

    Added new private_end_point method to allow using any private non-unified end point
    '''

    # Supported granularities: maps a (backtrader timeframe, compression)
    # pair to the ccxt timeframe string used by fetch_ohlcv.
    _GRANULARITIES = {
        (bt.TimeFrame.Minutes, 1): '1m',
        (bt.TimeFrame.Minutes, 3): '3m',
        (bt.TimeFrame.Minutes, 5): '5m',
        (bt.TimeFrame.Minutes, 15): '15m',
        (bt.TimeFrame.Minutes, 30): '30m',
        (bt.TimeFrame.Minutes, 60): '1h',
        (bt.TimeFrame.Minutes, 90): '90m',
        (bt.TimeFrame.Minutes, 120): '2h',
        (bt.TimeFrame.Minutes, 180): '3h',
        (bt.TimeFrame.Minutes, 240): '4h',
        (bt.TimeFrame.Minutes, 360): '6h',
        (bt.TimeFrame.Minutes, 480): '8h',
        (bt.TimeFrame.Minutes, 720): '12h',
        (bt.TimeFrame.Days, 1): '1d',
        (bt.TimeFrame.Days, 3): '3d',
        (bt.TimeFrame.Weeks, 1): '1w',
        (bt.TimeFrame.Weeks, 2): '2w',
        (bt.TimeFrame.Months, 1): '1M',
        (bt.TimeFrame.Months, 3): '3M',
        (bt.TimeFrame.Months, 6): '6M',
        (bt.TimeFrame.Years, 1): '1y',
    }

    BrokerCls = None  # broker class will auto register
    DataCls = None  # data class will auto register

    @classmethod
    def getdata(cls, *args, **kwargs):
        '''Returns ``DataCls`` with args, kwargs'''
        return cls.DataCls(*args, **kwargs)

    @classmethod
    def getbroker(cls, *args, **kwargs):
        '''Returns broker with *args, **kwargs from registered ``BrokerCls``'''
        return cls.BrokerCls(*args, **kwargs)

    def __init__(self, exchange, currency, config, retries, debug=False, sandbox=False):
        '''Creates the ccxt exchange instance and seeds the cash/value cache.

        Args:
            exchange: ccxt exchange id string (attribute name on ``ccxt``).
            currency: quote currency whose balance is tracked.
            config: ccxt configuration dict; balances are only fetched when
                it contains a ``'secret'`` key (authenticated access).
            retries: maximum attempts used by the ``@retry`` wrapper.
            debug: when True, fetch_ohlcv logs its request parameters.
            sandbox: when True, enables the exchange's sandbox mode.
        '''
        self.exchange = getattr(ccxt, exchange)(config)
        if sandbox:
            self.exchange.set_sandbox_mode(True)
        self.currency = currency
        self.retries = retries
        self.debug = debug
        # Without API credentials we cannot query balances; start at zero.
        balance = self.exchange.fetch_balance() if 'secret' in config else 0
        if balance == 0 or not balance['free'][currency]:
            self._cash = 0
        else:
            self._cash = balance['free'][currency]
        if balance == 0 or not balance['total'][currency]:
            self._value = 0
        else:
            self._value = balance['total'][currency]

    def get_granularity(self, timeframe, compression):
        '''Maps a backtrader (timeframe, compression) pair to the ccxt
        timeframe string, validating that the exchange supports it.

        Raises:
            NotImplementedError: if the exchange cannot fetch OHLCV at all.
            ValueError: if the combination is unsupported by this module or
                by the exchange.
        '''
        if not self.exchange.has['fetchOHLCV']:
            raise NotImplementedError("'%s' exchange doesn't support fetching OHLCV data" % \
                                      self.exchange.name)

        granularity = self._GRANULARITIES.get((timeframe, compression))
        if granularity is None:
            raise ValueError("backtrader CCXT module doesn't support fetching OHLCV "
                             "data for time frame %s, comression %s" % \
                             (bt.TimeFrame.getname(timeframe), compression))

        if self.exchange.timeframes and granularity not in self.exchange.timeframes:
            raise ValueError("'%s' exchange doesn't support fetching OHLCV data for "
                             "%s time frame" % (self.exchange.name, granularity))

        return granularity

    def retry(method):
        '''Decorator (consumed at class-definition time, hence no ``self``):
        retries *method* up to ``self.retries`` times, sleeping between
        attempts per the exchange rate limit and backing off 10 seconds on
        timeouts / rate-limit errors before re-raising on the last attempt.
        '''
        @wraps(method)
        def retry_method(self, *args, **kwargs):
            rate_limit = self.exchange.rateLimit
            for i in range(self.retries):
                # Respect the exchange's advertised rate limit (milliseconds).
                time.sleep(rate_limit / 1000)
                try:
                    return method(self, *args, **kwargs)
                except (RequestTimeout, RateLimitExceeded):
                    if i == self.retries - 1:
                        raise
                    # Back off 10 seconds before the next attempt.
                    time.sleep(10)
                except (NetworkError, ExchangeError):
                    if i == self.retries - 1:
                        raise
        return retry_method

    @retry
    def get_wallet_balance(self, currency, params=None):
        '''Fetches the raw balance structure from the exchange.

        NOTE(review): *currency* is currently unused here and kept only for
        interface compatibility; callers filter the returned structure.
        '''
        # Normalize so ccxt always receives a dict (its unified API expects
        # a params mapping, not None).
        balance = self.exchange.fetch_balance(params if params is not None else {})
        return balance

    @retry
    def get_balance(self):
        '''Refreshes the cached cash (free) and value (total) balances for
        ``self.currency``.'''
        balance = self.exchange.fetch_balance()

        cash = balance['free'][self.currency]
        value = balance['total'][self.currency]
        # Fix if None is returned
        self._cash = cash if cash else 0
        self._value = value if value else 0

    @retry
    def getposition(self):
        '''Returns the cached total value (refresh with get_balance()).'''
        return self._value

    @retry
    def create_order(self, symbol, order_type, side, amount, price, params):
        # Returns the order structure as produced by ccxt.
        return self.exchange.create_order(symbol=symbol, type=order_type, side=side,
                                          amount=amount, price=price, params=params)

    @retry
    def cancel_order(self, order_id, symbol):
        return self.exchange.cancel_order(order_id, symbol)

    @retry
    def cancel_all_orders(self, symbol):
        return self.exchange.cancel_all_orders(symbol)

    @retry
    def fetch_trades(self, symbol):
        return self.exchange.fetch_trades(symbol)

    @retry
    def fetch_ohlcv(self, symbol, timeframe, since, limit, params=None):
        '''Fetches OHLCV candles; *since* is epoch milliseconds or None.'''
        # Fix: the original used a mutable default argument (params={});
        # normalize to a fresh dict per call instead.
        if params is None:
            params = {}
        if self.debug and since is not None:
            print('Fetching: {}, TF: {}, Since: {}, Limit: {}'.format(symbol, timeframe,
                                                                      datetime.fromtimestamp((since / 1e3)), limit))
        return self.exchange.fetch_ohlcv(symbol, timeframe=timeframe, since=since, limit=limit, params=params)

    @retry
    def fetch_order(self, oid, symbol):
        return self.exchange.fetch_order(oid, symbol)

    @retry
    def fetch_open_orders(self, symbol):
        return self.exchange.fetch_open_orders(symbol)

    @retry
    def private_end_point(self, type, endpoint, params):
        '''
        Open method to allow calls to be made to any private end point.
        See here: https://github.com/ccxt/ccxt/wiki/Manual#implicit-api-methods

        - type: String, 'Get', 'Post','Put' or 'Delete'.
        - endpoint = String containing the endpoint address eg. 'order/{id}/cancel'
        - Params: Dict: An implicit method takes a dictionary of parameters, sends
          the request to the exchange and returns an exchange-specific JSON
          result from the API as is, unparsed.

        To get a list of all available methods with an exchange instance,
        including implicit methods and unified methods you can simply do the
        following:

        print(dir(ccxt.hitbtc()))
        '''
        return getattr(self.exchange, endpoint)(params)
| 37.609649 | 116 | 0.605714 |
0b99962ae25dee9fc030e4bd560e6d756d00d326 | 22,593 | py | Python | my_vim_files/python27/Lib/test/test_asyncore.py | satsaeid/dotfiles | 401c3213b31dd941b44e553c6f0441187b01c19a | [
"MIT"
] | null | null | null | my_vim_files/python27/Lib/test/test_asyncore.py | satsaeid/dotfiles | 401c3213b31dd941b44e553c6f0441187b01c19a | [
"MIT"
] | null | null | null | my_vim_files/python27/Lib/test/test_asyncore.py | satsaeid/dotfiles | 401c3213b31dd941b44e553c6f0441187b01c19a | [
"MIT"
] | null | null | null | import asyncore
import unittest
import select
import os
import socket
import sys
import time
import warnings
import errno
from test import test_support
from test.test_support import TESTFN, run_unittest, unlink
from StringIO import StringIO
try:
import threading
except ImportError:
threading = None
HOST = test_support.HOST
class dummysocket:
    """Minimal socket stand-in: only records whether close() was called."""

    def __init__(self):
        # Starts open; flips to True once close() runs.
        self.closed = False

    def fileno(self):
        # Arbitrary fixed descriptor number, good enough for these tests.
        return 42

    def close(self):
        self.closed = True
class dummychannel:
    """Channel stand-in wrapping a dummysocket, mimicking a dispatcher."""

    def __init__(self):
        self.socket = dummysocket()

    def close(self):
        # Delegate so tests can observe the close on the fake socket.
        self.socket.close()
class exitingdummy:
    """Handler whose every event callback raises asyncore.ExitNow.

    The write/close/expt handlers are aliases of handle_read_event so all
    four share one implementation (and identity).
    """

    def __init__(self):
        pass

    def handle_read_event(self):
        raise asyncore.ExitNow()

    handle_write_event = handle_read_event
    handle_close = handle_read_event
    handle_expt_event = handle_read_event
class crashingdummy:
    """Handler whose every event callback raises a generic Exception and
    whose handle_error records that it was invoked."""

    def __init__(self):
        # Flipped to True by handle_error when asyncore routes the failure.
        self.error_handled = False

    def handle_read_event(self):
        raise Exception()

    handle_write_event = handle_read_event
    handle_close = handle_read_event
    handle_expt_event = handle_read_event

    def handle_error(self):
        self.error_handled = True
# used when testing senders; just collects what it gets until newline is sent
def capture_server(evt, buf, serv):
    """Accept one connection on *serv* and copy its payload into *buf*.

    Runs in a background thread. Reads until a newline terminator arrives
    (the newline itself is stripped) or ~200 polls elapse, then closes the
    server socket and sets *evt* to signal completion.
    """
    try:
        serv.listen(5)
        conn, addr = serv.accept()
    except socket.timeout:
        # Client never connected within the socket timeout; just clean up.
        pass
    else:
        n = 200
        while n > 0:
            r, w, e = select.select([conn], [], [])
            if r:
                data = conn.recv(10)
                # keep everything except for the newline terminator
                buf.write(data.replace('\n', ''))
                if '\n' in data:
                    break
            n -= 1
            time.sleep(0.01)
        conn.close()
    finally:
        serv.close()
        evt.set()
class HelperFunctionTests(unittest.TestCase):
    """Tests for asyncore's module-level helpers: read/write/_exception
    dispatch, readwrite() flag handling, close_all() and traceback
    compaction."""

    def test_readwriteexc(self):
        # Check exception handling behavior of read, write and _exception

        # check that ExitNow exceptions in the object handler method
        # bubbles all the way up through asyncore read/write/_exception calls
        tr1 = exitingdummy()
        self.assertRaises(asyncore.ExitNow, asyncore.read, tr1)
        self.assertRaises(asyncore.ExitNow, asyncore.write, tr1)
        self.assertRaises(asyncore.ExitNow, asyncore._exception, tr1)

        # check that an exception other than ExitNow in the object handler
        # method causes the handle_error method to get called
        tr2 = crashingdummy()
        asyncore.read(tr2)
        self.assertEqual(tr2.error_handled, True)

        tr2 = crashingdummy()
        asyncore.write(tr2)
        self.assertEqual(tr2.error_handled, True)

        tr2 = crashingdummy()
        asyncore._exception(tr2)
        self.assertEqual(tr2.error_handled, True)

    # asyncore.readwrite uses constants in the select module that
    # are not present in Windows systems (see this thread:
    # http://mail.python.org/pipermail/python-list/2001-October/109973.html)
    # These constants should be present as long as poll is available
    if hasattr(select, 'poll'):
        def test_readwrite(self):
            # Check that correct methods are called by readwrite()

            attributes = ('read', 'expt', 'write', 'closed', 'error_handled')

            expected = (
                (select.POLLIN, 'read'),
                (select.POLLPRI, 'expt'),
                (select.POLLOUT, 'write'),
                (select.POLLERR, 'closed'),
                (select.POLLHUP, 'closed'),
                (select.POLLNVAL, 'closed'),
            )

            # Records which event handler fired; one fresh instance per flag.
            class testobj:
                def __init__(self):
                    self.read = False
                    self.write = False
                    self.closed = False
                    self.expt = False
                    self.error_handled = False

                def handle_read_event(self):
                    self.read = True

                def handle_write_event(self):
                    self.write = True

                def handle_close(self):
                    self.closed = True

                def handle_expt_event(self):
                    self.expt = True

                def handle_error(self):
                    self.error_handled = True

            for flag, expectedattr in expected:
                tobj = testobj()
                self.assertEqual(getattr(tobj, expectedattr), False)
                asyncore.readwrite(tobj, flag)

                # Only the attribute modified by the routine we expect to be
                # called should be True.
                for attr in attributes:
                    self.assertEqual(getattr(tobj, attr), attr==expectedattr)

                # check that ExitNow exceptions in the object handler method
                # bubbles all the way up through asyncore readwrite call
                tr1 = exitingdummy()
                self.assertRaises(asyncore.ExitNow, asyncore.readwrite, tr1, flag)

                # check that an exception other than ExitNow in the object handler
                # method causes the handle_error method to get called
                tr2 = crashingdummy()
                self.assertEqual(tr2.error_handled, False)
                asyncore.readwrite(tr2, flag)
                self.assertEqual(tr2.error_handled, True)

    def test_closeall(self):
        self.closeall_check(False)

    def test_closeall_default(self):
        self.closeall_check(True)

    def closeall_check(self, usedefault):
        # Check that close_all() closes everything in a given map
        l = []
        testmap = {}
        for i in range(10):
            c = dummychannel()
            l.append(c)
            self.assertEqual(c.socket.closed, False)
            testmap[i] = c

        if usedefault:
            # Temporarily swap in our map as the module-global socket_map,
            # restoring the original even if close_all() raises.
            socketmap = asyncore.socket_map
            try:
                asyncore.socket_map = testmap
                asyncore.close_all()
            finally:
                testmap, asyncore.socket_map = asyncore.socket_map, socketmap
        else:
            asyncore.close_all(testmap)

        self.assertEqual(len(testmap), 0)
        for c in l:
            self.assertEqual(c.socket.closed, True)

    def test_compact_traceback(self):
        try:
            raise Exception("I don't like spam!")
        except:
            real_t, real_v, real_tb = sys.exc_info()
            r = asyncore.compact_traceback()
        else:
            self.fail("Expected exception")

        (f, function, line), t, v, info = r
        self.assertEqual(os.path.split(f)[-1], 'test_asyncore.py')
        self.assertEqual(function, 'test_compact_traceback')
        self.assertEqual(t, real_t)
        self.assertEqual(v, real_v)
        self.assertEqual(info, '[%s|%s|%s]' % (f, function, line))
class DispatcherTests(unittest.TestCase):
    """Tests for the asyncore.dispatcher base class: defaults, repr,
    logging helpers, unhandled-event warnings and legacy attribute
    proxying."""

    def setUp(self):
        pass

    def tearDown(self):
        asyncore.close_all()

    def test_basic(self):
        d = asyncore.dispatcher()
        self.assertEqual(d.readable(), True)
        self.assertEqual(d.writable(), True)

    def test_repr(self):
        d = asyncore.dispatcher()
        self.assertEqual(repr(d), '<asyncore.dispatcher at %#x>' % id(d))

    def test_log(self):
        d = asyncore.dispatcher()

        # capture output of dispatcher.log() (to stderr)
        fp = StringIO()
        stderr = sys.stderr
        l1 = "Lovely spam! Wonderful spam!"
        l2 = "I don't like spam!"
        try:
            sys.stderr = fp
            d.log(l1)
            d.log(l2)
        finally:
            sys.stderr = stderr

        lines = fp.getvalue().splitlines()
        self.assertEquals(lines, ['log: %s' % l1, 'log: %s' % l2])

    def test_log_info(self):
        d = asyncore.dispatcher()

        # capture output of dispatcher.log_info() (to stdout via print)
        fp = StringIO()
        stdout = sys.stdout
        l1 = "Have you got anything without spam?"
        l2 = "Why can't she have egg bacon spam and sausage?"
        l3 = "THAT'S got spam in it!"
        try:
            sys.stdout = fp
            d.log_info(l1, 'EGGS')
            d.log_info(l2)
            d.log_info(l3, 'SPAM')
        finally:
            sys.stdout = stdout

        lines = fp.getvalue().splitlines()
        expected = ['EGGS: %s' % l1, 'info: %s' % l2, 'SPAM: %s' % l3]
        self.assertEquals(lines, expected)

    def test_unhandled(self):
        d = asyncore.dispatcher()
        # Empty the ignore list so every unhandled event produces a warning.
        d.ignore_log_types = ()

        # capture output of dispatcher.log_info() (to stdout via print)
        fp = StringIO()
        stdout = sys.stdout
        try:
            sys.stdout = fp
            d.handle_expt()
            d.handle_read()
            d.handle_write()
            d.handle_connect()
            d.handle_accept()
        finally:
            sys.stdout = stdout

        lines = fp.getvalue().splitlines()
        expected = ['warning: unhandled incoming priority event',
                    'warning: unhandled read event',
                    'warning: unhandled write event',
                    'warning: unhandled connect event',
                    'warning: unhandled accept event']
        self.assertEquals(lines, expected)

    def test_issue_8594(self):
        # XXX - this test is supposed to be removed in next major Python
        # version
        d = asyncore.dispatcher(socket.socket())
        # make sure the error message no longer refers to the socket
        # object but the dispatcher instance instead
        self.assertRaisesRegexp(AttributeError, 'dispatcher instance',
                                getattr, d, 'foo')
        # cheap inheritance with the underlying socket is supposed
        # to still work but a DeprecationWarning is expected
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            family = d.family
            self.assertEqual(family, socket.AF_INET)
            self.assertTrue(len(w) == 1)
            self.assertTrue(issubclass(w[0].category, DeprecationWarning))

    def test_strerror(self):
        # refers to bug #8573
        err = asyncore._strerror(errno.EPERM)
        if hasattr(os, 'strerror'):
            self.assertEqual(err, os.strerror(errno.EPERM))
        err = asyncore._strerror(-1)
        self.assertTrue("unknown error" in err.lower())
class dispatcherwithsend_noread(asyncore.dispatcher_with_send):
    """dispatcher_with_send variant that never reports itself readable."""

    def readable(self):
        # Write-only endpoint: keep it out of the read set entirely.
        return False

    def handle_connect(self):
        # Connection establishment needs no action for these tests.
        pass
class DispatcherWithSendTests(unittest.TestCase):
    """End-to-end send test for asyncore.dispatcher_with_send against a
    threaded capture server on the loopback interface."""

    usepoll = False

    def setUp(self):
        pass

    def tearDown(self):
        asyncore.close_all()

    @unittest.skipUnless(threading, 'Threading required for this test.')
    @test_support.reap_threads
    def test_send(self):
        evt = threading.Event()
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(3)
        port = test_support.bind_port(sock)

        cap = StringIO()
        args = (evt, cap, sock)
        t = threading.Thread(target=capture_server, args=args)
        t.start()
        try:
            # wait a little longer for the server to initialize (it sometimes
            # refuses connections on slow machines without this wait)
            time.sleep(0.2)

            data = "Suppose there isn't a 16-ton weight?"
            d = dispatcherwithsend_noread()
            d.create_socket(socket.AF_INET, socket.SOCK_STREAM)
            d.connect((HOST, port))

            # give time for socket to connect
            time.sleep(0.1)

            d.send(data)
            d.send(data)
            # Newline tells capture_server to stop reading.
            d.send('\n')

            # Pump the asyncore loop until the output buffer drains
            # (bounded to 1000 iterations so a stall cannot hang the test).
            n = 1000
            while d.out_buffer and n > 0:
                asyncore.poll()
                n -= 1

            evt.wait()

            self.assertEqual(cap.getvalue(), data*2)
        finally:
            t.join()
class DispatcherWithSendTests_UsePoll(DispatcherWithSendTests):
    # Same send test, but with poll()-based multiplexing.
    usepoll = True
if hasattr(asyncore, 'file_wrapper'):
    class FileWrapperTest(unittest.TestCase):
        """Tests for asyncore.file_wrapper (only defined on platforms that
        support wrapping raw file descriptors, i.e. POSIX)."""

        def setUp(self):
            self.d = "It's not dead, it's sleeping!"
            file(TESTFN, 'w').write(self.d)

        def tearDown(self):
            unlink(TESTFN)

        def test_recv(self):
            fd = os.open(TESTFN, os.O_RDONLY)
            w = asyncore.file_wrapper(fd)
            # file_wrapper dups the fd, so closing the original is safe.
            os.close(fd)

            self.assertNotEqual(w.fd, fd)
            self.assertNotEqual(w.fileno(), fd)
            self.assertEqual(w.recv(13), "It's not dead")
            self.assertEqual(w.read(6), ", it's")
            w.close()
            # Reading a closed wrapper must fail.
            self.assertRaises(OSError, w.read, 1)

        def test_send(self):
            d1 = "Come again?"
            d2 = "I want to buy some cheese."
            fd = os.open(TESTFN, os.O_WRONLY | os.O_APPEND)
            w = asyncore.file_wrapper(fd)
            os.close(fd)

            # write() and send() are equivalent on the wrapper.
            w.write(d1)
            w.send(d2)
            w.close()
            self.assertEqual(file(TESTFN).read(), self.d + d1 + d2)
class BaseTestHandler(asyncore.dispatcher):
    """Dispatcher base for the API tests: every event handler raises, so a
    test only passes when the subclass overrides exactly the event it
    expects and sets ``self.flag``."""

    def __init__(self, sock=None):
        asyncore.dispatcher.__init__(self, sock)
        # Set to True by subclasses when the expected event fires.
        self.flag = False

    def handle_accept(self):
        raise Exception("handle_accept not supposed to be called")

    def handle_connect(self):
        raise Exception("handle_connect not supposed to be called")

    def handle_expt(self):
        raise Exception("handle_expt not supposed to be called")

    def handle_close(self):
        raise Exception("handle_close not supposed to be called")

    def handle_error(self):
        # Re-raise so test failures surface instead of being swallowed.
        raise
class TCPServer(asyncore.dispatcher):
    """A server which listens on an address and dispatches the
    connection to a handler.
    """

    def __init__(self, handler=BaseTestHandler, host=HOST, port=0):
        asyncore.dispatcher.__init__(self)
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.set_reuse_addr()
        self.bind((host, port))
        self.listen(5)
        self.handler = handler

    @property
    def address(self):
        # (host, port) the kernel actually bound (port 0 means ephemeral).
        return self.socket.getsockname()[:2]

    def handle_accept(self):
        conn, peer = self.accept()
        # Hand the accepted connection off to the configured handler class.
        self.handler(conn)

    def handle_error(self):
        raise
class BaseClient(BaseTestHandler):
    """Test client that connects to *address* as soon as it is created."""

    def __init__(self, address):
        BaseTestHandler.__init__(self)
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.connect(address)

    def handle_connect(self):
        # Connecting is expected for a client, so override the base class's
        # raise-on-event behavior with a no-op.
        pass
class BaseTestAPI(unittest.TestCase):
    """Black-box tests of the asyncore.dispatcher event API, run against a
    real loopback TCP connection. Subclasses select the multiplexing
    mechanism via the ``use_poll`` class attribute."""

    def tearDown(self):
        asyncore.close_all()

    def loop_waiting_for_flag(self, instance, timeout=5):
        """Pump the asyncore loop until *instance.flag* is set or ~*timeout*
        seconds of polling elapse, failing the test on timeout."""
        timeout = float(timeout) / 100
        count = 100
        while asyncore.socket_map and count > 0:
            asyncore.loop(timeout=0.01, count=1, use_poll=self.use_poll)
            if instance.flag:
                return
            count -= 1
            time.sleep(timeout)
        self.fail("flag not set")

    def test_handle_connect(self):
        # make sure handle_connect is called on connect()

        class TestClient(BaseClient):
            def handle_connect(self):
                self.flag = True

        server = TCPServer()
        client = TestClient(server.address)
        self.loop_waiting_for_flag(client)

    def test_handle_accept(self):
        # make sure handle_accept() is called when a client connects

        class TestListener(BaseTestHandler):
            def __init__(self):
                BaseTestHandler.__init__(self)
                self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
                self.bind((HOST, 0))
                self.listen(5)
                self.address = self.socket.getsockname()[:2]

            def handle_accept(self):
                self.flag = True

        server = TestListener()
        client = BaseClient(server.address)
        self.loop_waiting_for_flag(server)

    def test_handle_read(self):
        # make sure handle_read is called on data received

        class TestClient(BaseClient):
            def handle_read(self):
                self.flag = True

        class TestHandler(BaseTestHandler):
            def __init__(self, conn):
                BaseTestHandler.__init__(self, conn)
                self.send('x' * 1024)

        server = TCPServer(TestHandler)
        client = TestClient(server.address)
        self.loop_waiting_for_flag(client)

    def test_handle_write(self):
        # make sure handle_write is called

        class TestClient(BaseClient):
            def handle_write(self):
                self.flag = True

        server = TCPServer()
        client = TestClient(server.address)
        self.loop_waiting_for_flag(client)

    def test_handle_close(self):
        # make sure handle_close is called when the other end closes
        # the connection

        class TestClient(BaseClient):
            def handle_read(self):
                # in order to make handle_close be called we are supposed
                # to make at least one recv() call
                self.recv(1024)

            def handle_close(self):
                self.flag = True
                self.close()

        class TestHandler(BaseTestHandler):
            def __init__(self, conn):
                BaseTestHandler.__init__(self, conn)
                self.close()

        server = TCPServer(TestHandler)
        client = TestClient(server.address)
        self.loop_waiting_for_flag(client)

    @unittest.skipIf(sys.platform.startswith("sunos"),
                     "OOB support is broken on Solaris")
    def test_handle_expt(self):
        # Make sure handle_expt is called on OOB data received.
        # Note: this might fail on some platforms as OOB data is
        # tenuously supported and rarely used.

        class TestClient(BaseClient):
            def handle_expt(self):
                self.flag = True

        class TestHandler(BaseTestHandler):
            def __init__(self, conn):
                BaseTestHandler.__init__(self, conn)
                self.socket.send(chr(244), socket.MSG_OOB)

        server = TCPServer(TestHandler)
        client = TestClient(server.address)
        self.loop_waiting_for_flag(client)

    def test_handle_error(self):

        class TestClient(BaseClient):
            def handle_write(self):
                1.0 / 0

            def handle_error(self):
                self.flag = True
                # We must currently be handling the ZeroDivisionError
                # triggered in handle_write; re-raise to verify that.
                try:
                    raise
                except ZeroDivisionError:
                    pass
                else:
                    raise Exception("exception not raised")

        server = TCPServer()
        client = TestClient(server.address)
        self.loop_waiting_for_flag(client)

    def test_connection_attributes(self):
        server = TCPServer()
        client = BaseClient(server.address)

        # we start disconnected
        self.assertFalse(server.connected)
        self.assertTrue(server.accepting)
        # this can't be taken for granted across all platforms
        #self.assertFalse(client.connected)
        self.assertFalse(client.accepting)

        # execute some loops so that client connects to server
        asyncore.loop(timeout=0.01, use_poll=self.use_poll, count=100)
        self.assertFalse(server.connected)
        self.assertTrue(server.accepting)
        self.assertTrue(client.connected)
        self.assertFalse(client.accepting)

        # disconnect the client
        client.close()
        self.assertFalse(server.connected)
        self.assertTrue(server.accepting)
        self.assertFalse(client.connected)
        self.assertFalse(client.accepting)

        # stop serving
        server.close()
        self.assertFalse(server.connected)
        self.assertFalse(server.accepting)

    def test_create_socket(self):
        s = asyncore.dispatcher()
        s.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.assertEqual(s.socket.family, socket.AF_INET)
        self.assertEqual(s.socket.type, socket.SOCK_STREAM)

    def test_bind(self):
        s1 = asyncore.dispatcher()
        s1.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        s1.bind((HOST, 0))
        s1.listen(5)
        port = s1.socket.getsockname()[1]

        s2 = asyncore.dispatcher()
        s2.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        # EADDRINUSE indicates the socket was correctly bound
        self.assertRaises(socket.error, s2.bind, (HOST, port))

    def test_set_reuse_addr(self):
        sock = socket.socket()
        try:
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        except socket.error:
            # Fix: the original called unittest.skip(...) here, which only
            # *builds* a decorator and discards it, so the test silently
            # passed. skipTest() actually marks the test as skipped.
            self.skipTest("SO_REUSEADDR not supported on this platform")
        else:
            # if SO_REUSEADDR succeeded for sock we expect asyncore
            # to do the same
            s = asyncore.dispatcher(socket.socket())
            self.assertFalse(s.socket.getsockopt(socket.SOL_SOCKET,
                                                 socket.SO_REUSEADDR))
            s.create_socket(socket.AF_INET, socket.SOCK_STREAM)
            s.set_reuse_addr()
            self.assertTrue(s.socket.getsockopt(socket.SOL_SOCKET,
                                                socket.SO_REUSEADDR))
        finally:
            sock.close()
class TestAPI_UseSelect(BaseTestAPI):
    # Exercise the API tests with select()-based multiplexing.
    use_poll = False
class TestAPI_UsePoll(BaseTestAPI):
    # Exercise the API tests with poll()-based multiplexing.
    use_poll = True
def test_main():
    """Run the asyncore test suite, adding the platform-dependent cases
    (file_wrapper, poll) only where the platform provides them."""
    suites = [HelperFunctionTests, DispatcherTests, DispatcherWithSendTests,
              DispatcherWithSendTests_UsePoll, TestAPI_UseSelect]
    if hasattr(asyncore, 'file_wrapper'):
        suites.append(FileWrapperTest)
    if hasattr(select, 'poll'):
        suites.append(TestAPI_UsePoll)
    run_unittest(*suites)
if __name__ == "__main__":
    # Allow running this test module directly from the command line.
    test_main()
| 31.821127 | 83 | 0.571991 |
95147749aa22783f7c7a54a1ca202e95dd5f3bfc | 1,430 | py | Python | Code/viusalize.py | prasys/textanalyzer | fd14454d073c8571ddaa40f6ac668842e8aef726 | [
"MIT"
] | null | null | null | Code/viusalize.py | prasys/textanalyzer | fd14454d073c8571ddaa40f6ac668842e8aef726 | [
"MIT"
] | null | null | null | Code/viusalize.py | prasys/textanalyzer | fd14454d073c8571ddaa40f6ac668842e8aef726 | [
"MIT"
] | null | null | null | #https://www.kaggle.com/ceshine/tag-visualization-with-universal-sentence-encoder
import os
import re
import html as ihtml
import warnings
import random
warnings.filterwarnings('ignore')
os.environ["TFHUB_CACHE_DIR"] = "/tmp/"
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
import scipy
import umap
import tensorflow as tf
import tensorflow_hub as hub
import plotly_express as px
import matplotlib.pyplot as plt
import seaborn as sns
pd.set_option('display.max_colwidth', -1)

# Fix deterministic seeds across all randomness sources used below.
SEED = 42
random.seed(SEED)
np.random.seed(SEED)
tf.random.set_random_seed(SEED)  # TF1.x API; TF2 equivalent is tf.random.set_seed

# %matplotlib inline
# (IPython magic from the original notebook; not valid Python syntax in a
# plain .py file, so it is kept only as a comment. Re-enable in Jupyter.)

# Load the precomputed Universal Sentence Encoder embeddings for the
# training split; the test split is currently unused (kept commented).
embeddings_train = np.load('USE_tuned_alta_train.npy', allow_pickle=True)
#embeddings_test = np.load('USE_tuned_alta_test.npy',allow_pickle=True)
#sentence_embeddings = np.concatenate(embeddings_train,embeddings_test,axis=0)
sentence_embeddings = embeddings_train

# Project the embeddings to 2-D with UMAP (cosine metric) for visualization;
# fixed random_state keeps the layout reproducible.
embedding = umap.UMAP(metric="cosine", n_components=2, random_state=42).fit_transform(sentence_embeddings)
df_se_emb = pd.DataFrame(embedding, columns=["x", "y"])

fig, ax = plt.subplots(figsize=(12, 10))
# Fix: the original referenced the undefined name `df_emb_sample`; the
# DataFrame built above is `df_se_emb`.
plt.scatter(
    df_se_emb["x"].values, df_se_emb["y"].values, s=1
)
plt.setp(ax, xticks=[], yticks=[])
plt.title("Sentence embeddings embedded into two dimensions by UMAP", fontsize=18)
plt.show()
2c4fb2fd0ec1dd34c21131dc7bb2179368abd6f5 | 11,624 | py | Python | pyclient/confluo/rpc/client.py | louishust/confluo | 55377acf19bd468015fb2c98cad737b264346110 | [
"Apache-2.0"
] | null | null | null | pyclient/confluo/rpc/client.py | louishust/confluo | 55377acf19bd468015fb2c98cad737b264346110 | [
"Apache-2.0"
] | null | null | null | pyclient/confluo/rpc/client.py | louishust/confluo | 55377acf19bd468015fb2c98cad737b264346110 | [
"Apache-2.0"
] | null | null | null | import logging
from thrift.protocol.TBinaryProtocol import TBinaryProtocol
from thrift.transport import TTransport, TSocket
import rpc_service
import type_conversions
from batch import RecordBatchBuilder
from schema import make_schema
from stream import RecordStream, AlertStream
class RpcClient:
""" Client for Confluo through RPC.
"""
def __init__(self, host='localhost', port=9090):
""" Initializes the rpc client to the specified host and port.
Args:
host: The host for the client.
port: The port number to communicate through.
"""
logging.basicConfig(level=logging.INFO) # TODO: Read from configuration file
self.LOG = logging.getLogger(__name__)
self.LOG.info("Connecting to %s:%d", host, port)
self.socket_ = TSocket.TSocket(host, port)
self.transport_ = TTransport.TBufferedTransport(self.socket_)
self.protocol_ = TBinaryProtocol(self.transport_)
self.client_ = rpc_service.Client(self.protocol_)
self.transport_.open()
self.client_.register_handler()
self.cur_m_id_ = -1
self.cur_schema_ = None
    def close(self):
        """ Closes the rpc client, disconnecting from the server if connected.
        """
        self.disconnect()
    def connect(self, host, port):
        """ Connects the rpc client to the specified host and port, opening a
        buffered thrift transport and registering this client with the server.

        Args:
            host: The host of the client.
            port: The port number to communicate through.
        """
        self.LOG.info("Connecting to %s:%d", host, port)
        self.socket_ = TSocket.TSocket(host, port)
        self.transport_ = TTransport.TBufferedTransport(self.socket_)
        self.protocol_ = TBinaryProtocol(self.transport_)
        self.client_ = rpc_service.Client(self.protocol_)
        self.transport_.open()
        self.client_.register_handler()
    def disconnect(self):
        """ Disconnects the rpc client from the host and port.

        Safe to call when already disconnected: does nothing unless the
        transport is currently open.
        """
        if self.transport_.isOpen():
            host = self.socket_.host
            port = self.socket_.port
            self.LOG.info("Disconnecting from %s:%d", host, port)
            # Deregister with the server before tearing down the transport.
            self.client_.deregister_handler()
            self.transport_.close()
    def create_atomic_multilog(self, name, schema, storage_mode):
        """ Creates an atomic multilog for this client and makes it the
        current multilog for subsequent operations.

        Args:
            name: The name of the atomic multilog to create.
            schema: The schema for the atomic multilog.
            storage_mode: The mode for storage.
        """
        self.cur_schema_ = make_schema(schema)
        rpc_schema = type_conversions.convert_to_rpc_schema(self.cur_schema_)
        self.cur_m_id_ = self.client_.create_atomic_multilog(name, rpc_schema, storage_mode)
    def set_current_atomic_multilog(self, atomic_multilog_name):
        """ Sets the atomic multilog to the desired atomic multilog.

        Args:
            atomic_multilog_name: The name of atomic multilog to set the current atomic multilog to.
        """
        # Fetch both the schema and the id of the named multilog in one call.
        info = self.client_.get_atomic_multilog_info(atomic_multilog_name)
        self.cur_schema_ = type_conversions.convert_to_schema(info.schema)
        self.cur_m_id_ = info.id
    def remove_atomic_multilog(self):
        """ Removes the current atomic multilog from the client and resets
        the current multilog id.

        Raises:
            ValueError: If no atomic multilog has been set.
        """
        if self.cur_m_id_ == -1:
            raise ValueError("Must set atomic multilog first.")
        self.client_.remove_atomic_multilog(self.cur_m_id_)
        self.cur_m_id_ = -1
    def add_index(self, field_name, bucket_size=1):
        """ Adds an index on a field of the current atomic multilog.

        Args:
            field_name: The name of the field to index.
            bucket_size: The bucket size for the index.

        Raises:
            ValueError: If no atomic multilog has been set.
        """
        if self.cur_m_id_ == -1:
            raise ValueError("Must set atomic multilog first.")
        self.client_.add_index(self.cur_m_id_, field_name, bucket_size)
    def remove_index(self, field_name):
        """ Removes an index from the current atomic multilog.

        Args:
            field_name: The name of the associated field.

        Raises:
            ValueError: If no atomic multilog has been set.
        """
        if self.cur_m_id_ == -1:
            raise ValueError("Must set atomic multilog first.")
        self.client_.remove_index(self.cur_m_id_, field_name)
    def add_filter(self, filter_name, filter_expr):
        """ Adds a filter to the current atomic multilog.

        Args:
            filter_name: The name of the filter.
            filter_expr: The filter expression.

        Raises:
            ValueError: If no atomic multilog has been set.
        """
        if self.cur_m_id_ == -1:
            raise ValueError("Must set atomic multilog first.")
        self.client_.add_filter(self.cur_m_id_, filter_name, filter_expr)
    def remove_filter(self, filter_name):
        """ Removes a filter from the current atomic multilog.

        Args:
            filter_name: The name of the filter.

        Raises:
            ValueError: If no atomic multilog has been set.
        """
        if self.cur_m_id_ == -1:
            raise ValueError("Must set atomic multilog first.")
        self.client_.remove_filter(self.cur_m_id_, filter_name)
    def add_aggregate(self, aggregate_name, filter_name, aggregate_expr):
        """ Adds an aggregate over an existing filter of the current atomic
        multilog.

        Args:
            aggregate_name: The name of the aggregate.
            filter_name: The name of the filter.
            aggregate_expr: The aggregate expression.

        Raises:
            ValueError: If no atomic multilog has been set.
        """
        if self.cur_m_id_ == -1:
            raise ValueError("Must set atomic multilog first.")
        self.client_.add_aggregate(self.cur_m_id_, aggregate_name, filter_name, aggregate_expr)
    def remove_aggregate(self, aggregate_name):
        """ Removes an aggregate from the current atomic multilog.

        Args:
            aggregate_name: The name of the aggregate.

        Raises:
            ValueError: If no atomic multilog has been set.
        """
        if self.cur_m_id_ == -1:
            raise ValueError("Must set atomic multilog first.")
        self.client_.remove_aggregate(self.cur_m_id_, aggregate_name)
    def install_trigger(self, trigger_name, trigger_expr):
        """ Adds (installs) a trigger on the current atomic multilog.

        Args:
            trigger_name: The name of the trigger to add.
            trigger_expr: The trigger expression.

        Raises:
            ValueError: If no atomic multilog has been set.
        """
        if self.cur_m_id_ == -1:
            raise ValueError("Must set atomic multilog first.")
        self.client_.add_trigger(self.cur_m_id_, trigger_name, trigger_expr)
    def remove_trigger(self, trigger_name):
        """ Removes a trigger from the current atomic multilog.

        Args:
            trigger_name: The name of the trigger.

        Raises:
            ValueError: If no atomic multilog has been set.
        """
        if self.cur_m_id_ == -1:
            raise ValueError("Must set atomic multilog first.")
        self.client_.remove_trigger(self.cur_m_id_, trigger_name)
    def append_raw(self, data):
        """ Append raw (pre-packed) record data to the atomic multilog.

        Args:
            data: The data to append; must be exactly one schema-sized record.

        Returns:
            The result of the remote append call (the record's log offset).

        Raises:
            ValueError: If no atomic multilog has been set, or if the data
                length does not match the current schema's record size.
        """
        if self.cur_m_id_ == -1:
            raise ValueError("Must set atomic multilog first.")
        if len(data) != self.cur_schema_.record_size_:
            raise ValueError("Record length must be: {}, is: {}".format(self.cur_schema_.record_size_, len(data)))
        return self.client_.append(self.cur_m_id_, data)
    def append(self, rec):
        """ Append a record to the atomic multilog, packing it with the
        current schema first.

        Args:
            rec: The record to append.

        Returns:
            The result of the remote append call (the record's log offset).

        Raises:
            ValueError: Propagated from append_raw.
        """
        return self.append_raw(self.cur_schema_.pack(rec))
    def get_batch_builder(self):
        """Get a record batch builder instance bound to the current schema.

        Returns:
            A record batch builder instance.
        """
        return RecordBatchBuilder(self.cur_schema_)
    def read_raw(self, offset):
        """ Reads one record's worth of raw data from a specified offset.

        Args:
            offset: The offset from the log to read from.

        Raises:
            ValueError: If no atomic multilog has been set.

        Returns:
            The raw (packed) record data at the offset.
        """
        if self.cur_m_id_ == -1:
            raise ValueError("Must set atomic multilog first.")
        return self.client_.read(self.cur_m_id_, offset, self.cur_schema_.record_size_)
    def read(self, offset):
        """ Reads the record at the given offset and unpacks it with the
        current schema.

        Args:
            offset: The offset in the log to read from.
        Raises:
            ValueError: Propagated from read_raw if no multilog is selected.
        Returns:
            The record decoded via the current schema.
        """
        buf = self.read_raw(offset)
        return self.cur_schema_.apply(buf)
def get_aggregate(self, aggregate_name, begin_ms, end_ms):
""" Gets an aggregate from the atomic multilog.
Args:
aggregate_name: The name of the aggregate.
begin_ms: The beginning time in milliseconds.
end_ms: The end time in milliseconds.
Raises:
ValueError.
Returns:
The aggregate.
"""
if self.cur_m_id_ == -1:
raise ValueError("Must set atomic multilog first.")
return self.client_.query_aggregate(self.cur_m_id_, aggregate_name, begin_ms, end_ms)
def execute_filter(self, filter_expr):
""" Executes a specified filter.
Args:
filter_expr: The filter expression.
Raises:
ValueError.
Returns:
Record stream containing the data.
"""
if self.cur_m_id_ == -1:
raise ValueError("Must set atomic multilog first.")
handle = self.client_.adhoc_filter(self.cur_m_id_, filter_expr)
return RecordStream(self.cur_m_id_, self.cur_schema_, self.client_, handle)
def query_filter(self, filter_name, begin_ms, end_ms, filter_expr=""):
""" Queries a filter.
Args:
filter_name: The name of the filter.
begin_ms: The beginning time in milliseconds.
end_ms: The end time in milliseconds.
filter_expr: The filter expression.
Raises:
ValueError.
Returns:
A record stream containing the results of the filter.
"""
if self.cur_m_id_ == -1:
raise ValueError("Must set atomic multilog first.")
if filter_expr == "":
handle = self.client_.predef_filter(self.cur_m_id_, filter_name, begin_ms, end_ms)
return RecordStream(self.cur_m_id_, self.cur_schema_, self.client_, handle)
else:
handle = self.client_.combined_filter(self.cur_m_id_, filter_name, filter_expr, begin_ms, end_ms)
return RecordStream(self.cur_m_id_, self.cur_schema_, self.client_, handle)
def get_alerts(self, begin_ms, end_ms, trigger_name=""):
""" Gets the alerts.
Args:
begin_ms: The beginning time in milliseconds.
end_ms: The end time in milliseconds.
trigger_name: The name of the trigger.
Raises:
ValueError.
Returns:
A stream of alerts.
"""
if self.cur_m_id_ == -1:
raise ValueError("Must set atomic multilog first.")
if trigger_name == "":
handle = self.client_.alerts_by_time(self.cur_m_id_, begin_ms, end_ms)
return AlertStream(self.cur_m_id_, self.client_, handle)
else:
handle = self.client_.alerts_by_trigger_and_time(self.cur_m_id_, trigger_name, begin_ms, end_ms)
return AlertStream(self.cur_m_id_, self.client_, handle)
def num_records(self):
""" Gets the number of records in the atomic multilog.
Raises:
ValueError.
Returns:
The number of records.
"""
if self.cur_m_id_ == -1:
raise ValueError("Must set atomic multilog first.")
return self.client_.num_records(self.cur_m_id_)
| 35.224242 | 114 | 0.617085 |
16f17a6b328b2526f5a940d209a40ecd87db655c | 33,127 | py | Python | syft/workers/base.py | mukira/PySyft | 94595008e8326d3111406ae143099b311fc3f2e6 | [
"Apache-2.0"
] | 1 | 2021-04-25T08:55:43.000Z | 2021-04-25T08:55:43.000Z | syft/workers/base.py | mukira/PySyft | 94595008e8326d3111406ae143099b311fc3f2e6 | [
"Apache-2.0"
] | null | null | null | syft/workers/base.py | mukira/PySyft | 94595008e8326d3111406ae143099b311fc3f2e6 | [
"Apache-2.0"
] | null | null | null | import logging
from abc import abstractmethod
import syft as sy
from syft.frameworks.torch.tensors.interpreters import AbstractTensor
from syft.generic import ObjectStorage
from syft.exceptions import GetNotPermittedError
from syft.exceptions import WorkerNotFoundException
from syft.exceptions import ResponseSignatureError
from syft.workers import AbstractWorker
from syft import messaging
from syft import codes
from typing import Callable
from typing import List
from typing import Tuple
from typing import Union
from typing import TYPE_CHECKING
import torch
# this if statement avoids circular imports between base.py and pointer.py
if TYPE_CHECKING:
from syft.frameworks.torch import pointers
logger = logging.getLogger(__name__)
class BaseWorker(AbstractWorker, ObjectStorage):
"""Contains functionality to all workers.
Other workers will extend this class to inherit all functionality necessary
for PySyft's protocol. Extensions of this class overrides two key methods
_send_msg() and _recv_msg() which are responsible for defining the
procedure for sending a binary message to another worker.
At it's core, BaseWorker (and all workers) is a collection of objects owned
by a certain machine. Each worker defines how it interacts with objects on
other workers as well as how other workers interact with objects owned by
itself. Objects are either tensors or of any type supported by the PySyft
protocol.
Args:
hook: A reference to the TorchHook object which is used
to modify PyTorch with PySyft's functionality.
id: An optional string or integer unique id of the worker.
known_workers: An optional dictionary of all known workers on a
network which this worker may need to communicate with in the
future. The key of each should be each worker's unique ID and
the value should be a worker class which extends BaseWorker.
Extensions of BaseWorker will include advanced functionality
for adding to this dictionary(node discovery). In some cases,
one can initialize this with known workers to help bootstrap
the network.
data: Initialize workers with data on creating worker object
is_client_worker: An optional boolean parameter to indicate
whether this worker is associated with an end user client. If
so, it assumes that the client will maintain control over when
variables are instantiated or deleted as opposed to handling
tensor/variable/model lifecycle internally. Set to True if this
object is not where the objects will be stored, but is instead
a pointer to a worker that eists elsewhere.
log_msgs: An optional boolean parameter to indicate whether all
messages should be saved into a log for later review. This is
primarily a development/testing feature.
auto_add: Determines whether to automatically add this worker to the
list of known workers.
"""
    def __init__(
        self,
        hook: "sy.TorchHook",
        id: Union[int, str] = 0,
        data: Union[List, tuple] = None,
        is_client_worker: bool = False,
        log_msgs: bool = False,
        verbose: bool = False,
        auto_add: bool = True,
    ):
        """Initializes a BaseWorker: stores configuration, builds the message
        routing table, registers any initial data, and wires this worker into
        the network of known workers (see class docstring for argument
        semantics)."""
        super().__init__()
        self.hook = hook
        # hook may be None in some construction paths; guard the attribute access.
        self.torch = None if hook is None else hook.torch
        self.id = id
        self.is_client_worker = is_client_worker
        self.log_msgs = log_msgs
        self.verbose = verbose
        self.auto_add = auto_add
        self.msg_history = list()
        # For performance, we cache each message-type handler in a dispatch
        # table so recv_msg() can route in O(1) without an if/elif chain.
        self._message_router = {
            codes.MSGTYPE.CMD: self.execute_command,
            codes.MSGTYPE.OBJ: self.set_obj,
            codes.MSGTYPE.OBJ_REQ: self.respond_to_obj_req,
            codes.MSGTYPE.OBJ_DEL: self.rm_obj,
            codes.MSGTYPE.IS_NONE: self.is_tensor_none,
            codes.MSGTYPE.GET_SHAPE: self.get_tensor_shape,
            codes.MSGTYPE.SEARCH: self.deserialized_search,
            codes.MSGTYPE.FORCE_OBJ_DEL: self.force_rm_obj,
        }
        self.load_data(data)
        # Declare workers as appropriate
        self._known_workers = {}
        if auto_add:
            if hook.local_worker is not None:
                known_workers = self.hook.local_worker._known_workers
                if self.id in known_workers:
                    if isinstance(known_workers[self.id], type(self)):
                        # If a worker with this id already exists and it has the
                        # same type as the one being created, we copy all the
                        # attributes of the existing worker to this one.
                        self.__dict__.update(known_workers[self.id].__dict__)
                    else:
                        raise RuntimeError(
                            "Worker initialized with the same id and different types."
                        )
                else:
                    # New worker id: register it with the local worker and
                    # cross-register with every already-known worker.
                    hook.local_worker.add_worker(self)
                    for worker_id, worker in hook.local_worker._known_workers.items():
                        if worker_id not in self._known_workers:
                            self.add_worker(worker)
                        if self.id not in worker._known_workers:
                            worker.add_worker(self)
            else:
                # Make the local worker aware of itself
                # self is the to-be-created local worker
                self.add_worker(self)
# SECTION: Methods which MUST be overridden by subclasses
    @abstractmethod
    def _send_msg(self, message: bin, location: "BaseWorker"):
        """Sends a binary message from one worker to another (transport hook).

        As BaseWorker implies, you should never instantiate this class by
        itself. Instead, you should extend BaseWorker in a new class which
        instantiates _send_msg and _recv_msg, each of which should specify the
        exact way in which two workers communicate with each other. The easiest
        example to study is VirtualWorker.

        Args:
            message: A binary message to be sent from one worker
                to another.
            location: A BaseWorker instance that lets you provide the
                destination to send the message.

        Raises:
            NotImplementedError: Always, since subclasses must override this.
        """
        raise NotImplementedError  # pragma: no cover
    @abstractmethod
    def _recv_msg(self, message: bin):
        """Receives a binary message (transport hook).

        As BaseWorker implies, you should never instantiate this class by
        itself. Instead, you should extend BaseWorker in a new class which
        instantiates _send_msg and _recv_msg, each of which should specify the
        exact way in which two workers communicate with each other. The easiest
        example to study is VirtualWorker.

        Args:
            message: The binary message being received.

        Raises:
            NotImplementedError: Always, since subclasses must override this.
        """
        raise NotImplementedError  # pragma: no cover
def remove_worker_from_registry(self, worker_id):
"""Removes a worker from the dictionary of known workers.
Args:
worker_id: id to be removed
"""
del self._known_workers[worker_id]
def remove_worker_from_local_worker_registry(self):
"""Removes itself from the registry of hook.local_worker.
"""
self.hook.local_worker.remove_worker_from_registry(worker_id=self.id)
def load_data(self, data: List[Union[torch.Tensor, AbstractTensor]]) -> None:
"""Allows workers to be initialized with data when created
The method registers the tensor individual tensor objects.
Args:
data: A list of tensors
"""
if data:
for tensor in data:
self.register_obj(tensor)
tensor.owner = self
def send_msg(self, msg_type: int, message: str, location: "BaseWorker") -> object:
"""Implements the logic to send messages.
The message is serialized and sent to the specified location. The
response from the location (remote worker) is deserialized and
returned back.
Every message uses this method.
Args:
msg_type: A integer representing the message type.
message: A string representing the message being received.
location: A BaseWorker instance that lets you provide the
destination to send the message.
Returns:
The deserialized form of message from the worker at specified
location.
"""
if self.verbose:
print(f"worker {self} sending {msg_type} {message} to {location}")
# Step 0: combine type and message
message = messaging.Message(msg_type, message)
# Step 1: serialize the message to simple python objects
bin_message = sy.serde.serialize(message)
# Step 2: send the message and wait for a response
bin_response = self._send_msg(bin_message, location)
# Step 3: deserialize the response
response = sy.serde.deserialize(bin_response, worker=self)
return response
def recv_msg(self, bin_message: bin) -> bin:
"""Implements the logic to receive messages.
The binary message is deserialized and routed to the appropriate
function. And, the response serialized the returned back.
Every message uses this method.
Args:
bin_message: A binary serialized message.
Returns:
A binary message response.
"""
# Step -1: save message if log_msgs == True
if self.log_msgs:
self.msg_history.append(bin_message)
# Step 0: deserialize message
msg = sy.serde.deserialize(bin_message, worker=self)
(msg_type, contents) = (msg.msg_type, msg.contents)
if self.verbose:
print(f"worker {self} received {sy.codes.code2MSGTYPE[msg_type]} {contents}")
# Step 1: route message to appropriate function
response = self._message_router[msg_type](contents)
# Step 2: Serialize the message to simple python objects
bin_response = sy.serde.serialize(response)
return bin_response
# SECTION:recv_msg() uses self._message_router to route to these methods
# Each method corresponds to a MsgType enum.
    def send(
        self,
        obj: Union[torch.Tensor, AbstractTensor],
        workers: "BaseWorker",
        ptr_id: Union[str, int] = None,
        local_autograd=False,
        preinitialize_grad=False,
        garbage_collect_data=None,
    ) -> "pointers.ObjectPointer":
        """Sends tensor to the worker(s).

        Send a syft or torch tensor/object and its child, sub-child, etc (all the
        syft chain of children) to a worker, or a list of workers, with a given
        remote storage address.

        Args:
            obj: A syft/torch tensor/object to send.
            workers: A BaseWorker object (or single-element list) representing
                the worker(s) that will receive the object.
            ptr_id: An optional string or integer indicating the remote id of
                the object on the remote worker(s).
            local_autograd: Use autograd system on the local machine instead of
                PyTorch's autograd on the workers.
            preinitialize_grad: Initialize gradient for AutogradTensors to a tensor.
            garbage_collect_data: argument passed down to create_pointer()

        Example:
            >>> import torch
            >>> import syft as sy
            >>> hook = sy.TorchHook(torch)
            >>> bob = sy.VirtualWorker(hook)
            >>> x = torch.Tensor([1, 2, 3, 4])
            >>> x.send(bob, 1000)
            Will result in bob having the tensor x with id 1000

        Returns:
            A PointerTensor object representing the pointer to the remote worker(s).

        Raises:
            NotImplementedError: If more than one destination worker is given.
        """
        if not isinstance(workers, list):
            workers = [workers]

        assert len(workers) > 0, "Please provide workers to receive the data"

        if len(workers) == 1:
            worker = workers[0]
        else:
            # If multiple workers are provided , you want to send the same tensor
            # to all the workers. You'll get multiple pointers, or a pointer
            # with different locations
            raise NotImplementedError(
                "Sending to multiple workers is not \
                        supported at the moment"
            )

        # Resolve a worker id into the worker instance if needed.
        worker = self.get_worker(worker)

        if hasattr(obj, "create_pointer"):
            if ptr_id is None:  # Define a remote id if not specified
                ptr_id = sy.ID_PROVIDER.pop()

            pointer = type(obj).create_pointer(
                obj,
                owner=self,
                location=worker,
                id_at_location=obj.id,
                register=True,
                ptr_id=ptr_id,
                local_autograd=local_autograd,
                preinitialize_grad=preinitialize_grad,
                garbage_collect_data=garbage_collect_data,
            )
        else:
            # Objects without pointer support are returned as-is.
            pointer = obj

        # Send the object
        self.send_obj(obj, worker)

        return pointer
    def execute_command(self, message: tuple) -> "pointers.PointerTensor":
        """
        Executes commands received from other workers.

        A command is either a method call on an object (when `_self` is given)
        or a path to a torch function (when `_self` is None). The result is
        registered locally under the requested `return_ids` so the sender can
        address it via pointers.

        Args:
            message: A tuple specifying the command and the args:
                ((command_name, _self, args, kwargs), return_ids).

        Returns:
            A pointer to the result, or None for in-place/void commands.

        Raises:
            ResponseSignatureError: When the pre-allocated return_ids do not
                match the response structure; carries the newly recorded ids.
        """
        (command_name, _self, args, kwargs), return_ids = message

        # TODO add kwargs
        command_name = command_name
        # Handle methods
        if _self is not None:
            if type(_self) == int:
                # An integer `_self` is an object id in the local registry.
                _self = BaseWorker.get_obj(self, _self)
                if _self is None:
                    return
            if type(_self) == str and _self == "self":
                # The worker itself is the target of the method call.
                _self = self
            if sy.torch.is_inplace_method(command_name):
                # In-place methods mutate `_self` and return nothing.
                getattr(_self, command_name)(*args, **kwargs)
                return
            else:
                try:
                    response = getattr(_self, command_name)(*args, **kwargs)
                except TypeError:
                    # TODO Andrew thinks this is gross, please fix. Instead need to properly deserialize strings
                    new_args = [
                        arg.decode("utf-8") if isinstance(arg, bytes) else arg for arg in args
                    ]
                    response = getattr(_self, command_name)(*new_args, **kwargs)
        # Handle functions
        else:
            # At this point, the command is ALWAYS a path to a
            # function (i.e., torch.nn.functional.relu). Thus,
            # we need to fetch this function and run it.
            sy.torch.command_guard(command_name, "torch_modules")

            paths = command_name.split(".")
            command = self
            for path in paths:
                command = getattr(command, path)

            response = command(*args, **kwargs)

        # some functions don't return anything (such as .backward())
        # so we need to check for that here.
        if response is not None:
            # Register response and create pointers for tensor elements
            try:
                response = sy.frameworks.torch.hook_args.register_response(
                    command_name, response, list(return_ids), self
                )
                return response
            except ResponseSignatureError:
                # The response has a different shape than the ids provided:
                # record fresh ids while registering and report them back.
                return_id_provider = sy.ID_PROVIDER
                return_id_provider.set_next_ids(return_ids, check_ids=False)
                return_id_provider.start_recording_ids()
                response = sy.frameworks.torch.hook_args.register_response(
                    command_name, response, return_id_provider, self
                )
                new_ids = return_id_provider.get_recorded_ids()
                raise ResponseSignatureError(new_ids)
    def send_command(
        self, recipient: "BaseWorker", message: str, return_ids: str = None
    ) -> Union[List["pointers.PointerTensor"], "pointers.PointerTensor"]:
        """
        Sends a command through a message to a recipient worker.

        Args:
            recipient: A recipient worker.
            message: A string representing the message being sent.
            return_ids: A list of strings indicating the ids of the
                tensors that should be returned as response to the command
                execution. When None, a single fresh id is allocated.

        Returns:
            A list of PointerTensors or a single PointerTensor if just one
            response is expected.
        """
        if return_ids is None:
            return_ids = tuple([sy.ID_PROVIDER.pop()])

        message = (message, return_ids)

        try:
            ret_val = self.send_msg(codes.MSGTYPE.CMD, message, location=recipient)
        except ResponseSignatureError as e:
            # The remote side allocated different ids for the response; use
            # those instead of the ones we proposed.
            ret_val = None
            return_ids = e.ids_generated

        if ret_val is None or type(ret_val) == bytes:
            # No concrete value came back: build pointers to the remote results.
            responses = []
            for return_id in return_ids:
                response = sy.PointerTensor(
                    location=recipient,
                    id_at_location=return_id,
                    owner=self,
                    id=sy.ID_PROVIDER.pop(),
                )
                responses.append(response)

            # Unwrap single responses for caller convenience.
            if len(return_ids) == 1:
                responses = responses[0]
        else:
            responses = ret_val
        return responses
    def get_obj(self, obj_id: Union[str, int]) -> object:
        """Returns the object from registry.

        Look up an object from the registry using its ID.

        Args:
            obj_id: A string or integer id of an object to look up.

        Returns:
            The stored object, or None if the object is marked private.
        """
        obj = super().get_obj(obj_id)

        # An object called with get_obj will be "with high probability" serialized
        # and sent back, so it will be GCed but remote data is any shouldn't be
        # deleted
        if hasattr(obj, "child") and hasattr(obj.child, "set_garbage_collect_data"):
            obj.child.set_garbage_collect_data(value=False)

        # Private objects are never handed out.
        if hasattr(obj, "private") and obj.private:
            return None

        return obj
def respond_to_obj_req(self, obj_id: Union[str, int]):
"""Returns the deregistered object from registry.
Args:
obj_id: A string or integer id of an object to look up.
"""
obj = self.get_obj(obj_id)
if hasattr(obj, "allowed_to_get") and not obj.allowed_to_get():
raise GetNotPermittedError()
else:
self.de_register_obj(obj)
return obj
    def register_obj(self, obj: object, obj_id: Union[str, int] = None):
        """Registers the specified object with the current worker node.

        Selects an id for the object, assigns a list of owners, and establishes
        whether it's a pointer or not. This method is generally not used by the
        client and is instead used by internal processes (hooks and workers).

        Args:
            obj: A torch Tensor or Variable object to be registered.
            obj_id (int or string): random integer between 0 and 1e10 or
                string uniquely identifying the object.
        """
        # Client workers leave object lifecycle under the end user's control,
        # so nothing is stored locally in that case.
        if not self.is_client_worker:
            super().register_obj(obj, obj_id=obj_id)
# SECTION: convenience methods for constructing frequently used messages
    def send_obj(self, obj: object, location: "BaseWorker"):
        """Send a torch object to a worker.

        Args:
            obj: A torch Tensor or Variable object to be sent.
            location: A BaseWorker instance indicating the worker which should
                receive the object.

        Returns:
            The deserialized response from the remote worker.
        """
        return self.send_msg(codes.MSGTYPE.OBJ, obj, location)
def request_obj(self, obj_id: Union[str, int], location: "BaseWorker") -> object:
"""Returns the requested object from specified location.
Args:
obj_id: A string or integer id of an object to look up.
location: A BaseWorker instance that lets you provide the lookup
location.
Returns:
A torch Tensor or Variable object.
"""
obj = self.send_msg(codes.MSGTYPE.OBJ_REQ, obj_id, location)
return obj
# SECTION: Manage the workers network
    def get_worker(
        self, id_or_worker: Union[str, int, "BaseWorker"], fail_hard: bool = False
    ) -> Union[str, int]:
        """Returns the worker id or instance.

        Allows for resolution of worker ids to workers to happen automatically
        while also making the current worker aware of new ones when discovered
        through other processes.

        If you pass in an ID, it will try to find the worker object reference
        within self._known_workers. If you instead pass in a reference, it will
        save that as a known_worker if it does not exist as one.

        This method is useful because often tensors have to store only the ID
        to a foreign worker which may or may not be known by the worker that is
        de-serializing it at the time of deserialization.

        Args:
            id_or_worker: A string or integer id of the object to be returned
                or the BaseWorker object itself.
            fail_hard (bool): A boolean parameter indicating whether we want to
                throw an exception when a worker is not registered at this
                worker or we just want to log it.

        Returns:
            A string or integer id of the worker or the BaseWorker instance
            representing the worker.

        Example:
            >>> import syft as sy
            >>> hook = sy.TorchHook(verbose=False)
            >>> me = hook.local_worker
            >>> bob = sy.VirtualWorker(id="bob",hook=hook, is_client_worker=False)
            >>> me.add_worker([bob])
            >>> bob
            <syft.core.workers.virtual.VirtualWorker id:bob>
            >>> # we can get the worker using it's id (1)
            >>> me.get_worker('bob')
            <syft.core.workers.virtual.VirtualWorker id:bob>
            >>> # or we can get the worker by passing in the worker
            >>> me.get_worker(bob)
            <syft.core.workers.virtual.VirtualWorker id:bob>
        """
        # Ids may arrive as bytes after deserialization; normalize to str.
        if isinstance(id_or_worker, bytes):
            id_or_worker = str(id_or_worker, "utf-8")

        if isinstance(id_or_worker, (str, int)):
            if id_or_worker in self._known_workers:
                return self._known_workers[id_or_worker]
            else:
                if fail_hard:
                    raise WorkerNotFoundException
                # Unknown id: log and hand the id back unchanged.
                logger.warning("Worker %s couldn't recognize worker %s", self.id, id_or_worker)
                return id_or_worker
        else:
            # A worker instance was passed in: remember it if it is new.
            if id_or_worker.id not in self._known_workers:
                self.add_worker(id_or_worker)

        return id_or_worker
    def add_worker(self, worker: "BaseWorker"):
        """Adds a single worker.

        Adds a worker to the list of _known_workers internal to the BaseWorker.
        Endows this class with the ability to communicate with the remote
        worker being added, such as sending and receiving objects, commands,
        or information about the network.

        Args:
            worker (:class:`BaseWorker`): A BaseWorker object representing the
                pointer to a remote worker, which must have a unique id.

        Returns:
            self, so calls can be chained.

        Example:
            >>> import torch
            >>> import syft as sy
            >>> hook = sy.TorchHook(verbose=False)
            >>> me = hook.local_worker
            >>> bob = sy.VirtualWorker(id="bob",hook=hook, is_client_worker=False)
            >>> me.add_worker([bob])
            >>> x = torch.Tensor([1,2,3,4,5])
            >>> x
            1
            2
            3
            4
            5
            [syft.core.frameworks.torch.tensor.FloatTensor of size 5]
            >>> x.send(bob)
            FloatTensor[_PointerTensor - id:9121428371 owner:0 loc:bob
                        id@loc:47416674672]
            >>> x.get()
            1
            2
            3
            4
            5
            [syft.core.frameworks.torch.tensor.FloatTensor of size 5]
        """
        # Re-registering an existing id is allowed but noisy, since it silently
        # replaces the previous reference.
        if worker.id in self._known_workers:
            logger.warning(
                "Worker "
                + str(worker.id)
                + " already exists. Replacing old worker which could cause \
                    unexpected behavior"
            )
        self._known_workers[worker.id] = worker

        return self
    def add_workers(self, workers: List["BaseWorker"]):
        """Adds several workers in a single call.

        Args:
            workers: A list of BaseWorker representing the workers to add.

        Returns:
            self, so calls can be chained.
        """
        for worker in workers:
            self.add_worker(worker)

        return self
def __str__(self):
"""Returns the string representation of BaseWorker.
A to-string method for all classes that extend BaseWorker.
Returns:
The Type and ID of the worker
Example:
A VirtualWorker instance with id 'bob' would return a string value of.
>>> import syft as sy
>>> bob = sy.VirtualWorker(id="bob")
>>> bob
<syft.workers.virtual.VirtualWorker id:bob>
Note:
__repr__ calls this method by default.
"""
out = "<"
out += str(type(self)).split("'")[1].split(".")[-1]
out += " id:" + str(self.id)
out += " #objects:" + str(len(self._objects))
out += ">"
return out
def __repr__(self):
"""Returns the official string representation of BaseWorker."""
return self.__str__()
    def __getitem__(self, idx):
        # Dictionary-style access to objects stored on this worker, keyed by id.
        return self._objects[idx]
    @staticmethod
    def is_tensor_none(obj):
        """Returns True when the given object is None (IS_NONE message handler)."""
        return obj is None
    def request_is_remote_tensor_none(self, pointer: "pointers.PointerTensor"):
        """
        Sends a request to the remote worker that holds the target of a pointer
        asking whether the value of the remote tensor is None or not.

        Note that the pointer must be valid: if there is no target (which is
        different from having a target equal to None), it will return an error.

        Args:
            pointer: The pointer on which we want to get information.

        Returns:
            A boolean stating if the remote value is None.
        """
        return self.send_msg(codes.MSGTYPE.IS_NONE, pointer, location=pointer.location)
    @staticmethod
    def get_tensor_shape(tensor: torch.Tensor) -> List:
        """
        Returns the shape of a tensor casted into a list, to bypass the
        serialization of a torch.Size object.

        Args:
            tensor: A torch.Tensor.

        Returns:
            A list containing the tensor shape.
        """
        return list(tensor.shape)
    def request_remote_tensor_shape(
        self, pointer: "pointers.PointerTensor"
    ) -> "sy.hook.torch.Size":
        """
        Sends a request to the remote worker that holds the target of a pointer
        asking for its shape.

        Args:
            pointer: A pointer on which we want to get the shape.

        Returns:
            A torch.Size object for the shape.
        """
        shape = self.send_msg(codes.MSGTYPE.GET_SHAPE, pointer, location=pointer.location)
        # The shape travels as a plain list; rebuild the torch.Size here.
        return sy.hook.torch.Size(shape)
def fetch_plan(self, plan_id: Union[str, int]) -> "Plan": # noqa: F821
"""Fetchs a copy of a the plan with the given `plan_id` from the worker registry.
Args:
plan_id: A string indicating the plan id.
Returns:
A plan if a plan with the given `plan_id` exists. Returns None otherwise.
"""
if plan_id in self._objects:
candidate = self._objects[plan_id]
if isinstance(candidate, sy.Plan):
plan = candidate.copy()
plan.owner = sy.local_worker
return plan
return None
    def search(self, *query: List[str]) -> List["pointers.PointerTensor"]:
        """Search for a match between the query terms and a tensor's Id, Tag, or Description.

        Note that the query is an AND query meaning that every item in the list of strings (query*)
        must be found somewhere on the tensor in order for it to be included in the results.

        Args:
            query: A list of strings to match against.

        Returns:
            A list of PointerTensors (wrapped) to every matching object.
        """
        results = list()
        for key, obj in self._objects.items():
            found_something = True
            for query_item in query:
                # If deserialization produced a bytes object instead of a string,
                # make sure it's turned back to a string or a fair comparison.
                if isinstance(query_item, bytes):
                    query_item = query_item.decode("ascii")

                match = False
                # The object's id counts as a match target.
                if query_item == str(key):
                    match = True

                # Tensors can additionally match on tags and description.
                if isinstance(obj, torch.Tensor):
                    if obj.tags is not None:
                        if query_item in obj.tags:
                            match = True

                    if obj.description is not None:
                        if query_item in obj.description:
                            match = True

                # AND semantics: one miss disqualifies the object.
                if not match:
                    found_something = False

            if found_something:
                # set garbage_collect_data to False because if we're searching
                # for a tensor we don't own, then it's probably someone else's
                # decision to decide when to delete the tensor.
                ptr = obj.create_pointer(garbage_collect_data=False, owner=sy.local_worker).wrap()
                results.append(ptr)

        return results
    def deserialized_search(self, query_items: Tuple[str]) -> List["pointers.PointerTensor"]:
        """
        Called when a message requesting a call to `search` is received.
        The serialized arguments will arrive as a `tuple` and it needs to be
        transformed to an arguments list.

        Args:
            query_items(tuple(str)): Tuple of items to search for. Should originate from the
                deserialization of a message requesting a search operation.

        Returns:
            list(PointerTensor): List of matched tensors.
        """
        return self.search(*query_items)
def generate_triple(
self, cmd: Callable, field: int, a_size: tuple, b_size: tuple, locations: list
):
"""Generates a multiplication triple and sends it to all locations.
Args:
cmd: An equation in einsum notation.
field: An integer representing the field size.
a_size: A tuple which is the size that a should be.
b_size: A tuple which is the size that b should be.
locations: A list of workers where the triple should be shared between.
Returns:
A triple of AdditiveSharedTensors such that c_shared = cmd(a_shared, b_shared).
"""
a = self.torch.randint(field, a_size)
b = self.torch.randint(field, b_size)
c = cmd(a, b)
a_shared = a.share(*locations, field=field, crypto_provider=self).child
b_shared = b.share(*locations, field=field, crypto_provider=self).child
c_shared = c.share(*locations, field=field, crypto_provider=self).child
return a_shared, b_shared, c_shared
    @staticmethod
    def create_message_execute_command(
        command_name: str, command_owner=None, return_ids=None, *args, **kwargs
    ):
        """Helper function creating a message for the execute_command call.

        Args:
            command_name: name of the command that shall be called
            command_owner: owner of the function (None for torch functions, "self" for classes derived from
                workers.base or ptr_id for remote objects
            return_ids: optionally set the ids of the return values (for remote objects)
            *args: will be passed to the call of command_name
            **kwargs: will be passed to the call of command_name

        Returns:
            messaging.Message: a CMD message wrapping
            [[command_name, command_owner, args, kwargs], return_ids]
        """
        if return_ids is None:
            return_ids = []
        return messaging.Message(
            codes.MSGTYPE.CMD, [[command_name, command_owner, args, kwargs], return_ids]
        )
| 38.033295 | 112 | 0.603767 |
6f177ea8473ede5b654d83e9e773587d3af49901 | 364 | py | Python | Python/array_duplicates.py | thefool76/hacktoberfest2021 | 237751e17a4fc325ded29fca013fb9f5853cd27c | [
"CC0-1.0"
] | 448 | 2021-10-01T04:24:14.000Z | 2022-03-06T14:34:20.000Z | Python/array_duplicates.py | Chanaka-Madushan-Herath/hacktoberfest2021 | 8473df9e058ccb6049720dd372342e0ea60f0e59 | [
"CC0-1.0"
] | 282 | 2021-10-01T04:29:06.000Z | 2022-03-07T12:42:57.000Z | Python/array_duplicates.py | Chanaka-Madushan-Herath/hacktoberfest2021 | 8473df9e058ccb6049720dd372342e0ea60f0e59 | [
"CC0-1.0"
] | 1,807 | 2021-10-01T04:24:02.000Z | 2022-03-28T04:51:25.000Z | # Find duplicates in an array
# Find duplicates in an array
def findDup(liArr):
    """Return the values that occur more than once in liArr.

    Fixes two defects of the original implementation: it sorted the caller's
    list in place (a surprising side effect), and it reported a value k-1
    times when it occurred k >= 3 times (e.g. [3, 3, 3] -> [3, 3]).

    Args:
        liArr: A list of mutually comparable values (e.g. ints).

    Returns:
        A sorted list of the duplicated values, each reported once.
    """
    ordered = sorted(liArr)  # sorted copy: do not mutate the caller's list
    liDuplicate = []
    for prev, curr in zip(ordered, ordered[1:]):
        # Equal neighbors mean a duplicate; only record it the first time.
        if prev == curr and (not liDuplicate or liDuplicate[-1] != curr):
            liDuplicate.append(curr)
    return liDuplicate
# Read whitespace-separated integers from stdin and report the duplicates.
print("enter array elements: ")
sample = list(map(int, input().split()))
print(findDup(sample))
#print(findDup([2, 3, 1, 0, 2, 5,3])) | 22.75 | 40 | 0.604396 |
742d21f4a9cdd89a76f8266e9b93d01eab209245 | 15,496 | py | Python | python/ccxt/async_support/bit2c.py | z-brain/ccxt | dde32cfb5e0e2e2889ead60687d6fd0fdf5e3f02 | [
"MIT"
] | 4 | 2021-01-10T09:14:17.000Z | 2022-02-15T19:09:52.000Z | python/ccxt/async_support/bit2c.py | z-brain/ccxt | dde32cfb5e0e2e2889ead60687d6fd0fdf5e3f02 | [
"MIT"
] | 2 | 2020-05-12T12:53:48.000Z | 2020-07-05T12:59:52.000Z | python/ccxt/async_support/bit2c.py | z-brain/ccxt | dde32cfb5e0e2e2889ead60687d6fd0fdf5e3f02 | [
"MIT"
] | 4 | 2021-06-02T16:40:35.000Z | 2022-03-14T04:50:31.000Z | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
# -----------------------------------------------------------------------------
# `basestring` only exists on Python 2; referencing it on Python 3 raises
# NameError, in which case we alias it to `str` so isinstance checks work
# identically on both interpreter lines.
try:
    basestring  # Python 3
except NameError:
    basestring = str  # Python 2
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import ArgumentsRequired
class bit2c(Exchange):
    def describe(self):
        """Return the static exchange description for Bit2C, merged over the
        base Exchange defaults: identity, capability flags, endpoint URLs,
        REST API routes, hard-coded markets, and fee schedule."""
        return self.deep_extend(super(bit2c, self).describe(), {
            'id': 'bit2c',
            'name': 'Bit2C',
            'countries': ['IL'],  # Israel
            'rateLimit': 3000,
            'has': {
                'CORS': False,
                'fetchOpenOrders': True,
                'fetchMyTrades': True,
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/27766119-3593220e-5ece-11e7-8b3a-5a041f6bcc3f.jpg',
                'api': 'https://bit2c.co.il',
                'www': 'https://www.bit2c.co.il',
                'referral': 'https://bit2c.co.il/Aff/63bfed10-e359-420c-ab5a-ad368dab0baf',
                'doc': [
                    'https://www.bit2c.co.il/home/api',
                    'https://github.com/OferE/bit2c',
                ],
            },
            'api': {
                'public': {
                    'get': [
                        'Exchanges/{pair}/Ticker',
                        'Exchanges/{pair}/orderbook',
                        'Exchanges/{pair}/trades',
                        'Exchanges/{pair}/lasttrades',
                    ],
                },
                'private': {
                    'post': [
                        'Merchant/CreateCheckout',
                        'Order/AddCoinFundsRequest',
                        'Order/AddFund',
                        'Order/AddOrder',
                        'Order/AddOrderMarketPriceBuy',
                        'Order/AddOrderMarketPriceSell',
                        'Order/CancelOrder',
                        'Order/AddCoinFundsRequest',
                        'Order/AddStopOrder',
                        'Payment/GetMyId',
                        'Payment/Send',
                        'Payment/Pay',
                    ],
                    'get': [
                        'Account/Balance',
                        'Account/Balance/v2',
                        'Order/MyOrders',
                        'Order/GetById',
                        'Order/AccountHistory',
                        'Order/OrderHistory',
                    ],
                },
            },
            # Bit2C lists a fixed set of NIS-quoted markets; they are declared
            # statically here instead of being fetched from the API.
            'markets': {
                'BTC/NIS': {'id': 'BtcNis', 'symbol': 'BTC/NIS', 'base': 'BTC', 'quote': 'NIS', 'baseId': 'Btc', 'quoteId': 'Nis'},
                'ETH/NIS': {'id': 'EthNis', 'symbol': 'ETH/NIS', 'base': 'ETH', 'quote': 'NIS', 'baseId': 'Eth', 'quoteId': 'Nis'},
                'BCH/NIS': {'id': 'BchabcNis', 'symbol': 'BCH/NIS', 'base': 'BCH', 'quote': 'NIS', 'baseId': 'Bchabc', 'quoteId': 'Nis'},
                'LTC/NIS': {'id': 'LtcNis', 'symbol': 'LTC/NIS', 'base': 'LTC', 'quote': 'NIS', 'baseId': 'Ltc', 'quoteId': 'Nis'},
                'ETC/NIS': {'id': 'EtcNis', 'symbol': 'ETC/NIS', 'base': 'ETC', 'quote': 'NIS', 'baseId': 'Etc', 'quoteId': 'Nis'},
                'BTG/NIS': {'id': 'BtgNis', 'symbol': 'BTG/NIS', 'base': 'BTG', 'quote': 'NIS', 'baseId': 'Btg', 'quoteId': 'Nis'},
                'BSV/NIS': {'id': 'BchsvNis', 'symbol': 'BSV/NIS', 'base': 'BSV', 'quote': 'NIS', 'baseId': 'Bchsv', 'quoteId': 'Nis'},
                'GRIN/NIS': {'id': 'GrinNis', 'symbol': 'GRIN/NIS', 'base': 'GRIN', 'quote': 'NIS', 'baseId': 'Grin', 'quoteId': 'Nis'},
            },
            'fees': {
                'trading': {
                    # 0.5% flat for both sides.
                    'maker': 0.5 / 100,
                    'taker': 0.5 / 100,
                },
            },
            'options': {
                'fetchTradesMethod': 'public_get_exchanges_pair_lasttrades',
            },
            'exceptions': {
                # {"error" : "Please provide valid APIkey"}
                # {"error" : "Please provide valid nonce in Request UInt64.TryParse failed for nonce :"}
            },
        })
async def fetch_balance(self, params={}):
await self.load_markets()
balance = await self.privateGetAccountBalanceV2(params)
#
# {
# "AVAILABLE_NIS": 0.0,
# "NIS": 0.0,
# "LOCKED_NIS": 0.0,
# "AVAILABLE_BTC": 0.0,
# "BTC": 0.0,
# "LOCKED_BTC": 0.0,
# "AVAILABLE_ETH": 0.0,
# "ETH": 0.0,
# "LOCKED_ETH": 0.0,
# "AVAILABLE_BCHSV": 0.0,
# "BCHSV": 0.0,
# "LOCKED_BCHSV": 0.0,
# "AVAILABLE_BCHABC": 0.0,
# "BCHABC": 0.0,
# "LOCKED_BCHABC": 0.0,
# "AVAILABLE_LTC": 0.0,
# "LTC": 0.0,
# "LOCKED_LTC": 0.0,
# "AVAILABLE_ETC": 0.0,
# "ETC": 0.0,
# "LOCKED_ETC": 0.0,
# "AVAILABLE_BTG": 0.0,
# "BTG": 0.0,
# "LOCKED_BTG": 0.0,
# "AVAILABLE_GRIN": 0.0,
# "GRIN": 0.0,
# "LOCKED_GRIN": 0.0,
# "Fees": {
# "BtcNis": {"FeeMaker": 1.0, "FeeTaker": 1.0},
# "EthNis": {"FeeMaker": 1.0, "FeeTaker": 1.0},
# "BchabcNis": {"FeeMaker": 1.0, "FeeTaker": 1.0},
# "LtcNis": {"FeeMaker": 1.0, "FeeTaker": 1.0},
# "EtcNis": {"FeeMaker": 1.0, "FeeTaker": 1.0},
# "BtgNis": {"FeeMaker": 1.0, "FeeTaker": 1.0},
# "LtcBtc": {"FeeMaker": 1.0, "FeeTaker": 1.0},
# "BchsvNis": {"FeeMaker": 1.0, "FeeTaker": 1.0},
# "GrinNis": {"FeeMaker": 1.0, "FeeTaker": 1.0}
# }
# }
#
result = {'info': balance}
codes = list(self.currencies.keys())
for i in range(0, len(codes)):
code = codes[i]
account = self.account()
currencyId = self.currency_id(code)
uppercase = currencyId.upper()
if uppercase in balance:
account['free'] = self.safe_float(balance, 'AVAILABLE_' + uppercase)
account['total'] = self.safe_float(balance, uppercase)
result[code] = account
return self.parse_balance(result)
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
request = {
'pair': self.market_id(symbol),
}
orderbook = await self.publicGetExchangesPairOrderbook(self.extend(request, params))
return self.parse_order_book(orderbook)
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
request = {
'pair': self.market_id(symbol),
}
ticker = await self.publicGetExchangesPairTicker(self.extend(request, params))
timestamp = self.milliseconds()
averagePrice = self.safe_float(ticker, 'av')
baseVolume = self.safe_float(ticker, 'a')
quoteVolume = None
if baseVolume is not None and averagePrice is not None:
quoteVolume = baseVolume * averagePrice
last = self.safe_float(ticker, 'll')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': None,
'low': None,
'bid': self.safe_float(ticker, 'h'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'l'),
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': averagePrice,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
method = self.options['fetchTradesMethod']
request = {
'pair': market['id'],
}
response = await getattr(self, method)(self.extend(request, params))
if isinstance(response, basestring):
raise ExchangeError(response)
return self.parse_trades(response, market, since, limit)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
method = 'privatePostOrderAddOrder'
request = {
'Amount': amount,
'Pair': self.market_id(symbol),
}
if type == 'market':
method += 'MarketPrice' + self.capitalize(side)
else:
request['Price'] = price
request['Total'] = amount * price
request['IsBid'] = (side == 'buy')
response = await getattr(self, method)(self.extend(request, params))
return {
'info': response,
'id': response['NewOrder']['id'],
}
async def cancel_order(self, id, symbol=None, params={}):
request = {
'id': id,
}
return await self.privatePostOrderCancelOrder(self.extend(request, params))
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOpenOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
}
response = await self.privateGetOrderMyOrders(self.extend(request, params))
orders = self.safe_value(response, market['id'], {})
asks = self.safe_value(orders, 'ask', [])
bids = self.safe_value(orders, 'bid', [])
return self.parse_orders(self.array_concat(asks, bids), market, since, limit)
def parse_order(self, order, market=None):
timestamp = self.safe_integer(order, 'created')
price = self.safe_float(order, 'price')
amount = self.safe_float(order, 'amount')
cost = None
if price is not None:
if amount is not None:
cost = price * amount
symbol = None
if market is not None:
symbol = market['symbol']
side = self.safe_value(order, 'type')
if side == 0:
side = 'buy'
elif side == 1:
side = 'sell'
id = self.safe_string(order, 'id')
status = self.safe_string(order, 'status')
return {
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': None,
'side': side,
'price': price,
'amount': amount,
'filled': None,
'remaining': None,
'cost': cost,
'trades': None,
'fee': None,
'info': order,
'average': None,
}
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
market = None
request = {}
if limit is not None:
request['take'] = limit
request['take'] = limit
if since is not None:
request['toTime'] = self.ymd(self.milliseconds(), '.')
request['fromTime'] = self.ymd(since, '.')
if symbol is not None:
market = self.market(symbol)
request['pair'] = market['id']
response = await self.privateGetOrderOrderHistory(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
timestamp = None
id = None
price = None
amount = None
orderId = None
feeCost = None
side = None
reference = self.safe_string(trade, 'reference')
if reference is not None:
timestamp = self.safe_timestamp(trade, 'ticks')
price = self.safe_float(trade, 'price')
amount = self.safe_float(trade, 'firstAmount')
reference_parts = reference.split('|') # reference contains 'pair|orderId|tradeId'
if market is None:
marketId = self.safe_string(trade, 'pair')
if marketId in self.markets_by_id[marketId]:
market = self.markets_by_id[marketId]
elif reference_parts[0] in self.markets_by_id:
market = self.markets_by_id[reference_parts[0]]
orderId = reference_parts[1]
id = reference_parts[2]
side = self.safe_integer(trade, 'action')
if side == 0:
side = 'buy'
elif side == 1:
side = 'sell'
feeCost = self.safe_float(trade, 'feeAmount')
else:
timestamp = self.safe_timestamp(trade, 'date')
id = self.safe_string(trade, 'tid')
price = self.safe_float(trade, 'price')
amount = self.safe_float(trade, 'amount')
side = self.safe_value(trade, 'isBid')
if side is not None:
if side:
side = 'buy'
else:
side = 'sell'
symbol = None
if market is not None:
symbol = market['symbol']
return {
'info': trade,
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': price * amount,
'fee': {
'cost': feeCost,
'currency': 'NIS',
'rate': None,
},
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'] + '/' + self.implode_params(path, params)
if api == 'public':
url += '.json'
else:
self.check_required_credentials()
nonce = self.nonce()
query = self.extend({
'nonce': nonce,
}, params)
auth = self.urlencode(query)
if method == 'GET':
if query:
url += '?' + auth
else:
body = auth
signature = self.hmac(self.encode(auth), self.encode(self.secret), hashlib.sha512, 'base64')
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'key': self.apiKey,
'sign': self.decode(signature),
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
| 39.329949 | 137 | 0.475607 |
fec18d288eb9381938b3ede83b8a9070c59665e7 | 381 | py | Python | src/migrations/0014_courier_fcm_token.py | AgnosticMe/swiftly | 1fc5fed6e90b8cbdcb6038303537aa0f82ae70d7 | [
"MIT"
] | null | null | null | src/migrations/0014_courier_fcm_token.py | AgnosticMe/swiftly | 1fc5fed6e90b8cbdcb6038303537aa0f82ae70d7 | [
"MIT"
] | null | null | null | src/migrations/0014_courier_fcm_token.py | AgnosticMe/swiftly | 1fc5fed6e90b8cbdcb6038303537aa0f82ae70d7 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.5 on 2021-07-29 10:20
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a free-form ``fcm_token`` text field (blank allowed) to the
    ``Courier`` model -- presumably a Firebase Cloud Messaging token,
    judging by the name.  (A trailing dataset-residue stats row was removed
    after the class body.)"""

    dependencies = [
        ('src', '0013_transaction_status'),
    ]

    operations = [
        migrations.AddField(
            model_name='courier',
            name='fcm_token',
            field=models.TextField(blank=True),
        ),
    ]
4b52ae4625affef5671c97c7282bae5ad2b80aca | 857 | py | Python | valleydeight/base.py | benkrikler/valedictory | 36968f4da5bb5a6c4e85bd7f02fdc32db08e9579 | [
"MIT"
] | 1 | 2019-02-22T17:12:48.000Z | 2019-02-22T17:12:48.000Z | valleydeight/base.py | benkrikler/valedictory | 36968f4da5bb5a6c4e85bd7f02fdc32db08e9579 | [
"MIT"
] | 1 | 2019-02-22T16:40:36.000Z | 2019-02-22T16:40:36.000Z | valleydeight/base.py | benkrikler/valleydeight | 36968f4da5bb5a6c4e85bd7f02fdc32db08e9579 | [
"MIT"
] | null | null | null | from .exceptions import ValidatorException
__all__ = ["Pass"]
class BaseValidator(object):
    """Common base for all validators.

    A validator is a callable: ``validator(instance)`` returns the validated
    value or raises.  Subclasses override ``__call__`` and may expose
    configurable attribute names through the ``options`` property so they can
    be assigned via ``opt``/``opts``.  (A trailing dataset-residue stats row
    was removed after ``Pass``.)
    """

    def __init__(self, node=""):
        self.__name = node

    @property
    def node(self):
        """Name of the node this validator is attached to."""
        return self.__name

    @property
    def options(self):
        """Tuple of attribute names settable through opt()/opts()."""
        return tuple()

    def __call__(self, instance):
        raise NotImplementedError

    def _raise(self, instance=None, msg=None):
        # central raise hook so subclasses fail with a uniform exception type
        raise ValidatorException(msg)

    def opt(self, option, value):
        """Set one declared option; returns self so calls can be chained."""
        if option not in self.options:
            raise KeyError("{} not an option for {} validators".format(option, type(self)))
        setattr(self, option, value)
        return self

    def opts(self, **kwargs):
        """Set several options at once; returns self so calls can be chained."""
        for k, v in kwargs.items():
            self.opt(k, v)
        return self


class Pass(BaseValidator):
    """No-op validator: accepts and returns any instance unchanged."""

    def __call__(self, instance):
        return instance
b443139196fe6b693102b2d777e546131399ae94 | 773 | py | Python | base/test_views.py | tosp/ProjectProto | 5e2c73d401da9c48088cbe573723e224d29f2c1a | [
"MIT"
] | null | null | null | base/test_views.py | tosp/ProjectProto | 5e2c73d401da9c48088cbe573723e224d29f2c1a | [
"MIT"
] | null | null | null | base/test_views.py | tosp/ProjectProto | 5e2c73d401da9c48088cbe573723e224d29f2c1a | [
"MIT"
] | null | null | null | from django.test import TestCase
from splinter import Browser
class TestBaseViews(TestCase):
    """Browser-level smoke tests for the base views.

    Requires a server running on localhost:8000 and a local Chrome webdriver.
    (A trailing dataset-residue stats row after the class was removed.)
    """

    def setUp(self):
        # a fresh browser per test keeps state from leaking between tests
        self.browser = Browser('chrome')

    def tearDown(self):
        self.browser.quit()

    def test_home(self):
        self.browser.visit('http://localhost:8000')
        # BUGFIX: assert on the presence check itself -- the previous
        # "if present: assertTrue(True)" pattern passed vacuously when the
        # expected text was missing.
        self.assertTrue(self.browser.is_text_present('Hello, world!'))

    def test_robots(self):
        self.browser.visit('http://localhost:8000/robots.txt')
        self.assertTrue(self.browser.is_text_present('robotstxt'))

    def test_humans(self):
        self.browser.visit('http://localhost:8000/humans.txt')
        self.assertTrue(self.browser.is_text_present('humanstxt'))
c42fd12761fcc8e0e3abacdb1ba550904e829133 | 817 | py | Python | pypal/geometry/capsule.py | code-forger/PyPal | f880f2bc9a4aa01e1c9aa2cfc956202e91ab1a69 | [
"BSD-2-Clause"
] | null | null | null | pypal/geometry/capsule.py | code-forger/PyPal | f880f2bc9a4aa01e1c9aa2cfc956202e91ab1a69 | [
"BSD-2-Clause"
] | null | null | null | pypal/geometry/capsule.py | code-forger/PyPal | f880f2bc9a4aa01e1c9aa2cfc956202e91ab1a69 | [
"BSD-2-Clause"
] | null | null | null | from pypal import private_globals as _pal
import ctypes as c
import weakref
from geometry import Geometry
class Capsule(Geometry):
    """ A Capsule Geometry. """
    def __init__(self, pos, size, rotation=[0,0,0], mass=1):
        """
        Parameters:
          pos: ``float[3]`` The x, y, z, position of the Capsule.
          size: ``float[2]`` The radius, height of the Capsule.
          rotation: ``float[3]`` The rx, ry, rz rotation of the Capsule.
          mass: ``float`` The mass of the Capsule.
        """
        # Create the native capsule and keep a typed handle to its geometry.
        # (Dataset-residue text fused onto the last line -- which made it a
        # syntax error -- has been removed.)
        self.obj = _pal.lib.geometry_capsule_create(
            c.c_float(pos[0]), c.c_float(pos[1]), c.c_float(pos[2]),
            c.c_float(rotation[0]), c.c_float(rotation[1]), c.c_float(rotation[2]),
            c.c_float(size[0]), c.c_float(size[1]), c.c_float(mass))
        self._geometry = _pal.lib.cast_capsule_geometry(self.obj)
dbd95dfd2c90383b40aca29c847bdede8dbd8317 | 10,493 | py | Python | src/train_and_run_model.py | bithikajain/House-Number-Detection | 15dc4dadc54e00e04fdabc6c136777767c538b08 | [
"MIT"
] | null | null | null | src/train_and_run_model.py | bithikajain/House-Number-Detection | 15dc4dadc54e00e04fdabc6c136777767c538b08 | [
"MIT"
] | null | null | null | src/train_and_run_model.py | bithikajain/House-Number-Detection | 15dc4dadc54e00e04fdabc6c136777767c538b08 | [
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
sys.path.append('/home/bithika/src/House-Number-Detection')
import time
from datetime import timedelta
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.io import loadmat
from skimage import color
from skimage import io
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
import tensorflow as tf
import argparse
import h5py
from sklearn.metrics import confusion_matrix
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
plt.rcParams['figure.figsize'] = (16.0, 4.0)
from plot_utils import plot_confusion_metric, plot_train_images, plot_learning_rate
from preprocess_utils import load_preprocessed_data, prepare_log_dir, get_batch, flatten
###############################################################################
###############################################################################
# Argument Parsing
#
# Command-line configuration for the SVHN training run.  Every path defaults
# to a machine-specific location, so override them when running elsewhere.
parser = argparse.ArgumentParser(description='Train model')
parser.add_argument('--base-dir', type=str,
                    default='/home/bithika/src/House-Number-Detection', help='Input base directory ')
parser.add_argument('--train-dir', type=str,
                    default='/home/bithika/src/House-Number-Detection/data/raw/train_32x32.mat', help='Input data directory')
parser.add_argument('--test-dir', type=str,
                    default='/home/bithika/src/House-Number-Detection/data/raw/test_32x32.mat', help='Input data directory')
parser.add_argument('--output-dir', type=str,
                    default='/home/bithika/src/House-Number-Detection/reports', help='Input data directory')
parser.add_argument('--processed-data-dir', type=str,
                    default='/home/bithika/src/House-Number-Detection/data/processed', help='processed data directory')
parser.add_argument('--validation-data-fraction', type=float,
                    default=0.1, help='validation dataset split fraction (default: 0.1)')
parser.add_argument('--summary-dir', type=str,
                    default='/home/bithika/src/House-Number-Detection/models', help='summary directory ')
args = parser.parse_args()
###############################################################################
###############################################################################
# Training hyper-parameters
max_epochs = 10
batch_size = 512
#Discarding or fuse % of neurons in Train mode
discard_per = 0.7
# Make sure the output directories exist.  Catch OSError (already exists)
# only -- the previous bare `except` silently hid every other failure too.
for d in [args.summary_dir, args.output_dir]:
    try:
        os.makedirs(d)
    except OSError:
        pass
TENSORBOARD_SUMMARIES_DIR = os.path.join(args.summary_dir, 'svhn_classifier_logs')
print('Loading data...')
# Pre-processed greyscale SVHN splits from the project's HDF5 file
X_train, y_train, X_test, y_test, X_val, y_val = load_preprocessed_data('SVHN_grey.h5',
                                                                        args.processed_data_dir)
num_examples = X_train.shape[0]
print('Training set', X_train.shape, y_train.shape)
print('Validation set', X_val.shape, y_val.shape)
print('Test set', X_test.shape, y_test.shape)
### placeholder variable
# NOTE(review): `comp` (flattened image size) is computed but never used below
comp = 32*32
tf.logging.set_verbosity(tf.logging.INFO)
# Graph inputs: greyscale 32x32x1 images and one-hot labels over 10 digits
x = tf.placeholder(tf.float32, shape = [None, 32, 32, 1], name='Input_Data')
y = tf.placeholder(tf.float32, shape = [None, 10], name='Input_Labels')
# Dense (argmax) form of the one-hot labels, used by the accuracy metric
y_cls = tf.argmax(y, 1)
# Dropout rate fed at run time: discard_per while training, 0.0 for eval
discard_rate = tf.placeholder(tf.float32, name='Discard_rate')
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
prepare_log_dir(TENSORBOARD_SUMMARIES_DIR)
###############################################################################
def cnn_model_fn(features):
    """Build the SVHN classifier graph and return its logits.

    Architecture: INPUT -> [CONV -> RELU -> POOL] x 2 -> FC -> DROPOUT -> FC.
    The dropout rate comes from the module-level ``discard_rate`` placeholder
    (fed as 0.0 at evaluation time).
    """
    # Greyscale 32x32 input, NHWC layout
    input_images = tf.reshape(features, [-1, 32, 32, 1], name='Reshaped_Input')
    # 32 5x5 filters, then a 2x2 max-pool -> 16x16x32
    conv_one = tf.layers.conv2d(
        inputs=input_images,
        filters=32,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu)
    pool_one = tf.layers.max_pooling2d(inputs=conv_one, pool_size=[2, 2], strides=2)
    # 64 5x5 filters, then a 2x2 max-pool -> 8x8x64
    conv_two = tf.layers.conv2d(
        inputs=pool_one,
        filters=64,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu)
    pool_two = tf.layers.max_pooling2d(inputs=conv_two, pool_size=[2, 2], strides=2)
    # Flatten, one hidden dense layer with dropout, then 10-way logits
    flattened = tf.reshape(pool_two, [-1, 8 * 8 * 64])
    dense = tf.layers.dense(inputs=flattened, units=256, activation=tf.nn.relu)
    dropped = tf.layers.dropout(
        inputs=dense, rate=discard_rate)
    logits = tf.layers.dense(inputs=dropped, units=10)
    return logits
###############################################################################
###############################################################################
#
# Prediction and Optimizer
#
# Builds the classifier outputs, the cross-entropy loss, the Adam training op
# and the accuracy metric on top of the placeholders defined above.
#
prediction = cnn_model_fn(x)
# Dense predicted class per example
prediction_cls = tf.argmax(prediction, 1)
# Softmax cross-entropy against the one-hot labels, averaged over the batch
loss = tf.reduce_mean(tf.losses.softmax_cross_entropy(
    onehot_labels=y, logits=prediction))
# Adam with its library-default parameters
optimizer = tf.train.AdamOptimizer().minimize(loss)
###############################################################################
###############################################################################
#
# Accuracy
#
# Predicted class equals the true class of each image?
correct_prediction = tf.equal(prediction_cls, y_cls)
# Cast predictions to float and calculate the mean
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
###############################################################################
###############################################################################
#
# Tensorflow session
#
# Creates the session, initialises all variables and prepares the checkpoint
# location used by the saver after training.
#
sess = tf.Session()
sess.run(tf.global_variables_initializer())
### save model ckpts
saver = tf.train.Saver()
save_dir = os.path.join(args.summary_dir, 'checkpoints/')
# Create directory if it does not exist
if not os.path.exists(save_dir):
    os.makedirs(save_dir)
save_path = os.path.join(save_dir, 'svhn_single_greyscale')
###############################################################################
###############################################################################
#
# restore variable
#
# To resume from a checkpoint, uncomment the restore call below.
# NOTE(review): it references `session`, but the live variable is named
# `sess` -- fix the name if this is ever re-enabled.
#saver.restore(sess=session, save_path=save_path)
#with tf.Session() as sess:
#    sess.run(tf.global_variables_initializer())
## To calculate total time of training
train_loss = []
valid_loss = []
start_time = time.time()
# Main training loop.  NOTE(review): `epoch_loss` is initialised but never
# accumulated anywhere below.
for epoch in range(max_epochs):
    print ('Training .........')
    epoch_loss = 0
    print ()
    print ('Epoch ', epoch+1 , ': ........ \n')
    step = 0
    ## Training epochs ....
    for (epoch_x , epoch_y) in get_batch(X_train, y_train, batch_size):
        # One optimisation step with dropout enabled (discard_per)
        _, train_accu, c = sess.run([optimizer, accuracy, loss], feed_dict={x: epoch_x, y: epoch_y, discard_rate: discard_per})
        train_loss.append(c)
        if(step%40 == 0):
            print ("Step:", step, ".....", "\nMini-Batch Loss : ", c)
            print('Mini-Batch Accuracy :' , train_accu*100.0, '%')
        ## Validating prediction and summaries
        # NOTE(review): this full validation pass runs for *every* mini-batch,
        # not only every 40 steps -- possibly an indentation oversight, and
        # it is expensive.
        accu = 0.0
        for (epoch_x , epoch_y) in get_batch(X_val, y_val, 512):
            correct, _c = sess.run([correct_prediction, loss], feed_dict={x: epoch_x, y: epoch_y, discard_rate: 0.0})
            valid_loss.append(_c)
            accu+= np.sum(correct[correct == True])
        print('Validation Accuracy :' , accu*100.0/y_val.shape[0], '%')
        print ()
        step = step + 1
    print ('Epoch', epoch+1, 'completed out of ', max_epochs)
## Calculate net time
time_diff = time.time() - start_time
## Testing prediction and summaries
# Final evaluation on the test set with dropout disabled
accu = 0.0
for (epoch_x , epoch_y) in get_batch(X_test, y_test, 512):
    correct = sess.run([correct_prediction], feed_dict={x: epoch_x, y: epoch_y, discard_rate: 0.0})
    accu+= np.sum(correct[correct == True])
print('Test Accuracy :' , accu*100.0/y_test.shape[0], '%')
print("Time usage: " + str(timedelta(seconds=int(round(time_diff)))))
print ()
saver.save(sess=sess, save_path=save_path)
### save model ckpts
# NOTE(review): this block duplicates the saver/checkpoint-path setup done
# before training, and the model was already saved at the end of the loop
# above -- it looks redundant and could be deleted.
saver = tf.train.Saver()
save_dir = os.path.join(args.summary_dir, 'checkpoints/')
# Create directory if it does not exist
if not os.path.exists(save_dir):
    os.makedirs(save_dir)
save_path = os.path.join(save_dir, 'svhn_single_greyscale')
###############################################################################
###############################################################################
#
# Plotting
#
# Runs the trained network over the test set and writes diagnostic figures
# (sample grids, confusion matrix) into args.output_dir.
#
print('Plotting ')
plot_train_images(X_train, 3, 6, y_train, args.output_dir, 'train_reults.png' )
# Collect dense class predictions for the whole test set, batch by batch
test_pred = []
for (epoch_x , epoch_y) in get_batch(X_test, y_test, 512):
    correct = sess.run([prediction_cls], feed_dict={x: epoch_x, y: epoch_y, discard_rate: 0.0})
    test_pred.append((np.asarray(correct, dtype=int)).T)
print ('Predicting completed')
# Flatten the per-batch prediction arrays into a single 1-D label vector
flat_array = flatten(test_pred)
flat_array = (flat_array.T)
flat_array = flat_array[0]
print('Plotting confusion metric')
plot_confusion_metric(y_test, flat_array, args.output_dir,'confusion_metric.png' )
# Boolean mask of test images the model got wrong
incorrect = flat_array != np.argmax(y_test, axis=1)
print('Plotting misclassified results')
# Select the incorrectly classified examples
images = X_test[incorrect]
cls_true = y_test[incorrect]
cls_pred = flat_array[incorrect]
# Plot the mis-classified examples
plot_train_images(images, 3, 6, cls_true, args.output_dir, 'missclassified_test_reults.png', cls_pred)
print('Plotting correctly classified results')
# Find the correctly classified examples
correct = np.invert(incorrect)
# Select the correctly classified examples
images = X_test[correct]
cls_true = y_test[correct]
cls_pred = flat_array[correct]
# Plot the correctly-classified examples
plot_train_images(images, 3, 6, cls_true, args.output_dir, 'correct_classified_test_reults.png', cls_pred)
# Plot the learning curves (training vs validation loss).
# BUGFIX: dataset-residue text fused onto this line made it a syntax error;
# the residue has been removed.
plot_learning_rate(train_loss, valid_loss, args.output_dir, 'learning_curves.png')
83ca928bd90ba6e0929177e0ce7574aa39898e7f | 9,791 | py | Python | implementations/acgan/acgan.py | jasonlai777/Pytorch_GAN | b2fc44a38400b58c2efd359afb5ec74960881263 | [
"MIT"
] | null | null | null | implementations/acgan/acgan.py | jasonlai777/Pytorch_GAN | b2fc44a38400b58c2efd359afb5ec74960881263 | [
"MIT"
] | null | null | null | implementations/acgan/acgan.py | jasonlai777/Pytorch_GAN | b2fc44a38400b58c2efd359afb5ec74960881263 | [
"MIT"
] | null | null | null | import argparse
import os
import numpy as np
import math
import torchvision.transforms as transforms
from torchvision.utils import save_image
#import torchvision.datasets.ImageFolder as IF
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
from dataset import Datasets
import torch.nn as nn
import torch.nn.functional as F
import torch
# Output directory for periodically sampled generator images
os.makedirs("images", exist_ok=True)
# Run configuration / hyper-parameters
parser = argparse.ArgumentParser()
parser.add_argument("--n_epochs", type=int, default=100, help="number of epochs of training")
parser.add_argument("--batch_size", type=int, default=64, help="size of the batches")
parser.add_argument("--lr", type=float, default=0.0002, help="adam: learning rate")
parser.add_argument("--b1", type=float, default=0.5, help="adam: decay of first order momentum of gradient")
parser.add_argument("--b2", type=float, default=0.999, help="adam: decay of first order momentum of gradient")
parser.add_argument("--n_cpu", type=int, default=8, help="number of cpu threads to use during batch generation")
parser.add_argument("--latent_dim", type=int, default=100, help="dimensionality of the latent space")
parser.add_argument("--n_classes", type=int, default=1, help="number of classes for dataset")
parser.add_argument("--img_size", type=int, default=256, help="size of each image dimension")
parser.add_argument("--channels", type=int, default=3, help="number of image channels")
parser.add_argument("--sample_interval", type=int, default=10, help="interval between image sampling")
opt = parser.parse_args()
print(opt)
# Use the GPU tensor types below whenever CUDA is available
cuda = True if torch.cuda.is_available() else False
def weights_init_normal(m):
    """DCGAN-style initialisation: N(0, 0.02) for conv weights, N(1, 0.02)
    weights plus zero bias for BatchNorm2d layers; other modules untouched."""
    layer_name = m.__class__.__name__
    if "Conv" in layer_name:
        torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif "BatchNorm2d" in layer_name:
        torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
        torch.nn.init.constant_(m.bias.data, 0.0)
class Generator(nn.Module):
def __init__(self):
super(Generator, self).__init__()
self.label_emb = nn.Embedding(opt.n_classes, opt.latent_dim)
self.init_size = opt.img_size // 4 # Initial size before upsampling
self.l1 = nn.Sequential(nn.Linear(opt.latent_dim, 128 * self.init_size ** 2))
self.conv_blocks = nn.Sequential(
nn.BatchNorm2d(128),
nn.Upsample(scale_factor=2),
nn.Conv2d(128, 128, 3, stride=1, padding=1),
nn.BatchNorm2d(128, 0.8),
nn.LeakyReLU(0.2, inplace=True),
nn.Upsample(scale_factor=2),
nn.Conv2d(128, 64, 3, stride=1, padding=1),
nn.BatchNorm2d(64, 0.8),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(64, opt.channels, 3, stride=1, padding=1),
nn.Tanh(),
)
def forward(self, noise, labels):
gen_input = torch.mul(self.label_emb(labels), noise)
#print("INPUT: " + str(gen_input.shape))
out = self.l1(gen_input)
#print("G1: " + str(out.shape))
out = out.view(out.shape[0], 128, self.init_size, self.init_size)
#print("G2: " + str(out.shape))
img = self.conv_blocks(out)
#print("G: " + str(img.shape))
return img
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
def discriminator_block(in_filters, out_filters, bn=True):
"""Returns layers of each discriminator block"""
block = [nn.Conv2d(in_filters, out_filters, 3, 2, 1), nn.LeakyReLU(0.2, inplace=True), nn.Dropout2d(0.25)]
if bn:
block.append(nn.BatchNorm2d(out_filters, 0.8))
return block
self.conv_blocks = nn.Sequential(
*discriminator_block(opt.channels, 16, bn=False),
*discriminator_block(16, 32),
*discriminator_block(32, 64),
*discriminator_block(64, 128),
)
# The height and width of downsampled image
ds_size = opt.img_size // 2 ** 4
# Output layers
self.adv_layer = nn.Sequential(nn.Linear(128 * ds_size ** 2, 1), nn.Sigmoid())
self.aux_layer = nn.Sequential(nn.Linear(128 * ds_size ** 2, opt.n_classes), nn.Softmax())
def forward(self, img):
out = self.conv_blocks(img)
#print(out.shape)
out = out.view(out.shape[0], -1)
#print(out.shape)
validity = self.adv_layer(out)
#print(validity.shape)
label = self.aux_layer(out)
#print(label.shape)
return validity, label
# Loss functions: BCE for the real/fake head, cross-entropy for the class head
adversarial_loss = torch.nn.BCELoss()
auxiliary_loss = torch.nn.CrossEntropyLoss()
# Initialize generator and discriminator
generator = Generator()
discriminator = Discriminator()
# Move models and losses to the GPU when one is available
if cuda:
    generator.cuda()
    discriminator.cuda()
    adversarial_loss.cuda()
    auxiliary_loss.cuda()
# Initialize weights (DCGAN-style, see weights_init_normal above)
generator.apply(weights_init_normal)
discriminator.apply(weights_init_normal)
# Configure data loader
'''os.makedirs("../../data/mnist", exist_ok=True)
dataloader = torch.utils.data.DataLoader(
datasets.MNIST(
"../../data/mnist",
train=True,
download=True,
transform=transforms.Compose(
[transforms.Resize(opt.img_size), transforms.ToTensor(), transforms.Normalize([0.5], [0.5])]
),
),
batch_size=opt.batch_size,
shuffle=True,
)'''
#dataset = Datasets("../../data/image", opt)
transform1 = transforms.Compose([
# you can add other transformations in this list
transforms.Resize(opt.img_size),
transforms.Pad(2000, fill=0),
transforms.CenterCrop(opt.img_size),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5])
])
dataset = datasets.ImageFolder("../../data/image/training_set", transform=transform1, target_transform=None, is_valid_file=None)
print(dataset.__len__())
dataloader = torch.utils.data.DataLoader(dataset,
batch_size=opt.batch_size,
shuffle=True,
)
dliter = iter(dataloader)
j = 0
'''
for data in dataloader:
#print(str(j)+'\n')
save_image(data[0].data, "input%d.png" % j, nrow=1, normalize=True)
j+=1
#exit()
'''
# Optimizers: separate Adam instances for generator and discriminator
optimizer_G = torch.optim.Adam(generator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
# Tensor constructors that land on the GPU when available
FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor
def sample_image(n_row, batches_done):
    """Save an n_row x n_row grid of generated digits to images/<batches_done>.png,
    cycling the class label 0..n_row-1 across each row."""
    # One latent vector per cell of the grid
    z = Variable(FloatTensor(np.random.normal(0, 1, (n_row ** 2, opt.latent_dim))))
    # Row-major labels: 0..n_row-1 repeated for every row
    grid_labels = np.array([num for _ in range(n_row) for num in range(n_row)])
    labels = Variable(LongTensor(grid_labels))
    gen_imgs = generator(z, labels)
    save_image(gen_imgs.data, "images/%d.png" % batches_done, nrow=n_row, normalize=True)
# ----------
#  Training
# ----------
# Alternates one generator and one discriminator update per mini-batch.
e = 0  # tracks the last epoch for which a log line was printed
#print(dataloader)
for epoch in range(opt.n_epochs):
    for i, (imgs, labels) in enumerate(dataloader):
        batch_size = imgs.shape[0]
        # Adversarial ground truths
        valid = Variable(FloatTensor(batch_size, 1).fill_(1.0), requires_grad=False)
        fake = Variable(FloatTensor(batch_size, 1).fill_(0.0), requires_grad=False)
        # Configure input
        real_imgs = Variable(imgs.type(FloatTensor))
        labels = Variable(labels.type(LongTensor))
        # -----------------
        #  Train Generator
        # -----------------
        optimizer_G.zero_grad()
        # Sample noise and labels as generator input
        z = Variable(FloatTensor(np.random.normal(0, 1, (batch_size, opt.latent_dim))))
        gen_labels = Variable(LongTensor(np.random.randint(0, opt.n_classes, batch_size)))
        # Generate a batch of images
        gen_imgs = generator(z, gen_labels)
        # Loss measures generator's ability to fool the discriminator
        validity, pred_label = discriminator(gen_imgs)
        g_loss = 0.5 * (adversarial_loss(validity, valid) + auxiliary_loss(pred_label, gen_labels))
        g_loss.backward()
        optimizer_G.step()
        # ---------------------
        #  Train Discriminator
        # ---------------------
        optimizer_D.zero_grad()
        # Loss for real images
        real_pred, real_aux = discriminator(real_imgs)
        d_real_loss = (adversarial_loss(real_pred, valid) + auxiliary_loss(real_aux, labels)) / 2
        # Loss for fake images (detach: no generator gradients from this pass)
        fake_pred, fake_aux = discriminator(gen_imgs.detach())
        d_fake_loss = (adversarial_loss(fake_pred, fake) + auxiliary_loss(fake_aux, gen_labels)) / 2
        # Total discriminator loss
        d_loss = (d_real_loss + d_fake_loss) / 2
        # Calculate discriminator accuracy over real and fake samples together
        pred = np.concatenate([real_aux.data.cpu().numpy(), fake_aux.data.cpu().numpy()], axis=0)
        gt = np.concatenate([labels.data.cpu().numpy(), gen_labels.data.cpu().numpy()], axis=0)
        d_acc = np.mean(np.argmax(pred, axis=1) == gt)
        d_loss.backward()
        optimizer_D.step()
        # `e` catches up with `epoch` on the first batch, so each epoch logs once
        if e == epoch :# log only the first batch of each epoch
            e+=1
            print(
                "[Epoch %d/%d] [Batch %d/%d] [D loss: %f, acc: %d%%] [G loss: %f]"
                % (epoch, opt.n_epochs, i, len(dataloader), d_loss.item(), 100 * d_acc, g_loss.item())
            )
        batches_done = epoch * len(dataloader) + i
        if batches_done % opt.sample_interval == 0:
            sample_image(n_row=1, batches_done=batches_done)  # NOTE(review): n_row should arguably be opt.n_classes
6c76591f8d36153f2174bfb66c8fe0ffbe22c3d7 | 280 | py | Python | re_charset_dot.py | Kalpavrikshika/python_modules | 9f338ab006dd5653fd7f65ff253bc50e0fd61fc6 | [
"Apache-2.0"
] | 1 | 2018-07-02T03:37:03.000Z | 2018-07-02T03:37:03.000Z | re_charset_dot.py | Kalpavrikshika/python_modules | 9f338ab006dd5653fd7f65ff253bc50e0fd61fc6 | [
"Apache-2.0"
] | null | null | null | re_charset_dot.py | Kalpavrikshika/python_modules | 9f338ab006dd5653fd7f65ff253bc50e0fd61fc6 | [
"Apache-2.0"
] | null | null | null | from re_testpattern import test_patterns
# Exercise '.' (any one character), greedy '.*' and non-greedy '.*?' patterns.
# BUGFIX: dataset-residue text fused after the closing paren made this a
# syntax error; the residue has been removed.
test_patterns(
    'abbaabbba',
    [('a.', 'a followed by any one character'),
     ('b.', 'b followed by any one character '),
     ('a.*b', 'a followed by anything , ending in b'),
     ('a.*?b', 'a followed by anything, ending in b')],
)
d19edaedc7388a04c21d37131129ea623a85f82f | 30,922 | py | Python | test/functional/swift_test_client.py | tsg-/swift-ec | d5cc4d274696b587e820050774db4ca2c3d9c85c | [
"Apache-2.0"
] | null | null | null | test/functional/swift_test_client.py | tsg-/swift-ec | d5cc4d274696b587e820050774db4ca2c3d9c85c | [
"Apache-2.0"
] | null | null | null | test/functional/swift_test_client.py | tsg-/swift-ec | d5cc4d274696b587e820050774db4ca2c3d9c85c | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import httplib
import os
import random
import socket
import StringIO
import time
import urllib
import simplejson as json
from nose import SkipTest
from xml.dom import minidom
from swiftclient import get_auth
from test import safe_repr
class AuthenticationFailed(Exception):
    """Raised when authentication yields no storage URL/token pair."""
    pass
class RequestError(Exception):
    """Raised when an HTTP request cannot be completed or is malformed."""
    pass
class ResponseError(Exception):
    """Error raised when a storage request returns an unexpected status.

    Captures the status, reason, headers and (when present) the
    ``X-Trans-Id`` header of the failing response, plus the request
    method/path for diagnostics.
    """
    def __init__(self, response, method=None, path=None):
        self.status = response.status
        self.reason = response.reason
        self.method = method
        self.path = path
        self.headers = response.getheaders()
        # Pull the transaction id out of the response headers, if any.
        self.txid = next(
            (value for name, value in self.headers
             if name.lower() == 'x-trans-id'),
            None)
        super(ResponseError, self).__init__()
    def __repr__(self):
        return '%d: %r (%r %r) txid=%s' % (
            self.status, self.reason, self.method, self.path, self.txid)
    def __str__(self):
        return repr(self)
def listing_empty(method):
    """Poll *method* until it reports an empty listing.

    Retries up to six times with exponential back-off (1, 2, 4, ... s);
    returns True as soon as the listing comes back empty, False if it
    never does.
    """
    attempts = 6
    for attempt in range(attempts):
        if not method():
            return True
        time.sleep(2 ** attempt)
    return False
def listing_items(method):
    """Yield every item from a paginated listing *method*.

    Fetches pages of up to 10000 entries, passing the last item of a
    full page back as the ``marker`` query parameter, until a short
    (or empty) page indicates the listing is exhausted.
    """
    marker = None
    first_pass = True
    items = []
    while first_pass or items:
        for item in items:
            yield item
        if not (first_pass or marker):
            # Last page already yielded and no marker left: drain and stop.
            items = []
            continue
        items = method(parms={'marker': marker}) if marker else method()
        # A completely full page means there may be more to fetch.
        marker = items[-1] if len(items) == 10000 else None
        first_pass = False
class Connection(object):
    """HTTP connection to a Swift cluster: handles auth and raw requests."""
    def __init__(self, config):
        """Validate *config* and store auth parameters.

        Raises SkipTest when a required parameter is missing so the
        functional tests are skipped rather than failed.
        """
        for key in 'auth_host auth_port auth_ssl username password'.split():
            if key not in config:
                raise SkipTest(
                    "Missing required configuration parameter: %s" % key)
        self.auth_host = config['auth_host']
        self.auth_port = int(config['auth_port'])
        self.auth_ssl = config['auth_ssl'] in ('on', 'true', 'yes', '1')
        self.auth_prefix = config.get('auth_prefix', '/')
        self.auth_version = str(config.get('auth_version', '1'))
        self.account = config.get('account')
        self.username = config['username']
        self.password = config['password']
        # Populated by authenticate().
        self.storage_host = None
        self.storage_port = None
        self.storage_url = None
        self.conn_class = None
    def get_account(self):
        """Return an Account wrapper bound to this connection."""
        return Account(self, self.account)
    def authenticate(self, clone_conn=None):
        """Obtain a storage URL/token, or copy them from *clone_conn*.

        Returns (storage_url, storage_token); raises AuthenticationFailed
        when the auth service does not hand back both values.
        """
        if clone_conn:
            self.conn_class = clone_conn.conn_class
            self.storage_host = clone_conn.storage_host
            self.storage_url = clone_conn.storage_url
            self.storage_port = clone_conn.storage_port
            self.storage_token = clone_conn.storage_token
            return
        # v1 auth concatenates account:username; other versions pass them
        # separately via tenant_name below.
        if self.auth_version == "1":
            auth_path = '%sv1.0' % (self.auth_prefix)
            if self.account:
                auth_user = '%s:%s' % (self.account, self.username)
            else:
                auth_user = self.username
        else:
            auth_user = self.username
            auth_path = self.auth_prefix
        auth_scheme = 'https://' if self.auth_ssl else 'http://'
        auth_netloc = "%s:%d" % (self.auth_host, self.auth_port)
        auth_url = auth_scheme + auth_netloc + auth_path
        (storage_url, storage_token) = get_auth(
            auth_url, auth_user, self.password, snet=False,
            tenant_name=self.account, auth_version=self.auth_version,
            os_options={})
        if not (storage_url and storage_token):
            raise AuthenticationFailed()
        # Parse scheme/host/port and the /version/account path out of the
        # returned storage URL.
        x = storage_url.split('/')
        if x[0] == 'http:':
            self.conn_class = httplib.HTTPConnection
            self.storage_port = 80
        elif x[0] == 'https:':
            self.conn_class = httplib.HTTPSConnection
            self.storage_port = 443
        else:
            raise ValueError('unexpected protocol %s' % (x[0]))
        self.storage_host = x[2].split(':')[0]
        if ':' in x[2]:
            self.storage_port = int(x[2].split(':')[1])
        # Make sure storage_url is a string and not unicode, since
        # keystoneclient (called by swiftclient) returns them in
        # unicode and this would cause troubles when doing
        # no_safe_quote query.
        self.storage_url = str('/%s/%s' % (x[3], x[4]))
        self.storage_token = storage_token
        self.http_connect()
        return self.storage_url, self.storage_token
    def cluster_info(self):
        """
        Retrieve the data in /info, or {} on 404
        """
        status = self.make_request('GET', '/info',
                                   cfg={'absolute_path': True})
        if status == 404:
            return {}
        if not 200 <= status <= 299:
            raise ResponseError(self.response, 'GET', '/info')
        return json.loads(self.response.read())
    def http_connect(self):
        """(Re)create the underlying HTTP(S) connection object."""
        self.connection = self.conn_class(self.storage_host,
                                          port=self.storage_port)
        #self.connection.set_debuglevel(3)
    def make_path(self, path=None, cfg=None):
        """Build the request path under the storage URL.

        cfg options: 'version_only_path' drops the account component;
        'no_quote'/'no_path_quote' skip URL-quoting of path components.
        """
        if path is None:
            path = []
        if cfg is None:
            cfg = {}
        if cfg.get('version_only_path'):
            return '/' + self.storage_url.split('/')[1]
        if path:
            quote = urllib.quote
            if cfg.get('no_quote') or cfg.get('no_path_quote'):
                quote = lambda x: x
            return '%s/%s' % (self.storage_url,
                              '/'.join([quote(i) for i in path]))
        else:
            return self.storage_url
    def make_headers(self, hdrs, cfg=None):
        """Merge the auth token (unless cfg 'no_auth_token') with *hdrs*."""
        if cfg is None:
            cfg = {}
        headers = {}
        if not cfg.get('no_auth_token'):
            headers['X-Auth-Token'] = self.storage_token
        if isinstance(hdrs, dict):
            headers.update(hdrs)
        return headers
    def make_request(self, method, path=None, data='', hdrs=None, parms=None,
                     cfg=None):
        """Issue a request with up to five retries on HTTP errors/401/503.

        Returns the response status; the full response object is left on
        ``self.response``.  Raises RequestError when all attempts fail.
        """
        if path is None:
            path = []
        if hdrs is None:
            hdrs = {}
        if parms is None:
            parms = {}
        if cfg is None:
            cfg = {}
        if not cfg.get('absolute_path'):
            # Set absolute_path=True to make a request to exactly the given
            # path, not storage path + given path. Useful for
            # non-account/container/object requests.
            path = self.make_path(path, cfg=cfg)
        headers = self.make_headers(hdrs, cfg=cfg)
        if isinstance(parms, dict) and parms:
            quote = urllib.quote
            if cfg.get('no_quote') or cfg.get('no_parms_quote'):
                quote = lambda x: x
            query_args = ['%s=%s' % (quote(x), quote(str(y)))
                          for (x, y) in parms.items()]
            path = '%s?%s' % (path, '&'.join(query_args))
        if not cfg.get('no_content_length'):
            if cfg.get('set_content_length'):
                headers['Content-Length'] = cfg.get('set_content_length')
            else:
                headers['Content-Length'] = len(data)
        def try_request():
            # Fresh connection per attempt; returns the raw response.
            self.http_connect()
            self.connection.request(method, path, data, headers)
            return self.connection.getresponse()
        self.response = None
        try_count = 0
        fail_messages = []
        while try_count < 5:
            try_count += 1
            try:
                self.response = try_request()
            except httplib.HTTPException as e:
                fail_messages.append(safe_repr(e))
                continue
            if self.response.status == 401:
                # Token may have expired; re-authenticate and retry.
                fail_messages.append("Response 401")
                self.authenticate()
                continue
            elif self.response.status == 503:
                fail_messages.append("Response 503")
                if try_count != 5:
                    time.sleep(5)
                continue
            break
        if self.response:
            return self.response.status
        request = "{method} {path} headers: {headers} data: {data}".format(
            method=method, path=path, headers=headers, data=data)
        raise RequestError('Unable to complete http request: %s. '
                           'Attempts: %s, Failures: %s' %
                           (request, len(fail_messages), fail_messages))
    def put_start(self, path, hdrs=None, parms=None, cfg=None, chunked=False):
        """Open a PUT request, sending headers only; body goes via put_data()."""
        if hdrs is None:
            hdrs = {}
        if parms is None:
            parms = {}
        if cfg is None:
            cfg = {}
        self.http_connect()
        path = self.make_path(path, cfg)
        headers = self.make_headers(hdrs, cfg=cfg)
        if chunked:
            # Chunked transfer encoding replaces any Content-Length.
            headers['Transfer-Encoding'] = 'chunked'
            headers.pop('Content-Length', None)
        if isinstance(parms, dict) and parms:
            quote = urllib.quote
            if cfg.get('no_quote') or cfg.get('no_parms_quote'):
                quote = lambda x: x
            query_args = ['%s=%s' % (quote(x), quote(str(y)))
                          for (x, y) in parms.items()]
            path = '%s?%s' % (path, '&'.join(query_args))
        self.connection = self.conn_class(self.storage_host,
                                          port=self.storage_port)
        #self.connection.set_debuglevel(3)
        self.connection.putrequest('PUT', path)
        for key, value in headers.iteritems():
            self.connection.putheader(key, value)
        self.connection.endheaders()
    def put_data(self, data, chunked=False):
        """Send one body chunk; chunked=True wraps it in HTTP chunk framing."""
        if chunked:
            self.connection.send('%x\r\n%s\r\n' % (len(data), data))
        else:
            self.connection.send(data)
    def put_end(self, chunked=False):
        """Finish the PUT (terminating chunk if chunked) and return status."""
        if chunked:
            self.connection.send('0\r\n\r\n')
        self.response = self.connection.getresponse()
        self.connection.close()
        return self.response.status
class Base(object):
    """Shared behaviour for the Account/Container/File wrappers."""
    def __str__(self):
        return self.name
    def header_fields(self, required_fields, optional_fields=None):
        """Map response headers into a dict keyed by friendly names.

        Each field spec is a (result_key, header_name) pair.  A missing
        required header raises ValueError; a missing optional header is
        skipped.  Values that parse as integers are converted.
        """
        headers = dict(self.conn.response.getheaders())
        def convert(raw):
            # Prefer an int; fall back to the raw header string.
            try:
                return int(raw)
            except ValueError:
                return raw
        ret = {}
        for field in required_fields:
            if field[1] not in headers:
                raise ValueError("%s was not found in response header" %
                                 (field[1]))
            ret[field[0]] = convert(headers[field[1]])
        for field in (optional_fields or ()):
            if field[1] in headers:
                ret[field[0]] = convert(headers[field[1]])
        return ret
class Account(Base):
    """Wrapper around account-level Swift operations."""
    def __init__(self, conn, name):
        self.conn = conn
        self.name = str(name)
    def update_metadata(self, metadata=None, cfg=None):
        """POST X-Account-Meta-* headers; raises ResponseError on non-2xx."""
        if metadata is None:
            metadata = {}
        if cfg is None:
            cfg = {}
        headers = dict(("X-Account-Meta-%s" % k, v)
                       for k, v in metadata.items())
        self.conn.make_request('POST', self.path, hdrs=headers, cfg=cfg)
        if not 200 <= self.conn.response.status <= 299:
            raise ResponseError(self.conn.response, 'POST',
                                self.conn.make_path(self.path))
        return True
    def container(self, container_name):
        """Return a Container wrapper for *container_name* in this account."""
        return Container(self.conn, self.name, container_name)
    def containers(self, hdrs=None, parms=None, cfg=None):
        """List containers: names for the default format, dicts for
        'json'/'xml'.  Raises RequestError on an unsupported format and
        ResponseError on an unexpected status.
        """
        if hdrs is None:
            hdrs = {}
        if parms is None:
            parms = {}
        if cfg is None:
            cfg = {}
        format_type = parms.get('format', None)
        if format_type not in [None, 'json', 'xml']:
            raise RequestError('Invalid format: %s' % format_type)
        if format_type is None and 'format' in parms:
            del parms['format']
        status = self.conn.make_request('GET', self.path, hdrs=hdrs,
                                        parms=parms, cfg=cfg)
        if status == 200:
            if format_type == 'json':
                conts = json.loads(self.conn.response.read())
                for cont in conts:
                    cont['name'] = cont['name'].encode('utf-8')
                return conts
            elif format_type == 'xml':
                conts = []
                tree = minidom.parseString(self.conn.response.read())
                for x in tree.getElementsByTagName('container'):
                    cont = {}
                    for key in ['name', 'count', 'bytes']:
                        cont[key] = x.getElementsByTagName(key)[0].\
                            childNodes[0].nodeValue
                    conts.append(cont)
                for cont in conts:
                    cont['name'] = cont['name'].encode('utf-8')
                return conts
            else:
                # Plain-text listing: one name per line, drop trailing blank.
                lines = self.conn.response.read().split('\n')
                if lines and not lines[-1]:
                    lines = lines[:-1]
                return lines
        elif status == 204:
            return []
        raise ResponseError(self.conn.response, 'GET',
                            self.conn.make_path(self.path))
    def delete_containers(self):
        """Recursively delete every container; True once the account is empty."""
        for c in listing_items(self.containers):
            cont = self.container(c)
            if not cont.delete_recursive():
                return False
        return listing_empty(self.containers)
    def info(self, hdrs=None, parms=None, cfg=None):
        """HEAD the account and return object/container/byte counters."""
        if hdrs is None:
            hdrs = {}
        if parms is None:
            parms = {}
        if cfg is None:
            cfg = {}
        if self.conn.make_request('HEAD', self.path, hdrs=hdrs,
                                  parms=parms, cfg=cfg) != 204:
            raise ResponseError(self.conn.response, 'HEAD',
                                self.conn.make_path(self.path))
        fields = [['object_count', 'x-account-object-count'],
                  ['container_count', 'x-account-container-count'],
                  ['bytes_used', 'x-account-bytes-used']]
        return self.header_fields(fields)
    @property
    def path(self):
        # Account operations address the bare storage URL (no extra path).
        return []
class Container(Base):
    """Wrapper around container-level Swift operations."""
    def __init__(self, conn, account, name):
        self.conn = conn
        self.account = str(account)
        self.name = str(name)
    def create(self, hdrs=None, parms=None, cfg=None):
        """PUT the container; True on 201 (created) or 202 (already exists)."""
        if hdrs is None:
            hdrs = {}
        if parms is None:
            parms = {}
        if cfg is None:
            cfg = {}
        return self.conn.make_request('PUT', self.path, hdrs=hdrs,
                                      parms=parms, cfg=cfg) in (201, 202)
    def delete(self, hdrs=None, parms=None):
        """DELETE the container; True on 204."""
        if hdrs is None:
            hdrs = {}
        if parms is None:
            parms = {}
        return self.conn.make_request('DELETE', self.path, hdrs=hdrs,
                                      parms=parms) == 204
    def delete_files(self):
        """Delete every object; True when the listing ends up empty."""
        for f in listing_items(self.files):
            file_item = self.file(f)
            if not file_item.delete():
                return False
        return listing_empty(self.files)
    def delete_recursive(self):
        """Delete all contained objects and then the container itself."""
        return self.delete_files() and self.delete()
    def file(self, file_name):
        """Return a File wrapper for *file_name* in this container."""
        return File(self.conn, self.account, self.name, file_name)
    def files(self, hdrs=None, parms=None, cfg=None):
        """List objects: names for the default format, dicts for
        'json'/'xml'.  Raises RequestError on an unsupported format and
        ResponseError on an unexpected status.
        """
        if hdrs is None:
            hdrs = {}
        if parms is None:
            parms = {}
        if cfg is None:
            cfg = {}
        format_type = parms.get('format', None)
        if format_type not in [None, 'json', 'xml']:
            raise RequestError('Invalid format: %s' % format_type)
        if format_type is None and 'format' in parms:
            del parms['format']
        status = self.conn.make_request('GET', self.path, hdrs=hdrs,
                                        parms=parms, cfg=cfg)
        if status == 200:
            if format_type == 'json':
                files = json.loads(self.conn.response.read())
                for file_item in files:
                    file_item['name'] = file_item['name'].encode('utf-8')
                    file_item['content_type'] = file_item['content_type'].\
                        encode('utf-8')
                return files
            elif format_type == 'xml':
                files = []
                tree = minidom.parseString(self.conn.response.read())
                for x in tree.getElementsByTagName('object'):
                    file_item = {}
                    for key in ['name', 'hash', 'bytes', 'content_type',
                                'last_modified']:
                        file_item[key] = x.getElementsByTagName(key)[0].\
                            childNodes[0].nodeValue
                    files.append(file_item)
                for file_item in files:
                    file_item['name'] = file_item['name'].encode('utf-8')
                    file_item['content_type'] = file_item['content_type'].\
                        encode('utf-8')
                return files
            else:
                # Plain-text listing: one name per line, drop trailing blank.
                content = self.conn.response.read()
                if content:
                    lines = content.split('\n')
                    if lines and not lines[-1]:
                        lines = lines[:-1]
                    return lines
                else:
                    return []
        elif status == 204:
            return []
        raise ResponseError(self.conn.response, 'GET',
                            self.conn.make_path(self.path))
    def info(self, hdrs=None, parms=None, cfg=None):
        """HEAD the container and return byte/object counters (and the
        versions location when present)."""
        if hdrs is None:
            hdrs = {}
        if parms is None:
            parms = {}
        if cfg is None:
            cfg = {}
        self.conn.make_request('HEAD', self.path, hdrs=hdrs,
                               parms=parms, cfg=cfg)
        if self.conn.response.status == 204:
            required_fields = [['bytes_used', 'x-container-bytes-used'],
                               ['object_count', 'x-container-object-count']]
            optional_fields = [['versions', 'x-versions-location']]
            return self.header_fields(required_fields, optional_fields)
        raise ResponseError(self.conn.response, 'HEAD',
                            self.conn.make_path(self.path))
    @property
    def path(self):
        # Container operations address storage URL + container name.
        return [self.name]
class File(Base):
    """Wrapper around object-level Swift operations (read/write/metadata)."""
    def __init__(self, conn, account, container, name):
        self.conn = conn
        self.account = str(account)
        self.container = str(container)
        self.name = str(name)
        self.chunked_write_in_progress = False
        self.content_type = None
        self.size = None
        self.metadata = {}
    def make_headers(self, cfg=None):
        """Build Content-Length/Content-Type/X-Object-Meta-* headers from
        the cached object state, honouring the cfg overrides."""
        if cfg is None:
            cfg = {}
        headers = {}
        if not cfg.get('no_content_length'):
            if cfg.get('set_content_length'):
                headers['Content-Length'] = cfg.get('set_content_length')
            elif self.size:
                headers['Content-Length'] = self.size
            else:
                headers['Content-Length'] = 0
        if cfg.get('no_content_type'):
            pass
        elif self.content_type:
            headers['Content-Type'] = self.content_type
        else:
            headers['Content-Type'] = 'application/octet-stream'
        for key in self.metadata:
            headers['X-Object-Meta-' + key] = self.metadata[key]
        return headers
    @classmethod
    def compute_md5sum(cls, data):
        """Return the hex MD5 of *data* (string or file-like), reading in
        4 KiB blocks and rewinding the stream afterwards."""
        block_size = 4096
        if isinstance(data, str):
            data = StringIO.StringIO(data)
        checksum = hashlib.md5()
        buff = data.read(block_size)
        while buff:
            checksum.update(buff)
            buff = data.read(block_size)
        data.seek(0)
        return checksum.hexdigest()
    def copy(self, dest_cont, dest_file, hdrs=None, parms=None, cfg=None):
        """Server-side COPY to dest_cont/dest_file; True on 201.

        cfg 'destination' overrides the Destination header entirely;
        cfg 'no_destination' omits it (to exercise the error path).
        """
        if hdrs is None:
            hdrs = {}
        if parms is None:
            parms = {}
        if cfg is None:
            cfg = {}
        if 'destination' in cfg:
            headers = {'Destination': cfg['destination']}
        elif cfg.get('no_destination'):
            headers = {}
        else:
            headers = {'Destination': '%s/%s' % (dest_cont, dest_file)}
        headers.update(hdrs)
        if 'Destination' in headers:
            headers['Destination'] = urllib.quote(headers['Destination'])
        return self.conn.make_request('COPY', self.path, hdrs=headers,
                                      parms=parms) == 201
    def delete(self, hdrs=None, parms=None):
        """DELETE the object; raises ResponseError unless the reply is 204."""
        if hdrs is None:
            hdrs = {}
        if parms is None:
            parms = {}
        if self.conn.make_request('DELETE', self.path, hdrs=hdrs,
                                  parms=parms) != 204:
            raise ResponseError(self.conn.response, 'DELETE',
                                self.conn.make_path(self.path))
        return True
    def info(self, hdrs=None, parms=None, cfg=None):
        """HEAD the object and return length/type/modified/etag fields."""
        if hdrs is None:
            hdrs = {}
        if parms is None:
            parms = {}
        if cfg is None:
            cfg = {}
        if self.conn.make_request('HEAD', self.path, hdrs=hdrs,
                                  parms=parms, cfg=cfg) != 200:
            raise ResponseError(self.conn.response, 'HEAD',
                                self.conn.make_path(self.path))
        fields = [['content_length', 'content-length'],
                  ['content_type', 'content-type'],
                  ['last_modified', 'last-modified'],
                  ['etag', 'etag']]
        header_fields = self.header_fields(fields)
        header_fields['etag'] = header_fields['etag'].strip('"')
        return header_fields
    def initialize(self, hdrs=None, parms=None):
        """HEAD the object and cache its attributes on this wrapper.

        Returns False when the object has no name or does not exist;
        raises ResponseError on any other non-2xx status.
        """
        if hdrs is None:
            hdrs = {}
        if parms is None:
            parms = {}
        if not self.name:
            return False
        status = self.conn.make_request('HEAD', self.path, hdrs=hdrs,
                                        parms=parms)
        if status == 404:
            return False
        elif (status < 200) or (status > 299):
            raise ResponseError(self.conn.response, 'HEAD',
                                self.conn.make_path(self.path))
        # Cache interesting response headers on the instance.
        for hdr in self.conn.response.getheaders():
            if hdr[0].lower() == 'content-type':
                self.content_type = hdr[1]
            if hdr[0].lower().startswith('x-object-meta-'):
                self.metadata[hdr[0][14:]] = hdr[1]
            if hdr[0].lower() == 'etag':
                self.etag = hdr[1].strip('"')
            if hdr[0].lower() == 'content-length':
                self.size = int(hdr[1])
            if hdr[0].lower() == 'last-modified':
                self.last_modified = hdr[1]
        return True
    def load_from_filename(self, filename, callback=None):
        """Upload the contents of a local file as this object."""
        fobj = open(filename, 'rb')
        self.write(fobj, callback=callback)
        fobj.close()
    @property
    def path(self):
        # Object operations address storage URL + container + object name.
        return [self.container, self.name]
    @classmethod
    def random_data(cls, size=None):
        """Return *size* random bytes (random size up to 32 KiB by default)."""
        if size is None:
            size = random.randint(1, 32768)
        fd = open('/dev/urandom', 'r')
        data = fd.read(size)
        fd.close()
        return data
    def read(self, size=-1, offset=0, hdrs=None, buffer=None,
             callback=None, cfg=None, parms=None):
        """GET the object body.

        With size > 0 a Range header limits the read to [offset,
        offset+size).  When *buffer* has a write() method the body is
        streamed into it (returning None); otherwise the body is returned.
        *callback* is invoked as callback(transferred, self.size).
        """
        if cfg is None:
            cfg = {}
        if parms is None:
            parms = {}
        if size > 0:
            range_string = 'bytes=%d-%d' % (offset, (offset + size) - 1)
            if hdrs:
                hdrs['Range'] = range_string
            else:
                hdrs = {'Range': range_string}
        status = self.conn.make_request('GET', self.path, hdrs=hdrs,
                                        cfg=cfg, parms=parms)
        if (status < 200) or (status > 299):
            raise ResponseError(self.conn.response, 'GET',
                                self.conn.make_path(self.path))
        for hdr in self.conn.response.getheaders():
            if hdr[0].lower() == 'content-type':
                self.content_type = hdr[1]
        if hasattr(buffer, 'write'):
            scratch = self.conn.response.read(8192)
            transferred = 0
            while len(scratch) > 0:
                buffer.write(scratch)
                transferred += len(scratch)
                if callable(callback):
                    callback(transferred, self.size)
                scratch = self.conn.response.read(8192)
            return None
        else:
            return self.conn.response.read()
    def read_md5(self):
        """GET the object and return the hex MD5 of its body (streamed)."""
        status = self.conn.make_request('GET', self.path)
        if (status < 200) or (status > 299):
            raise ResponseError(self.conn.response, 'GET',
                                self.conn.make_path(self.path))
        checksum = hashlib.md5()
        scratch = self.conn.response.read(8192)
        while len(scratch) > 0:
            checksum.update(scratch)
            scratch = self.conn.response.read(8192)
        return checksum.hexdigest()
    def save_to_filename(self, filename, callback=None):
        """Download the object body into a local file."""
        # NOTE(review): if open() raises, fobj is unbound and the finally
        # clause raises NameError instead of the original error.
        try:
            fobj = open(filename, 'wb')
            self.read(buffer=fobj, callback=callback)
        finally:
            fobj.close()
    def sync_metadata(self, metadata=None, cfg=None):
        """POST the merged metadata to the object; True on 201/202."""
        if metadata is None:
            metadata = {}
        if cfg is None:
            cfg = {}
        self.metadata.update(metadata)
        if self.metadata:
            headers = self.make_headers(cfg=cfg)
            if not cfg.get('no_content_length'):
                if cfg.get('set_content_length'):
                    headers['Content-Length'] = \
                        cfg.get('set_content_length')
                else:
                    headers['Content-Length'] = 0
            self.conn.make_request('POST', self.path, hdrs=headers, cfg=cfg)
            if self.conn.response.status not in (201, 202):
                raise ResponseError(self.conn.response, 'POST',
                                    self.conn.make_path(self.path))
        return True
    def chunked_write(self, data=None, hdrs=None, parms=None, cfg=None):
        """Drive a chunked PUT: call with data to start/continue, call with
        no data to finish (returns True on 201).

        Raises RuntimeError when called with no data and no transfer in
        progress.
        """
        if hdrs is None:
            hdrs = {}
        if parms is None:
            parms = {}
        if cfg is None:
            cfg = {}
        if data is not None and self.chunked_write_in_progress:
            self.conn.put_data(data, True)
        elif data is not None:
            # First chunk: open the PUT before sending it.
            self.chunked_write_in_progress = True
            headers = self.make_headers(cfg=cfg)
            headers.update(hdrs)
            self.conn.put_start(self.path, hdrs=headers, parms=parms,
                                cfg=cfg, chunked=True)
            self.conn.put_data(data, True)
        elif self.chunked_write_in_progress:
            self.chunked_write_in_progress = False
            return self.conn.put_end(True) == 201
        else:
            raise RuntimeError
    def write(self, data='', hdrs=None, parms=None, callback=None, cfg=None,
              return_resp=False):
        """PUT *data* (string or file object) as the object body.

        Streams in 1 MiB blocks, invoking callback(transferred, size)
        after each.  Returns True (or the response when return_resp) and
        caches the body's MD5 on self.md5; raises ResponseError on
        non-2xx status.
        """
        if hdrs is None:
            hdrs = {}
        if parms is None:
            parms = {}
        if cfg is None:
            cfg = {}
        block_size = 2 ** 20
        if isinstance(data, file):
            try:
                data.flush()
                data.seek(0)
            except IOError:
                pass
            self.size = int(os.fstat(data.fileno())[6])
        else:
            data = StringIO.StringIO(data)
            self.size = data.len
        headers = self.make_headers(cfg=cfg)
        headers.update(hdrs)
        self.conn.put_start(self.path, hdrs=headers, parms=parms, cfg=cfg)
        transferred = 0
        buff = data.read(block_size)
        buff_len = len(buff)
        try:
            while buff_len > 0:
                self.conn.put_data(buff)
                transferred += buff_len
                if callable(callback):
                    callback(transferred, self.size)
                buff = data.read(block_size)
                buff_len = len(buff)
            self.conn.put_end()
        except socket.timeout as err:
            raise err
        if (self.conn.response.status < 200) or \
                (self.conn.response.status > 299):
            raise ResponseError(self.conn.response, 'PUT',
                                self.conn.make_path(self.path))
        try:
            data.seek(0)
        except IOError:
            pass
        self.md5 = self.compute_md5sum(data)
        if return_resp:
            return self.conn.response
        return True
    def write_random(self, size=None, hdrs=None, parms=None, cfg=None):
        """Write random data of *size* bytes and return it."""
        if hdrs is None:
            hdrs = {}
        if parms is None:
            parms = {}
        if cfg is None:
            cfg = {}
        data = self.random_data(size)
        if not self.write(data, hdrs=hdrs, parms=parms, cfg=cfg):
            raise ResponseError(self.conn.response, 'PUT',
                                self.conn.make_path(self.path))
        self.md5 = self.compute_md5sum(StringIO.StringIO(data))
        return data
    def write_random_return_resp(self, size=None, hdrs=None, parms=None,
                                 cfg=None):
        """Write random data and return the raw PUT response object."""
        if hdrs is None:
            hdrs = {}
        if parms is None:
            parms = {}
        if cfg is None:
            cfg = {}
        data = self.random_data(size)
        resp = self.write(data, hdrs=hdrs, parms=parms, cfg=cfg,
                          return_resp=True)
        if not resp:
            raise ResponseError(self.conn.response)
        self.md5 = self.compute_md5sum(StringIO.StringIO(data))
        return resp
| 32.447009 | 78 | 0.522185 |
8aa25ff80f7e23538661eaa4cfc31ce525d81fdd | 1,698 | py | Python | storops_comptest/vnx/conftest.py | tunaruraul/storops | 7092c516c55b4c2f00c7c22383e1ad46ecfec091 | [
"Apache-2.0"
] | 60 | 2016-04-18T23:42:10.000Z | 2022-03-23T02:26:03.000Z | storops_comptest/vnx/conftest.py | tunaruraul/storops | 7092c516c55b4c2f00c7c22383e1ad46ecfec091 | [
"Apache-2.0"
] | 317 | 2016-05-25T06:45:37.000Z | 2022-03-25T13:22:38.000Z | storops_comptest/vnx/conftest.py | tunaruraul/storops | 7092c516c55b4c2f00c7c22383e1ad46ecfec091 | [
"Apache-2.0"
] | 34 | 2016-03-18T02:39:12.000Z | 2022-01-07T12:54:14.000Z | # coding=utf-8
# Copyright (c) 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
import logging
import pytest
from storops_comptest.utils import setup_fixture
from storops_comptest.vnx import VNXGeneralFixtureManager, \
MultiVNXGeneralFixtureManager
__author__ = 'Cedric Zhuang'
log = logging.getLogger(__name__)
@pytest.fixture(scope='session')
def vnx_gf(request):
    """ General fixture for most vnx cases
    Details including:
    vnx - reference to the system.
    pool - A RAID5 pool with 3 disks created on the fly.
    lun - A LUN created in the pool.
    snap - A snap created upon the LUN.
    :param request: pytest session-scoped fixture request object
    :return: an initialised VNXGeneralFixtureManager
    """
    return setup_fixture(request, VNXGeneralFixtureManager)
@pytest.fixture(scope='session')
def multi_vnx_gf(request):
    """ general fixture for multi VNX test cases
    Details including:
    vnx - reference to the system
    sync_mirror - a synchronized mirror
    :param request: pytest session-scoped fixture request object
    :return: an initialised MultiVNXGeneralFixtureManager
    """
    return setup_fixture(request, MultiVNXGeneralFixtureManager)
| 29.275862 | 78 | 0.703769 |
68120999699e51ef50d16d206ced479bced05baf | 2,968 | py | Python | scripts/go_to_goal.py | VeryHardBit/control_turtebot2 | 5158574f9f7eb76bee19f16b7e11f6f72853fd43 | [
"Apache-2.0"
] | null | null | null | scripts/go_to_goal.py | VeryHardBit/control_turtebot2 | 5158574f9f7eb76bee19f16b7e11f6f72853fd43 | [
"Apache-2.0"
] | null | null | null | scripts/go_to_goal.py | VeryHardBit/control_turtebot2 | 5158574f9f7eb76bee19f16b7e11f6f72853fd43 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
from tf.transformations import euler_from_quaternion
from math import cos, sin, sqrt
class GoToGoal():
    """Drive a Turtlebot straight ahead to a goal point derived from odometry."""
    def __init__(self):
        """Set up the ROS node, velocity publisher and odometry subscriber."""
        rospy.init_node('go_to_goal_node', anonymous=False, log_level=rospy.DEBUG)
        rospy.loginfo("To stop Turtlebot press CTRL + C")
        rospy.on_shutdown(self.shutdown)
        # Velocity commands go to the navigation mux; odometry feeds pose updates.
        self.cmd_vel_pub = rospy.Publisher('cmd_vel_mux/input/navi' , Twist, queue_size=10)
        self.odom_sub = rospy.Subscriber('odom', Odometry, self.odom_callback)
        self.goal_pose_x = 0
        self.goal_pose_y = 0
        self.current_pose_x = 0
        self.current_pose_y = 0
        self.current_theta = 0
        self.vel_x = 0
        self.tol = 0.1
        self.move_cmd = Twist()
        rospy.wait_for_message("odom", Odometry) # wait for odometry data
    def odom_callback(self, odom_data):
        """Cache the robot's planar position and yaw from each Odometry message."""
        self.current_pose_x = odom_data.pose.pose.position.x
        self.current_pose_y = odom_data.pose.pose.position.y
        orientation_q = odom_data.pose.pose.orientation # orientation data
        orientation_list = [orientation_q.x, orientation_q.y, orientation_q.z, orientation_q.w]
        # convert the orientation quaternion to Euler angles (radians)
        (roll, pitch, yaw) = euler_from_quaternion(orientation_list)
        self.current_theta = yaw # current heading of the robot, in radians
    def go_to_goal(self, vel_x):
        """Publish a constant forward velocity until the goal is reached."""
        self.move_cmd.linear.x = vel_x # set velocity
        # NOTE(review): busy loop with no rospy.Rate throttling; publishes
        # as fast as possible until check_stop() is satisfied.
        while not rospy.is_shutdown():
            if self.check_stop():
                self.shutdown()
                break
            else:
                self.cmd_vel_pub.publish(self.move_cmd)
    def set_goal(self, distance, tol):
        """Project the goal *distance* metres ahead along the current heading."""
        self.goal_pose_x = self.current_pose_x + (distance * cos(self.current_theta))
        self.goal_pose_y = self.current_pose_y + (distance * sin(self.current_theta))
        self.tol = tol
        rospy.logdebug(" Current Pose : " + str([self.current_pose_x,self.current_pose_y]))
        rospy.logdebug("Move Distance : " + str(distance) + " m with TOL : " + str(tol) + "m")
        rospy.logdebug("    Goal Pose : " + str([self.goal_pose_x,self.goal_pose_y]))
    def check_stop(self):
        """Return True once the Euclidean distance to the goal is within tol."""
        delta_x = self.goal_pose_x - self.current_pose_x
        delta_y = self.goal_pose_y - self.current_pose_y
        error = sqrt(delta_x ** 2 + delta_y ** 2)
        rospy.logdebug(error)
        if error <= self.tol:
            return True
        else:
            return False
    def shutdown(self):
        """Stop the robot by publishing a zero Twist before the node exits."""
        rospy.loginfo("Stop TurtleBot")
        self.cmd_vel_pub.publish(Twist())
        rospy.sleep(1)
if __name__ == "__main__":
try:
go_to_goal = GoToGoal()
# Set go at 1m ahead with 0.05m tolerant
go_to_goal.set_goal(1, 0.05)
# Start moving forward at speed 0.1 m/s
go_to_goal.go_to_goal(0.1)
except rospy.ROSInterruptException:
rospy.loginfo("GoToGoal Forward node terminated") | 41.802817 | 104 | 0.648922 |
28bc9bd6f5ff37af153078e086bf406fb61f1190 | 1,209 | py | Python | coursera/python_database/week2/emaildb.py | concongo/pythonSamples | 683a570b068ca76ba0ed1d9d42c90b03ed8e4553 | [
"MIT"
] | null | null | null | coursera/python_database/week2/emaildb.py | concongo/pythonSamples | 683a570b068ca76ba0ed1d9d42c90b03ed8e4553 | [
"MIT"
] | null | null | null | coursera/python_database/week2/emaildb.py | concongo/pythonSamples | 683a570b068ca76ba0ed1d9d42c90b03ed8e4553 | [
"MIT"
] | null | null | null | import sqlite3, re
# Count how many messages each organisation (email domain) sent, persisting
# the tallies in a local SQLite database.  Python 2 script (raw_input/print).
conn = sqlite3.connect('emaildb.sqlite')
cur = conn.cursor()
cur.execute('''
DROP TABLE IF EXISTS Counts''')
cur.execute('''
CREATE TABLE Counts (org TEXT, count INTEGER)''')
# Default to the course's sample mailbox when no file name is given.
fname = raw_input('Enter file name: ')
if ( len(fname) < 1 ) : fname = 'mbox.txt'
fh = open(fname)
for line in fh:
    # Only 'From: ' header lines carry the sender address we count.
    if not line.startswith('From: ') : continue
    pieces = line.split()
    # Domain part of the address: everything after '@' up to space/newline.
    # NOTE(review): raises IndexError if a 'From: ' line contains no '@'.
    org = re.findall('@([^ \n]*)',line)[0]
    print org
    # Upsert by hand: insert a fresh row or bump the existing counter.
    cur.execute('SELECT count FROM Counts WHERE org = ? ', (org, ))
    row = cur.fetchone()
    if row is None:
        cur.execute('''INSERT INTO Counts (org, count)
                VALUES ( ?, 1 )''', ( org, ) )
    else :
        cur.execute('UPDATE Counts SET count=count+1 WHERE org = ?',
                    (org, ))
    # This statement commits outstanding changes to disk each
    # time through the loop - the program can be made faster
    # by moving the commit so it runs only after the loop completes
    #conn.commit()
#
# Single commit after the loop (see note above) keeps the run fast.
conn.commit()
# https://www.sqlite.org/lang_select.html
sqlstr = 'SELECT org, count FROM Counts ORDER BY count DESC LIMIT 10'
print
print "Counts:"
for row in cur.execute(sqlstr) :
    print str(row[0]), row[1]
cur.close()
| 26.282609 | 69 | 0.623656 |
e7fd8e36fecf752de25e9bff4c7878ecccb24dbf | 1,960 | py | Python | tests/integration/dashboard/test_voucher_form.py | Jean1508/ya-madoa | 1ffb1d11e15bf33e4c3a09698675a4357e887eaa | [
"BSD-3-Clause"
] | null | null | null | tests/integration/dashboard/test_voucher_form.py | Jean1508/ya-madoa | 1ffb1d11e15bf33e4c3a09698675a4357e887eaa | [
"BSD-3-Clause"
] | 5 | 2021-05-28T19:38:28.000Z | 2022-03-12T00:45:39.000Z | tests/integration/dashboard/test_voucher_form.py | Jean1508/ya-madoa | 1ffb1d11e15bf33e4c3a09698675a4357e887eaa | [
"BSD-3-Clause"
] | null | null | null | from datetime import timedelta
import pytest
from django import test
from django.utils import timezone
from oscar.apps.dashboard.vouchers import forms
from oscar.test.factories.offer import RangeFactory
class TestVoucherForm(test.TestCase):
    def test_doesnt_crash_on_empty_date_fields(self):
        """
        There was a bug fixed in 02b3644 where the voucher form would raise an
        exception (instead of just failing validation) when being called with
        empty fields. This tests exists to prevent a regression.
        """
        data = {
            'code': '',
            'name': '',
            'start_date': '',
            'end_date': '',
            'benefit_range': '',
            'benefit_type': 'Percentage',
            'usage': 'Single use',
        }
        form = forms.VoucherForm(data=data)
        try:
            form.is_valid()
        except Exception as e:
            import traceback
            # BaseException.message was removed in Python 3; accessing it
            # here would raise AttributeError and hide the original error,
            # so format the exception itself instead.
            self.fail(
                "Exception raised while validating voucher form: %s\n\n%s" % (
                    e, traceback.format_exc()))
@pytest.mark.django_db
class TestVoucherSetForm:
    def test_valid_form(self):
        """Saving a valid VoucherSetForm creates the requested vouchers."""
        # Range covering every product, so the percentage benefit applies
        # universally.
        a_range = RangeFactory(includes_all_products=True)
        start = timezone.now()
        end = start + timedelta(days=1)
        data = {
            'name': 'test',
            'code_length': 12,
            'description': 'test',
            'start_datetime': start,
            'end_datetime': end,
            'count': 10,
            'benefit_range': a_range.pk,
            'benefit_type': 'Percentage',
            'benefit_value': 10,
        }
        form = forms.VoucherSetForm(data=data)
        assert form.is_valid()
        instance = form.save()
        # save() should generate exactly `count` vouchers tied to the set,
        # with the requested validity window.
        assert instance.count == instance.vouchers.count()
        assert instance.start_datetime == start
        assert instance.end_datetime == end
| 31.111111 | 79 | 0.561224 |
c6ada70fb8253a88213f16cfdb59e911dd57ed44 | 5,273 | py | Python | OP/conf.py | daserzw/oidc-swamid-federation | 2f97eac2d63186848c4984b4fb9559cb9029b6a9 | [
"Apache-2.0"
] | null | null | null | OP/conf.py | daserzw/oidc-swamid-federation | 2f97eac2d63186848c4984b4fb9559cb9029b6a9 | [
"Apache-2.0"
] | null | null | null | OP/conf.py | daserzw/oidc-swamid-federation | 2f97eac2d63186848c4984b4fb9559cb9029b6a9 | [
"Apache-2.0"
] | null | null | null | from fedoidcendpoint.oidc import provider_config
from fedoidcendpoint.oidc import registration
from oidcendpoint import user_info
from oidcendpoint.oidc.authorization import Authorization
from oidcendpoint.oidc.discovery import Discovery
from oidcendpoint.oidc.token import AccessToken
from oidcendpoint.oidc.userinfo import UserInfo
from oidcendpoint.user_authn.authn_context import INTERNETPROTOCOLPASSWORD
from oidcop.util import JSONDictDB
KEYDEFS = [{"type": "RSA", "key": '', "use": ["sig"]},
{"type": "EC", "crv": "P-256", "use": ["sig"]}]
FED_KEYDEF = [{"type": "EC", "crv": "P-256", "use": ["sig"]}]
RESPONSE_TYPES_SUPPORTED = [
["code"], ["token"], ["id_token"], ["code", "token"], ["code", "id_token"],
["id_token", "token"], ["code", "token", "id_token"], ['none']]
CAPABILITIES = {
"response_types_supported": [" ".join(x) for x in RESPONSE_TYPES_SUPPORTED],
"token_endpoint_auth_methods_supported": [
"client_secret_post", "client_secret_basic",
"client_secret_jwt", "private_key_jwt"],
"response_modes_supported": ['query', 'fragment', 'form_post'],
"subject_types_supported": ["public", "pairwise"],
"grant_types_supported": [
"authorization_code", "implicit",
"urn:ietf:params:oauth:grant-type:jwt-bearer", "refresh_token"],
"claim_types_supported": ["normal", "aggregated", "distributed"],
"claims_parameter_supported": True,
"request_parameter_supported": True,
"request_uri_parameter_supported": True,
}
CONFIG = {
'provider': {
'jwks': {
'private_path': 'private/jwks.json',
'key_defs': KEYDEFS,
'public_path': 'public/jwks.json'
},
'server_info': {
"issuer": "https://127.0.0.1:8100",
"password": "mycket hemligt",
"token_expires_in": 600,
"grant_expires_in": 300,
"refresh_token_expires_in": 86400,
"verify_ssl": False,
"capabilities": CAPABILITIES,
'template_dir': 'templates',
"jwks": {
'url_path': '{}/public/jwks.json',
'local_path': 'public/jwks.json',
'private_path': 'private/jwks.json'
},
'endpoint': {
'webfinger': {
'path': '{}/.well-known/webfinger',
'class': Discovery,
'kwargs': {'client_authn_method': None}
},
'provider_info': {
'path': '{}/.well-known/openid-configuration',
'class': provider_config.ProviderConfiguration,
'kwargs': {'client_authn_method': None}
},
'registration': {
'path': '{}/registration',
'class': registration.Registration,
'kwargs': {'client_authn_method': None}
},
'authorization': {
'path': '{}/authorization',
'class': Authorization,
'kwargs': {'client_authn_method': None}
},
'token': {
'path': '{}/token',
'class': AccessToken,
'kwargs': {}
},
'userinfo': {
'path': '{}/userinfo',
'class': UserInfo,
}
},
'userinfo': {
'class': user_info.UserInfo,
'kwargs': {'db_file': 'users.json'}
},
'authentication': [
{
'acr': INTERNETPROTOCOLPASSWORD,
'name': 'UserPassJinja2',
'kwargs': {
'template': 'user_pass.jinja2',
'db': {
'class': JSONDictDB,
'kwargs':
{'json_path': 'passwd.json'}
},
'page_header': "Testing log in",
'submit_btn': "Get me in!",
'user_label': "Nickname",
'passwd_label': "Secret sauce"
}
},
{
'acr': 'anon',
'name': 'NoAuthn',
'kwargs': {'user': 'diana'}
}
],
'federation': {
'self_signer': {
'private_path': 'private/sign.json',
'key_defs': FED_KEYDEF,
'public_path': 'public/sign.json'
},
'mdss_endpoint': 'https://localhost:8089',
'mdss_owner': 'https://mdss.sunet.se',
'mdss_keys': 'mdss.jwks',
'fo_bundle': {
'dir': '../fo_bundle',
},
'context': 'dynamic',
'fo_priority': ['https://edugain.org',
'https://swamid.sunet.se']
}
}
},
'webserver': {
'cert': 'certs/cert.pem',
'key': 'certs/key.pem',
'cert_chain': '',
'port': 8100,
}
}
| 37.133803 | 80 | 0.460838 |
b215d3d0dd455544cdab0768ecd298e1fda591dd | 2,812 | py | Python | softlearning/policies/utils.py | abhishekunique/RND-ashwin | f8bcf3c593df2dacc0efba0875533be71ccb5011 | [
"MIT"
] | 5 | 2021-09-23T07:35:58.000Z | 2022-01-07T21:23:06.000Z | MTRF/algorithms/softlearning/policies/utils.py | facebookresearch/MTRF | 2fee8f3f1c2150fcecc2db2fa9e122a664a72d72 | [
"Apache-2.0"
] | 7 | 2020-09-25T22:41:46.000Z | 2022-03-12T00:37:25.000Z | softlearning/policies/utils.py | abhishekunique/RND-ashwin | f8bcf3c593df2dacc0efba0875533be71ccb5011 | [
"MIT"
] | 1 | 2021-12-10T20:01:16.000Z | 2021-12-10T20:01:16.000Z | from collections import OrderedDict
from copy import deepcopy
from softlearning.preprocessors.utils import get_preprocessor_from_params
def get_gaussian_policy(*args, **kwargs):
from .gaussian_policy import FeedforwardGaussianPolicy
policy = FeedforwardGaussianPolicy(*args, **kwargs)
return policy
def get_uniform_policy(*args, **kwargs):
from .uniform_policy import UniformPolicy
policy = UniformPolicy(*args, **kwargs)
return policy
def get_uniform_discrete_policy(*args, **kwargs):
from .uniform_policy import UniformDiscretePolicy
policy = UniformDiscretePolicy(*args, **kwargs)
return policy
def get_discrete_policy(*args, **kwargs):
from .discrete_policy import FeedforwardDiscretePolicy
policy = FeedforwardDiscretePolicy(*args, **kwargs)
return policy
POLICY_FUNCTIONS = {
'GaussianPolicy': get_gaussian_policy,
'UniformPolicy': get_uniform_policy,
'UniformDiscretePolicy': get_uniform_discrete_policy,
'DiscretePolicy': get_discrete_policy,
}
def get_policy(policy_type, *args, **kwargs):
return POLICY_FUNCTIONS[policy_type](*args, **kwargs)
def get_policy_from_params(policy_params, env, *args, **kwargs):
policy_type = policy_params['type']
policy_kwargs = deepcopy(policy_params.get('kwargs', {}))
observation_preprocessors_params = policy_kwargs.pop(
'observation_preprocessors_params', {})
observation_keys = policy_kwargs.pop(
'observation_keys', None) or env.observation_keys
goal_keys = policy_kwargs.pop('goal_keys', None) or tuple()
observation_shapes = OrderedDict((
(key, value) for key, value in env.observation_shape.items()
if key in observation_keys + goal_keys
))
observation_preprocessors = OrderedDict()
for name, observation_shape in observation_shapes.items():
preprocessor_params = observation_preprocessors_params.get(name, None)
if not preprocessor_params:
observation_preprocessors[name] = None
continue
observation_preprocessors[name] = get_preprocessor_from_params(
env, preprocessor_params)
if policy_type == 'UniformPolicy':
action_range = (env.action_space.low, env.action_space.high)
policy_kwargs['action_range'] = action_range
policy = POLICY_FUNCTIONS[policy_type](
input_shapes=observation_shapes,
output_shape=env.action_space.shape,
observation_keys=observation_keys,
goal_keys=goal_keys,
*args,
preprocessors=observation_preprocessors,
**policy_kwargs,
**kwargs)
return policy
def get_policy_from_variant(variant, *args, **kwargs):
policy_params = variant['policy_params']
return get_policy_from_params(policy_params, *args, **kwargs)
| 29.291667 | 78 | 0.728307 |
3d1999eede0bdf62081dd568481ba0babaa9f546 | 1,582 | py | Python | conjureup/models/addon.py | stefb965/conjure-up | 62d272ce82645109899c7e2a7a7ece0cc2bd3fc6 | [
"MIT"
] | null | null | null | conjureup/models/addon.py | stefb965/conjure-up | 62d272ce82645109899c7e2a7a7ece0cc2bd3fc6 | [
"MIT"
] | null | null | null | conjureup/models/addon.py | stefb965/conjure-up | 62d272ce82645109899c7e2a7a7ece0cc2bd3fc6 | [
"MIT"
] | null | null | null | from itertools import chain
from pathlib import Path
import yaml
from conjureup.app_config import app
from conjureup.models.step import StepModel
class AddonModel:
@classmethod
def load_spell_addons(cls):
"""
Return a list of all add-ons available for the current spell.
"""
addons_dir = Path(app.config['spell-dir']) / 'addons'
for addon_path in sorted(addons_dir.glob('*')):
if addon_path.is_dir():
app.addons[addon_path.name] = AddonModel(addon_path.name)
@classmethod
def selected_addons(cls):
return [app.addons[name] for name in sorted(app.selected_addons)]
@classmethod
def selected_addons_steps(cls):
return list(chain.from_iterable(
addon.steps for addon in cls.selected_addons()))
def __init__(self, name):
self.name = name
self.path = Path(app.config['spell-dir']) / 'addons' / name
self.metadata = self._read('metadata.yaml')
self.bundle = self._read('bundle.yaml')
self.steps = [StepModel.load(step_path)
for step_path in
sorted((self.path / 'steps').glob('*.yaml'))]
def _read(self, filename):
filepath = self.path / filename
if not filepath.exists():
return {}
return yaml.safe_load(filepath.read_text())
@property
def friendly_name(self):
return self.metadata.get('friendly-name', self.name)
@property
def description(self):
return self.metadata.get('description', self.name)
| 30.423077 | 73 | 0.627054 |
f78a6a3ec4dee0c4b194df08968179a27b24c642 | 3,443 | py | Python | meta/migrations/0001_initial.py | josl/COMPARE-Uploader-Docker | 56e41005cfe71f4245854e4f0211fa2757b46e93 | [
"Apache-2.0"
] | null | null | null | meta/migrations/0001_initial.py | josl/COMPARE-Uploader-Docker | 56e41005cfe71f4245854e4f0211fa2757b46e93 | [
"Apache-2.0"
] | null | null | null | meta/migrations/0001_initial.py | josl/COMPARE-Uploader-Docker | 56e41005cfe71f4245854e4f0211fa2757b46e93 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-07 13:45
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import meta.models
class Migration(migrations.Migration):
    """Initial schema: the Metadata model holding sample annotations.

    Auto-generated by Django 1.9.1. Applied migrations must not be
    hand-edited; make schema changes in a follow-up migration.
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Metadata',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('meta_id', models.CharField(default=meta.models.generate_id, editable=False, max_length=32, unique=True)),
                ('uid', models.CharField(editable=False, max_length=32, unique=True)),
                ('sequencing_platform', models.CharField(choices=[(b'Illumina', b'Illumina'), (b'Ion Torrent', b'Ion Torrent')], max_length=255)),
                ('sequencing_type', models.CharField(choices=[(b'single', b'single'), (b'paired', b'paired'), (b'mate-paired', b'mate-paired'), (b'unknown', b'unknown')], max_length=255)),
                ('pre_assembled', models.CharField(choices=[(b'yes', b'yes'), (b'no', b'no')], max_length=255)),
                ('isolation_source', models.CharField(choices=[(b'human', b'human'), (b'water', b'water'), (b'food', b'food'), (b'animal', b'animal'), (b'other', b'other'), (b'laboratory', b'laboratory')], max_length=255)),
                ('pathogenic', models.CharField(choices=[(b'yes', b'yes'), (b'no', b'no'), (b'unknown', b'unknown')], max_length=255, null=True)),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                ('sample_name', models.CharField(max_length=255, null=True)),
                ('longitude', models.CharField(max_length=255, null=True)),
                ('latitude', models.CharField(max_length=255, null=True)),
                ('organism', models.CharField(max_length=255, null=True)),
                ('strain', models.CharField(max_length=255, null=True)),
                ('subtype', models.CharField(max_length=255, null=True)),
                ('country', models.CharField(max_length=255)),
                ('region', models.CharField(max_length=255, null=True)),
                ('city', models.CharField(max_length=255, null=True)),
                ('zip_code', models.CharField(max_length=255, null=True)),
                ('location_note', models.CharField(max_length=255, null=True)),
                ('source_note', models.CharField(max_length=255, null=True)),
                ('pathogenicity_note', models.CharField(max_length=255, null=True)),
                ('collected_by', models.CharField(max_length=255, null=True)),
                ('email_address', models.EmailField(max_length=255, null=True)),
                ('notes', models.TextField(max_length=255, null=True)),
                ('usage_restrictions', models.CharField(choices=[(b'private', b'private'), (b'public', b'public')], max_length=255, null=True)),
                ('collection_date', models.CharField(max_length=255)),
                # NOTE(review): EmailField for a release date looks like a
                # generator slip -- fix it in a new migration, not here.
                ('release_date', models.EmailField(max_length=255, null=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
1115d9587b2562ce0ecc23423330073935ee9784 | 397 | py | Python | jumboSmash/jumboSmash/asgi.py | wolfep15/arith | 14e628210e54d8f171dff796e7c71d3ea1574067 | [
"MIT"
] | 1 | 2020-08-04T01:10:56.000Z | 2020-08-04T01:10:56.000Z | jumboSmash/jumboSmash/asgi.py | wolfep15/arith | 14e628210e54d8f171dff796e7c71d3ea1574067 | [
"MIT"
] | null | null | null | jumboSmash/jumboSmash/asgi.py | wolfep15/arith | 14e628210e54d8f171dff796e7c71d3ea1574067 | [
"MIT"
] | null | null | null | """
ASGI config for jumboSmash project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Fall back to the project's settings module unless the environment already
# names one (e.g. in a production deployment).
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'jumboSmash.settings')
# Module-level ASGI callable that servers (uvicorn/daphne) import.
application = get_asgi_application()
| 23.352941 | 78 | 0.788413 |
5030752a559e0fb9d6e60b5c8be0a92f81280fe8 | 6,254 | py | Python | google_appengine/google/appengine/api/logservice/logsutil.py | iTrollYou/WEB_Spotify_Youtube | 5315cdf78361942bba0b52daa8b65d74998d2db5 | [
"MIT"
] | 26 | 2015-01-20T08:02:38.000Z | 2020-06-10T04:57:41.000Z | google_appengine/google/appengine/api/logservice/logsutil.py | iTrollYou/WEB_Spotify_Youtube | 5315cdf78361942bba0b52daa8b65d74998d2db5 | [
"MIT"
] | 4 | 2016-02-28T05:53:54.000Z | 2017-01-03T07:39:50.000Z | google/appengine/api/logservice/logsutil.py | fullstorydev/goappengine | a508ebb77930bef26785baf2f3278c95e6b63ffc | [
"Apache-2.0"
] | 13 | 2016-02-28T00:14:23.000Z | 2021-05-03T15:47:36.000Z | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Utility methods for working with logs."""
import os
import time
# Environment variable via which App Engine exposes the current request's
# log id (see RequestID()).
REQUEST_LOG_ID = 'REQUEST_LOG_ID'
# Microseconds per second; converts time.time() to microsecond timestamps.
_U_SEC = 1000000
# Per-line byte overhead beyond the message itself (the "LOG <level> <ts> "
# framing); see LoggingRecord.__len__.
FIXED_LOG_LINE_OVERHEAD = 15
# Numeric levels used by the streamed "LOG <level> <timestamp> <msg>" format.
LOG_LEVEL_DEBUG = 0
LOG_LEVEL_INFO = 1
LOG_LEVEL_WARNING = 2
LOG_LEVEL_ERROR = 3
LOG_LEVEL_CRITICAL = 4
# All valid levels; used to validate parsed entries.
LOG_LEVELS = [LOG_LEVEL_DEBUG,
              LOG_LEVEL_INFO,
              LOG_LEVEL_WARNING,
              LOG_LEVEL_ERROR,
              LOG_LEVEL_CRITICAL]
# Level assigned to lines that do not carry an explicit level.
_DEFAULT_LEVEL = LOG_LEVEL_ERROR
def _CurrentTimeMicro():
  """Returns the current wall-clock time as integer microseconds.

  Note: uses the Python 2 ``long`` builtin (this module is Python 2 only).
  """
  return long(time.time() * _U_SEC)
def _Clean(e):
return e.replace('\0', '\n')
def Stripnl(message):
  """Drops a single trailing newline from *message*, if one is present.

  Falsy inputs (empty string, None) are returned unchanged.
  """
  if not message:
    return message
  if message[-1] == '\n':
    return message[:-1]
  return message
def RequestID():
  """Returns the App Engine-assigned id of the current request, or None."""
  try:
    return os.environ[REQUEST_LOG_ID]
  except KeyError:
    return None
def _StrictParseLogEntry(entry, clean_message=True):
  r"""Parses a single log entry emitted by app_logging.AppLogsHandler.

  Parses a log entry of the form LOG <level> <timestamp> <message> where the
  level is in the range [0, 4]. If the entry is not of that form, ValueError
  is raised.

  Args:
    entry: The log entry to parse.
    clean_message: should the message be cleaned (i.e. \0 -> \n).

  Returns:
    A (timestamp, level, message, extra) tuple. ``message`` always has null
    bytes replaced with newlines. ``extra`` is None when clean_message is
    true, and the original (uncleaned) message otherwise -- callers such as
    RecordFromLine use the fourth element to recover the raw text.

  Raises:
    ValueError: if the entry failed to be parsed.
  """
  # A line without three separating spaces makes this unpack raise ValueError.
  magic, level, timestamp, message = entry.split(' ', 3)
  if magic != 'LOG':
    raise ValueError()
  # long()/int() also raise ValueError on malformed numeric fields.
  timestamp, level = long(timestamp), int(level)
  if level not in LOG_LEVELS:
    raise ValueError()
  return timestamp, level, _Clean(message), None if clean_message else message
def ParseLogEntry(entry):
  """Parses a single log entry emitted by app_logging.AppLogsHandler.

  Parses a log entry of the form LOG <level> <timestamp> <message> where the
  level is in the range [0, 4]. If the entry is not of that form, take the
  whole entry to be the message. Null characters in the entry are replaced by
  newlines.

  Args:
    entry: The log entry to parse.

  Returns:
    A (timestamp, level, message, source_location) tuple.
  """
  try:
    return _StrictParseLogEntry(entry)
  except ValueError:
    # Not in the canonical "LOG ..." shape: treat the whole line as an
    # ERROR-level message stamped with the current time.
    return _CurrentTimeMicro(), _DEFAULT_LEVEL, _Clean(entry), None
def ParseLogs(logs):
  """Parses a str containing newline separated log entries.

  Parses a series of log entries in the form LOG <level> <timestamp> <message>
  where the level is in the range [0, 4]. Null characters in the entry are
  replaced by newlines. Empty lines are skipped.

  Args:
    logs: A string containing the log entries.

  Returns:
    A list of (timestamp, level, message, source_location) tuples.
  """
  parsed = []
  for line in logs.split('\n'):
    if line:
      parsed.append(ParseLogEntry(line))
  return parsed
class LoggingRecord(object):
  """A record with all logging information.

  A record that came through the Python logging infrastructure, carrying a
  level, a microsecond timestamp, and a source location in addition to the
  message. It may also come from stderr or logservice.write when the line
  matches the classic streaming-logservice format.
  """

  def __init__(self, level, created, message, source_location):
    self.level = level
    self.created = created
    self.message = message
    self.source_location = source_location

  def IsBlank(self):
    # A structured record always carries a real message.
    return False

  def IsComplete(self):
    # Structured records are parsed from whole lines, so never partial.
    return True

  def Tuple(self):
    return (self.level, self.created, self.source_location, self.message)

  def __len__(self):
    # Message bytes plus the fixed "LOG <level> <ts> " framing overhead.
    return FIXED_LOG_LINE_OVERHEAD + len(self.message)

  def __str__(self):
    return 'LOG %d %d %s\n' % (self.level, self.created, self.message)

  def __eq__(self, x):
    return ((self.level, self.created, self.source_location, self.message) ==
            (x.level, x.created, x.source_location, x.message))
class StderrRecord(object):
  """A record with just a message.

  A record that came from stderr or logservice.write where only the raw
  message text is available; level, timestamp and source location are
  synthesized.
  """

  def __init__(self, message):
    self.message = message
    # Stderr lines carry no timestamp, so stamp them on arrival.
    self._created = _CurrentTimeMicro()

  @property
  def level(self):
    # Bare stderr output is always reported at the default level.
    return _DEFAULT_LEVEL

  @property
  def created(self):
    return self._created

  @property
  def source_location(self):
    # No source information is available for raw stderr writes.
    return None

  def Tuple(self):
    # NOTE(review): element order differs from LoggingRecord.Tuple (message
    # third here, source location last) -- confirm before unifying the two.
    return self.level, self.created, Stripnl(self.message), self.source_location

  def IsBlank(self):
    return self.message in ('', '\n')

  def IsComplete(self):
    return self.message and self.message[-1] == '\n'

  def __len__(self):
    return len(self.message)

  def __str__(self):
    return self.message
def RecordFromLine(line):
  """Create the correct type of record based on what the line looks like.

  With the classic streaming API there was no distinction between a message
  that came through the logging infrastructure and one written to stderr or
  logservice.write in the same "LOG ..." shape; this keeps that behavior by
  promoting any line that parses strictly into a LoggingRecord. Corner cases
  where the old stream service stitched a record from multiple writes are
  deliberately not reproduced.

  Args:
    line: a single line written to stderr or logservice.write.

  Returns:
    The appropriate type of record.
  """
  try:
    created, level, _, message = _StrictParseLogEntry(line, clean_message=False)
  except ValueError:
    # Not in the canonical streamed format; keep it as a bare stderr record.
    return StderrRecord(line)
  return LoggingRecord(level, created, Stripnl(message), None)
0e9e2e8429e51a328e397f9e2a05ab7209c9c1a2 | 6,644 | py | Python | python/paddle/fluid/tests/unittests/test_lstm_cudnn_op.py | hshen14/Paddle | 0962be9c800d29e0804fc3135163bdfba1564c61 | [
"Apache-2.0"
] | 2 | 2019-04-03T05:36:17.000Z | 2020-04-29T03:38:54.000Z | python/paddle/fluid/tests/unittests/test_lstm_cudnn_op.py | hshen14/Paddle | 0962be9c800d29e0804fc3135163bdfba1564c61 | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/tests/unittests/test_lstm_cudnn_op.py | hshen14/Paddle | 0962be9c800d29e0804fc3135163bdfba1564c61 | [
"Apache-2.0"
] | 3 | 2019-01-07T06:50:29.000Z | 2019-03-13T08:48:23.000Z | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
import paddle.fluid as fluid
# Activation clamping constants used by lstm_naive's sigmoid/tanh so that
# np.exp never overflows.
SIGMOID_THRESHOLD_MIN = -40.0
SIGMOID_THRESHOLD_MAX = 13.0
EXP_MAX_INPUT = 40.0
def lstm_naive(input, w):
    """NumPy reference implementation of a single-layer unidirectional LSTM.

    Serves as ground truth for checking the cudnn_lstm op. The input size is
    assumed equal to the hidden size, matching the flat weight layout below.

    Args:
        input: array of shape (seq_len, batch_size, hidden_size).
        w: flat 1-D weight array in cuDNN order: four input weight matrices
            (i, f, c, o), four recurrent matrices (i, f, c, o), then eight
            bias vectors (input biases bi/bf/bc/bo followed by the recurrent
            biases).

    Returns:
        (output, last_hidden, last_cell): output has shape
        (seq_len, batch_size, hidden_size); both states have shape
        (batch_size, hidden_size).
    """
    seq_len, batch_size, hidden_size = input.shape

    def sigmoid(x):
        # Clamp the pre-activation into a numerically safe range (values
        # mirror the module-level SIGMOID_THRESHOLD_MIN/MAX constants).
        y = np.copy(x)
        y[x < -40.0] = -40.0
        y[x > 13.0] = 13.0
        return 1. / (1. + np.exp(-y))

    def tanh(x):
        # tanh(x) = 2*sigmoid(2x) - 1 with the exponent clamped (mirrors the
        # module-level EXP_MAX_INPUT constant).
        y = -2. * x
        y[y > 40.0] = 40.0
        return (2. / (1. + np.exp(y))) - 1.

    mat_size = hidden_size * hidden_size

    def next_matrix(offset):
        # Matrices are stored row-major and used transposed.
        block = w[offset:offset + mat_size]
        return (block.reshape((hidden_size, hidden_size)).transpose(),
                offset + mat_size)

    def next_bias(offset):
        return w[offset:offset + hidden_size], offset + hidden_size

    # Unpack the flat weight vector in its fixed cuDNN layout order.
    offset = 0
    wi, offset = next_matrix(offset)
    wf, offset = next_matrix(offset)
    wc, offset = next_matrix(offset)
    wo, offset = next_matrix(offset)
    ri, offset = next_matrix(offset)
    rf, offset = next_matrix(offset)
    rc, offset = next_matrix(offset)
    ro, offset = next_matrix(offset)
    bi_1, offset = next_bias(offset)
    bf_1, offset = next_bias(offset)
    bc_1, offset = next_bias(offset)
    bo_1, offset = next_bias(offset)
    bi_2, offset = next_bias(offset)
    bf_2, offset = next_bias(offset)
    bc_2, offset = next_bias(offset)
    bo_2, offset = next_bias(offset)

    output = []
    pre_h = np.zeros((batch_size, hidden_size), dtype=input.dtype)
    pre_c = np.zeros((batch_size, hidden_size), dtype=input.dtype)

    for i in range(seq_len):
        emb_1 = input[i]
        input_gate = sigmoid(
            np.matmul(emb_1, wi) + np.matmul(pre_h, ri) + bi_1 + bi_2)
        forget_gate = sigmoid(
            np.matmul(emb_1, wf) + np.matmul(pre_h, rf) + bf_1 + bf_2)
        output_gate = sigmoid(
            np.matmul(emb_1, wo) + np.matmul(pre_h, ro) + bo_1 + bo_2)
        c_t_temp = tanh(
            np.matmul(emb_1, wc) + np.matmul(pre_h, rc) + bc_1 + bc_2)
        new_c = input_gate * c_t_temp + forget_gate * pre_c
        new_h = output_gate * tanh(new_c)
        pre_h = new_h
        pre_c = new_c
        output.append(new_h)

    # Stack the per-step hidden states into (seq_len, batch_size, hidden).
    output = np.concatenate(output, -1)
    output = output.reshape((batch_size, -1, hidden_size))
    output = output.transpose((1, 0, 2))

    return output, pre_h, pre_c
class TestCUDNNLstmOp(OpTest):
    """Checks the cudnn_lstm op's forward/backward against lstm_naive."""
    def setUp(self):
        self.op_type = "cudnn_lstm"
        self.dtype = np.float32
        num_steps = 20
        batch_size = 5
        hidden_size = 20
        # Weight vector layout: 4 input + 4 recurrent hidden x hidden
        # matrices, plus 8 bias vectors (see lstm_naive for the exact order).
        input_weight_size = (hidden_size * hidden_size) * 4
        hidden_weight_size = (hidden_size * hidden_size) * 4
        weight_size = input_weight_size + hidden_weight_size
        weight_size += hidden_size * 8
        input = np.random.uniform(
            low=-0.1, high=0.1, size=(num_steps, batch_size,
                                      hidden_size)).astype(self.dtype)
        flat_w = np.random.uniform(
            low=-0.1, high=0.1, size=(weight_size)).astype(self.dtype)
        # Reference outputs computed on CPU with the NumPy implementation.
        output, last_hidden, last_cell = lstm_naive(input, flat_w)
        init_h = np.zeros((batch_size, hidden_size), dtype=np.float32)
        init_c = np.zeros((batch_size, hidden_size), dtype=np.float32)
        scope = core.Scope()
        program = fluid.Program()
        block = program.global_block()
        # RAW cache variable required by the op to hold cuDNN workspace.
        cache_temp = block.create_var(
            name="Cache",
            persistable=True,
            type=core.VarDesc.VarType.RAW,
            stop_gradient=True)
        self.inputs = {
            'Input': OpTest.np_dtype_to_fluid_dtype(input),
            'W': OpTest.np_dtype_to_fluid_dtype(flat_w),
            'InitH': OpTest.np_dtype_to_fluid_dtype(init_h),
            'InitC': OpTest.np_dtype_to_fluid_dtype(init_c),
        }
        self.cache_name_list = ['Cache']
        self.attrs = {
            'max_len': num_steps,
            'dropout_prob': 0.0,
            'is_bidirec': False,
            'input_size': hidden_size,
            'hidden_size': hidden_size,
            'num_layers': 1,
        }
        self.outputs = {
            'Out': output,
            "last_h": last_hidden,
            'last_c': last_cell
        }
    def test_output_with_place(self):
        # Forward check, CUDA-only: cudnn_lstm has no CPU kernel.
        if self.testcuda():
            place = core.CUDAPlace(0)
            self.check_output_with_place(place, atol=1e-5)
    def test_grad_with_place(self):
        # Gradient check w.r.t. all inputs, CUDA-only.
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            self.check_grad_with_place(
                place,
                set(['Input', 'W', 'InitH', 'InitC']),
                ['Out', 'last_h', 'last_c'],
                max_relative_error=0.02)
    def testcuda(self):
        # Helper, not a test: whether this build can run CUDA kernels.
        return core.is_compiled_with_cuda()
unittest.main()
| 34.42487 | 75 | 0.604154 |
8e9db82fa14a4a923f84e8c28045f18725ee4e6d | 2,206 | py | Python | google-cloud-sdk/lib/surface/pubsub/topics/delete.py | KaranToor/MA450 | c98b58aeb0994e011df960163541e9379ae7ea06 | [
"Apache-2.0"
] | 1 | 2017-11-29T18:52:27.000Z | 2017-11-29T18:52:27.000Z | google-cloud-sdk/.install/.backup/lib/surface/pubsub/topics/delete.py | KaranToor/MA450 | c98b58aeb0994e011df960163541e9379ae7ea06 | [
"Apache-2.0"
] | null | null | null | google-cloud-sdk/.install/.backup/lib/surface/pubsub/topics/delete.py | KaranToor/MA450 | c98b58aeb0994e011df960163541e9379ae7ea06 | [
"Apache-2.0"
] | 1 | 2020-07-25T12:09:01.000Z | 2020-07-25T12:09:01.000Z | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cloud Pub/Sub topics delete command."""
from apitools.base.py import exceptions as api_ex
from googlecloudsdk.api_lib.util import exceptions
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.pubsub import util
from googlecloudsdk.core import log
class Delete(base.DeleteCommand):
  """Deletes one or more Cloud Pub/Sub topics."""
  @staticmethod
  def Args(parser):
    # Accept one or more topic names on the command line.
    parser.add_argument('topic', nargs='+',
                        help='One or more topic names to delete.')
  def Collection(self):
    # Resource collection used to render the command's output.
    return util.TOPICS_COLLECTION
  def Run(self, args):
    """This is what gets called when the user runs this command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Yields:
      A serialized object (dict) describing the results of the operation.
      This description fits the Resource described in the ResourceRegistry
      under 'pubsub.projects.topics'. Deletion failures are reported per
      topic via the 'failed' field rather than aborting the whole command.
    """
    msgs = self.context['pubsub_msgs']
    pubsub = self.context['pubsub']
    for topic_name in args.topic:
      topic = msgs.Topic(name=util.TopicFormat(topic_name))
      delete_req = msgs.PubsubProjectsTopicsDeleteRequest(
          topic=util.TopicFormat(topic.name))
      try:
        pubsub.projects_topics.Delete(delete_req)
        failed = None
      except api_ex.HttpError as error:
        # Keep going on failure; surface the server's message per topic.
        exc = exceptions.HttpException(error)
        failed = exc.payload.status_message
      result = util.TopicDisplayDict(topic, failed)
      log.DeletedResource(topic.name, kind='topic', failed=failed)
      yield result
| 33.938462 | 80 | 0.719402 |
fec922ddaec07a638062872503e9bcf797201956 | 642 | py | Python | scrapeSites.py | MuSystemsAnalysis/craigslist_area_search | 636ce7220ec65c346d3e20121e2312a98e39028f | [
"MIT"
] | null | null | null | scrapeSites.py | MuSystemsAnalysis/craigslist_area_search | 636ce7220ec65c346d3e20121e2312a98e39028f | [
"MIT"
] | 1 | 2015-10-19T01:00:28.000Z | 2015-10-19T01:00:28.000Z | scrapeSites.py | MuSystemsAnalysis/craigslist_area_search | 636ce7220ec65c346d3e20121e2312a98e39028f | [
"MIT"
] | null | null | null | #!/usr/bin/python3.4
# Craigslist City Scraper
# By Marshall Ehlinger
# For sp2015 Systems Analysis and Design
# Returns dictionary of 'city name string' : 'site url'
# for all American cities in states/territories @ CL
from bs4 import BeautifulSoup
import re
def getCities():
    """Scrape the saved Craigslist sites page (sites.htm).

    Returns a dict mapping each place name to its site URL, for every
    <li><a href=...> entry found under the page's first <h1> section.
    """
    with open("sites.htm") as handle:
        markup = handle.read()
    soup = BeautifulSoup(markup, "html.parser")
    link_pattern = re.compile('<li><a href="(.+)">(.+)</a>')
    places = {}
    # NOTE(review): this walks two siblings past the first <h1> and assumes
    # the column/state/city nesting of the saved page -- fragile if the
    # markup changes; confirm against a fresh sites.htm.
    for column_div in soup.h1.next_sibling.next_sibling:
        for state in column_div:
            for city in state:
                match = link_pattern.search(str(city))
                if match:
                    places[match.group(2)] = match.group(1)
    return places
8c7763a9c8ff2543bc7a6b4d88b0dc6b8243468b | 1,904 | py | Python | senlin-7.0.0/senlin/api/openstack/versions.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | null | null | null | senlin-7.0.0/senlin/api/openstack/versions.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | senlin-7.0.0/senlin/api/openstack/versions.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 2 | 2020-03-15T01:24:15.000Z | 2020-07-22T20:34:26.000Z | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Controller that returns information on the senlin API versions
"""
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
from six.moves import http_client
import webob.dec
from senlin.api.openstack.v1 import version as v1_controller
class Controller(object):
    """A controller that produces information on the senlin API versions."""

    # Map of supported API version strings to their controller classes.
    Controllers = {
        '1.0': v1_controller.VersionController,
    }

    def __init__(self, conf):
        self.conf = conf

    @webob.dec.wsgify
    def __call__(self, req):
        """Respond to a request for all OpenStack API versions."""
        versions = [controller.version_info()
                    for controller in self.Controllers.values()]
        body = jsonutils.dumps({'versions': versions})
        # 300 Multiple Choices: the client picks one of the listed versions.
        response = webob.Response(request=req,
                                  status=http_client.MULTIPLE_CHOICES,
                                  content_type='application/json')
        response.body = encodeutils.safe_encode(body)
        return response

    def get_controller(self, version):
        """Return the version specific controller.

        :param version: The version string for mapping.
        :returns: A version controller instance or ``None``.
        """
        return self.Controllers.get(version, None)
a3b2dba4452e82984a6464eb2d85143399185379 | 5,983 | py | Python | DataConnector/AppIdentifier.py | twatteynelinear/dustlink_sierra | 9dac02d4fdfdee240a8a9da2e6abc2d7fda3443b | [
"BSD-3-Clause"
] | 4 | 2016-09-07T05:46:20.000Z | 2020-05-31T21:34:27.000Z | DataConnector/AppIdentifier.py | twatteynelinear/dustlink_sierra | 9dac02d4fdfdee240a8a9da2e6abc2d7fda3443b | [
"BSD-3-Clause"
] | null | null | null | DataConnector/AppIdentifier.py | twatteynelinear/dustlink_sierra | 9dac02d4fdfdee240a8a9da2e6abc2d7fda3443b | [
"BSD-3-Clause"
] | 6 | 2015-01-22T10:14:24.000Z | 2020-05-31T21:34:30.000Z | #!/usr/bin/python
import logging
class NullHandler(logging.Handler):
    """Logging handler that silently drops every record it receives.

    Keeps this module quiet unless the application wires up real handlers
    (equivalent of the stdlib logging.NullHandler).
    """
    def emit(self, record):
        # Intentionally a no-op: swallow the record.
        return
# Module-level logger; quiet by default (ERROR threshold + NullHandler)
# until the application configures logging.
log = logging.getLogger('AppIdentifier')
log.setLevel(logging.ERROR)
log.addHandler(NullHandler())
import time
import struct
from DustLinkData import DustLinkData
from EventBus import EventBusClient
from SmartMeshSDK.protocols.oap import OAPDispatcher, \
OAPNotif
from SmartMeshSDK.IpMgrConnectorMux import IpMgrConnectorMux, \
IpMgrSubscribe
class AppIdentifier(EventBusClient.EventBusClient):
    """EventBus client that maps incoming 'notifData' mote notifications to
    the application that produced them, then re-dispatches each packet on a
    per-application 'rawAppData_<appname>' signal.
    """
    # maximum number of pending notifications held by the event bus queue
    QUEUESIZE = 100
    def __init__(self):
        # log
        log.info('creating instance')
        # initialize parent class: subscribe to 'notifData' with
        # self._identifyApp as the callback
        EventBusClient.EventBusClient.__init__(self,
            'notifData',
            self._identifyApp,
            queuesize=self.QUEUESIZE,
        )
        self.name = 'DataConnector_AppIdentifier'
        # add stats
        # local variables
        # OAP packets are parsed by a dedicated dispatcher which calls back
        # into self._handle_oap_notif once decoded
        self.oap_dispatch = OAPDispatcher.OAPDispatcher()
        self.oap_dispatch.register_notif_handler(self._handle_oap_notif)
        # cache of appName -> (transport, resource); only kept warm in
        # "fast mode" (see _identifyApp)
        self.appTransports = {}
    #======================== public ==========================================
    #======================== private =========================================
    def _identifyApp(self,sender,signal,data):
        """Match an incoming data notification to the registered apps (by
        transport/resource) and dispatch it to each matching app."""
        alreadyDispatchedOap = False
        dld = DustLinkData.DustLinkData()
        with dld.dataLock:
            # log
            if log.isEnabledFor(logging.DEBUG):
                log.debug('identifying app for data={0}'.format(data))
            # get the transports of the apps
            if dld.getFastMode():
                # using caching: only (re)populate the cache when empty
                if not self.appTransports:
                    for appName in dld.getAppNames():
                        self.appTransports[appName] = dld.getAppTransport(appName)
            else:
                # not using caching: refresh the mapping on every packet
                for appName in dld.getAppNames():
                    self.appTransports[appName] = dld.getAppTransport(appName)
            # TODO: add support for OAP, CoAP, MoteRunner
            # match incoming data to app
            for appname,(transport,resource) in self.appTransports.items():
                if transport == DustLinkData.DustLinkData.APP_TRANSPORT_UDP:
                    # UDP apps are identified by destination port
                    if resource == data['destPort']:
                        packetOut = {
                            #'timestamp' : data['timestamp'],
                            'timestamp'  : time.time(),
                            'mac'        : data['mac'],
                            'payload'    : data['payload'],
                        }
                        # log
                        if log.isEnabledFor(logging.DEBUG):
                            log.debug('coming from app {0}'.format(appname))
                        # dispatch
                        self._dispatch (
                            signal      = 'rawAppData_'+ str(appname),
                            data        = packetOut,
                        )
                elif transport == DustLinkData.DustLinkData.APP_TRANSPORT_OAP:
                    # hand the raw packet to the OAP dispatcher exactly once,
                    # even if several OAP apps are registered
                    if not alreadyDispatchedOap:
                        notifParams = IpMgrConnectorMux.IpMgrConnectorMux.Tuple_notifData(
                            utcSecs     = int(data['timestamp']),
                            # NOTE(review): (t*1000)%1000 yields milliseconds,
                            # but the field is named utcUsecs — presumably
                            # microseconds, i.e. int((t % 1) * 1e6); confirm
                            # against Tuple_notifData's contract.
                            utcUsecs    = int((data['timestamp']*1000)%1000),
                            macAddress  = data['mac'],
                            srcPort     = data['srcPort'],
                            dstPort     = data['destPort'],
                            data        = data['payload'],
                        )
                        self.oap_dispatch.dispatch_pkt(IpMgrSubscribe.IpMgrSubscribe.NOTIFDATA, notifParams)
                        alreadyDispatchedOap = True
                elif transport == DustLinkData.DustLinkData.APP_TRANSPORT_COAP:
                    raise NotImplementedError()
                elif transport == DustLinkData.DustLinkData.APP_TRANSPORT_MOTERUNNER:
                    raise NotImplementedError()
                else:
                    # not transport specified yet. Can happen if addApp() is
                    # called, but not setAppTransport() yet.
                    pass
    def _handle_oap_notif(self,mac,notif):
        """Callback from the OAP dispatcher: forward decoded OAP temperature
        samples as 'rawAppData_OAPTemperature' packets."""
        # convert MAC to tuple
        mac = tuple(mac)
        if isinstance(notif,OAPNotif.OAPTempSample):
            appname = 'OAPTemperature'
            # attach this app to this mote
            dld = DustLinkData.DustLinkData()
            with dld.dataLock:
                if not dld.getFastMode():
                    try:
                        dld.attachAppToMote(mac,appname)
                    except ValueError:
                        pass # happens when mote not known, app not known, or app already attached to mote
            # TODO: use timestamp from packet. Need to sync manager to UTC for that
            packetOut = {
                #'timestamp'  : time.mktime(notif.received_timestamp.timetuple()),
                'timestamp'   : time.time(),
                'mac'         : mac,
                # re-encode the first sample as a big-endian int16 byte list
                'payload'     : [ord(b) for b in struct.pack('>h',notif.samples[0])],
            }
            # log
            if log.isEnabledFor(logging.DEBUG):
                log.debug('coming from app {0}'.format(appname))
            # dispatch
            self._dispatch (
                signal      = 'rawAppData_{0}'.format(appname),
                data        = packetOut,
            )
| 36.705521 | 106 | 0.467826 |
02832badad95488e9a747c2273893b9f18ee657a | 156 | py | Python | src/utils.py | trankhavy/capstone_demo | c3264a8dd4e64e065019673f8f880327906a992b | [
"MIT"
] | null | null | null | src/utils.py | trankhavy/capstone_demo | c3264a8dd4e64e065019673f8f880327906a992b | [
"MIT"
] | null | null | null | src/utils.py | trankhavy/capstone_demo | c3264a8dd4e64e065019673f8f880327906a992b | [
"MIT"
] | null | null | null | import os
ROOT = os.getcwd()
class JSONConfig(object):
def __init__(self, **args):
for key in args:
setattr(self, key, args[key])
| 17.333333 | 41 | 0.596154 |
d41947510c1bb0c87fc2837a91f3fe028516a1a3 | 22,273 | py | Python | pandas/core/sorting.py | aa-182758/pandas | 53b3dd53c7f2c3d24aa77d5a1bc531b1fcd45d70 | [
"BSD-3-Clause"
] | 1 | 2022-03-05T02:14:02.000Z | 2022-03-05T02:14:02.000Z | pandas/core/sorting.py | aa-182758/pandas | 53b3dd53c7f2c3d24aa77d5a1bc531b1fcd45d70 | [
"BSD-3-Clause"
] | 1 | 2022-03-08T02:15:07.000Z | 2022-03-08T02:15:07.000Z | pandas/core/sorting.py | aa-182758/pandas | 53b3dd53c7f2c3d24aa77d5a1bc531b1fcd45d70 | [
"BSD-3-Clause"
] | 1 | 2022-03-22T11:50:25.000Z | 2022-03-22T11:50:25.000Z | """ miscellaneous sorting / groupby utilities """
from __future__ import annotations
from collections import defaultdict
from typing import (
TYPE_CHECKING,
Callable,
DefaultDict,
Hashable,
Iterable,
Sequence,
)
import warnings
import numpy as np
from pandas._libs import (
algos,
hashtable,
lib,
)
from pandas._libs.hashtable import unique_label_indices
from pandas._typing import (
IndexKeyFunc,
Shape,
npt,
)
from pandas.core.dtypes.common import (
ensure_int64,
ensure_platform_int,
is_extension_array_dtype,
)
from pandas.core.dtypes.generic import (
ABCMultiIndex,
ABCRangeIndex,
)
from pandas.core.dtypes.missing import isna
from pandas.core.construction import extract_array
if TYPE_CHECKING:
from pandas import MultiIndex
from pandas.core.arrays import ExtensionArray
from pandas.core.indexes.base import Index
def get_indexer_indexer(
    target: Index,
    level: str | int | list[str] | list[int],
    ascending: Sequence[bool | int] | bool | int,
    kind: str,
    na_position: str,
    sort_remaining: bool,
    key: IndexKeyFunc,
) -> npt.NDArray[np.intp] | None:
    """
    Helper method that return the indexer according to input parameters for
    the sort_index method of DataFrame and Series.
    Parameters
    ----------
    target : Index
    level : int or level name or list of ints or list of level names
    ascending : bool or list of bools, default True
    kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort'
    na_position : {'first', 'last'}, default 'last'
    sort_remaining : bool, default True
    key : callable, optional
    Returns
    -------
    Optional[ndarray[intp]]
        The indexer for the new index.
    """
    # apply the optional key function up front so every comparison below
    # operates on the mapped values
    target = ensure_key_mapped(target, key, levels=level)
    target = target._sort_levels_monotonic()
    if level is not None:
        # sort on the requested level(s)
        _, indexer = target.sortlevel(
            level, ascending=ascending, sort_remaining=sort_remaining
        )
    elif isinstance(target, ABCMultiIndex):
        # no level given: lexicographic sort over all MultiIndex levels
        indexer = lexsort_indexer(
            target._get_codes_for_sorting(), orders=ascending, na_position=na_position
        )
    else:
        # Check monotonic-ness before sort an index (GH 11080)
        if (ascending and target.is_monotonic_increasing) or (
            not ascending and target.is_monotonic_decreasing
        ):
            # already in the requested order -> caller may skip reindexing
            return None
        indexer = nargsort(
            target, kind=kind, ascending=ascending, na_position=na_position
        )
    return indexer
def get_group_index(
    labels, shape: Shape, sort: bool, xnull: bool
) -> npt.NDArray[np.int64]:
    """
    For the particular label_list, gets the offsets into the hypothetical list
    representing the totally ordered cartesian product of all possible label
    combinations, *as long as* this space fits within int64 bounds;
    otherwise, though group indices identify unique combinations of
    labels, they cannot be deconstructed.
    - If `sort`, rank of returned ids preserve lexical ranks of labels.
    i.e. returned id's can be used to do lexical sort on labels;
    - If `xnull` nulls (-1 labels) are passed through.
    Parameters
    ----------
    labels : sequence of arrays
        Integers identifying levels at each location
    shape : tuple[int, ...]
        Number of unique levels at each location
    sort : bool
        If the ranks of returned ids should match lexical ranks of labels
    xnull : bool
        If true nulls are excluded. i.e. -1 values in the labels are
        passed through.
    Returns
    -------
    An array of type int64 where two elements are equal if their corresponding
    labels are equal at all location.
    Notes
    -----
    The length of `labels` and `shape` must be identical.
    """
    # number of leading levels whose cartesian product still fits in int64
    def _int64_cut_off(shape) -> int:
        acc = 1
        for i, mul in enumerate(shape):
            acc *= int(mul)
            if not acc < lib.i8max:
                return i
        return len(shape)
    def maybe_lift(lab, size) -> tuple[np.ndarray, int]:
        # promote nan values (assigned -1 label in lab array)
        # so that all output values are non-negative
        return (lab + 1, size + 1) if (lab == -1).any() else (lab, size)
    labels = [ensure_int64(x) for x in labels]
    lshape = list(shape)
    if not xnull:
        # shift codes so -1 (null) becomes a regular category of its own
        for i, (lab, size) in enumerate(zip(labels, shape)):
            lab, size = maybe_lift(lab, size)
            labels[i] = lab
            lshape[i] = size
    labels = list(labels)
    # Iteratively process all the labels in chunks sized so less
    # than lib.i8max unique int ids will be required for each chunk
    while True:
        # how many levels can be done without overflow:
        nlev = _int64_cut_off(lshape)
        # compute flat ids for the first `nlev` levels
        stride = np.prod(lshape[1:nlev], dtype="i8")
        out = stride * labels[0].astype("i8", subok=False, copy=False)
        for i in range(1, nlev):
            if lshape[i] == 0:
                stride = np.int64(0)
            else:
                stride //= lshape[i]
            out += labels[i] * stride
        if xnull:  # exclude nulls: any -1 in a level makes the whole row -1
            mask = labels[0] == -1
            for lab in labels[1:nlev]:
                mask |= lab == -1
            out[mask] = -1
        if nlev == len(lshape):  # all levels done!
            break
        # compress what has been done so far in order to avoid overflow
        # to retain lexical ranks, obs_ids should be sorted
        comp_ids, obs_ids = compress_group_index(out, sort=sort)
        labels = [comp_ids] + labels[nlev:]
        lshape = [len(obs_ids)] + lshape[nlev:]
    return out
def get_compressed_ids(
    labels, sizes: Shape
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.int64]]:
    """
    Group_index is offsets into cartesian product of all possible labels. This
    space can be huge, so this function compresses it, by computing offsets
    (comp_ids) into the list of unique labels (obs_group_ids).
    Parameters
    ----------
    labels : list of label arrays
    sizes : tuple[int] of size of the levels
    Returns
    -------
    np.ndarray[np.intp]
        comp_ids
    np.ndarray[np.int64]
        obs_group_ids
    """
    # flatten the per-level labels into a single group index, then compress
    # it down to offsets into the observed (actually occurring) groups
    ids = get_group_index(labels, sizes, sort=True, xnull=False)
    return compress_group_index(ids, sort=True)
def is_int64_overflow_possible(shape: Shape) -> bool:
    """Return True if the cartesian product of *shape* may overflow int64."""
    total = 1
    for dim in shape:
        total *= int(dim)
    return total >= lib.i8max
def _decons_group_index(
comp_labels: npt.NDArray[np.intp], shape: Shape
) -> list[npt.NDArray[np.intp]]:
# reconstruct labels
if is_int64_overflow_possible(shape):
# at some point group indices are factorized,
# and may not be deconstructed here! wrong path!
raise ValueError("cannot deconstruct factorized group indices!")
label_list = []
factor = 1
y = np.array(0)
x = comp_labels
for i in reversed(range(len(shape))):
labels = (x - y) % (factor * shape[i]) // factor
np.putmask(labels, comp_labels < 0, -1)
label_list.append(labels)
y = labels * factor
factor *= shape[i]
return label_list[::-1]
def decons_obs_group_ids(
    comp_ids: npt.NDArray[np.intp],
    obs_ids: npt.NDArray[np.intp],
    shape: Shape,
    labels: Sequence[npt.NDArray[np.signedinteger]],
    xnull: bool,
) -> list[npt.NDArray[np.intp]]:
    """
    Reconstruct labels from observed group ids.
    Parameters
    ----------
    comp_ids : np.ndarray[np.intp]
    obs_ids: np.ndarray[np.intp]
    shape : tuple[int]
    labels : Sequence[np.ndarray[np.signedinteger]]
    xnull : bool
        If nulls are excluded; i.e. -1 labels are passed through.
    """
    if not xnull:
        # mirror the "lift" applied in get_group_index: levels containing -1
        # were shifted by one, so grow the corresponding shape entries
        lift = np.fromiter(((a == -1).any() for a in labels), dtype=np.intp)
        arr_shape = np.asarray(shape, dtype=np.intp) + lift
        shape = tuple(arr_shape)
    if not is_int64_overflow_possible(shape):
        # obs ids are deconstructable! take the fast route!
        out = _decons_group_index(obs_ids, shape)
        # undo the lift (shift codes back down) where it was applied
        return out if xnull or not lift.any() else [x - y for x, y in zip(out, lift)]
    # slow route: pick one representative row per observed group id
    indexer = unique_label_indices(comp_ids)
    return [lab[indexer].astype(np.intp, subok=False, copy=True) for lab in labels]
def indexer_from_factorized(
    labels, shape: Shape, compress: bool = True
) -> npt.NDArray[np.intp]:
    """
    Return a stable sorting indexer for the given factorized labels.

    Parameters
    ----------
    labels : sequence of integer label arrays
    shape : tuple[int, ...]
        Number of unique values per level.
    compress : bool, default True
        If True, compress the flat group index to observed groups before
        computing the sorter.

    Returns
    -------
    np.ndarray[np.intp]
    """
    ids = get_group_index(labels, shape, sort=True, xnull=False)
    if not compress:
        # (ids.size and ids.max()) avoids calling max() on an empty array
        ngroups = (ids.size and ids.max()) + 1
    else:
        ids, obs = compress_group_index(ids, sort=True)
        ngroups = len(obs)
    return get_group_index_sorter(ids, ngroups)
def lexsort_indexer(
    keys, orders=None, na_position: str = "last", key: Callable | None = None
) -> npt.NDArray[np.intp]:
    """
    Performs lexical sorting on a set of keys
    Parameters
    ----------
    keys : sequence of arrays
        Sequence of ndarrays to be sorted by the indexer
    orders : bool or list of booleans, optional
        Determines the sorting order for each element in keys. If a list,
        it must be the same length as keys. This determines whether the
        corresponding element in keys should be sorted in ascending
        (True) or descending (False) order. if bool, applied to all
        elements as above. if None, defaults to True.
    na_position : {'first', 'last'}, default 'last'
        Determines placement of NA elements in the sorted list ("last" or "first")
    key : Callable, optional
        Callable key function applied to every element in keys before sorting
        .. versionadded:: 1.0.0
    Returns
    -------
    np.ndarray[np.intp]
    """
    from pandas.core.arrays import Categorical
    labels = []
    shape = []
    # broadcast a scalar `orders` across all keys
    if isinstance(orders, bool):
        orders = [orders] * len(keys)
    elif orders is None:
        orders = [True] * len(keys)
    keys = [ensure_key_mapped(k, key) for k in keys]
    for k, order in zip(keys, orders):
        with warnings.catch_warnings():
            # TODO(2.0): unnecessary once deprecation is enforced
            # GH#45618 don't issue warning user can't do anything about
            warnings.filterwarnings("ignore", ".*SparseArray.*", category=FutureWarning)
            # factorize this key into ordered integer codes (-1 = missing)
            cat = Categorical(k, ordered=True)
        if na_position not in ["last", "first"]:
            raise ValueError(f"invalid na_position: {na_position}")
        n = len(cat.categories)
        codes = cat.codes.copy()
        mask = cat.codes == -1
        # remap the codes so that a single ascending sort of the remapped
        # codes realizes the requested order/na_position for this key
        if order:  # ascending
            if na_position == "last":
                codes = np.where(mask, n, codes)
            elif na_position == "first":
                codes += 1
        else:  # not order means descending
            if na_position == "last":
                codes = np.where(mask, n, n - codes - 1)
            elif na_position == "first":
                codes = np.where(mask, 0, n - codes)
        if mask.any():
            # the NA slot consumed one extra code value
            n += 1
        shape.append(n)
        labels.append(codes)
    return indexer_from_factorized(labels, tuple(shape))
def nargsort(
    items,
    kind: str = "quicksort",
    ascending: bool = True,
    na_position: str = "last",
    key: Callable | None = None,
    mask: npt.NDArray[np.bool_] | None = None,
) -> npt.NDArray[np.intp]:
    """
    Intended to be a drop-in replacement for np.argsort which handles NaNs.
    Adds ascending, na_position, and key parameters.
    (GH #6399, #5231, #27237)
    Parameters
    ----------
    kind : str, default 'quicksort'
    ascending : bool, default True
    na_position : {'first', 'last'}, default 'last'
    key : Optional[Callable], default None
    mask : Optional[np.ndarray[bool]], default None
        Passed when called by ExtensionArray.argsort.
    Returns
    -------
    np.ndarray[np.intp]
    """
    if key is not None:
        # apply the key once, then recurse with key=None to do the real work
        items = ensure_key_mapped(items, key)
        return nargsort(
            items,
            kind=kind,
            ascending=ascending,
            na_position=na_position,
            key=None,
            mask=mask,
        )
    if isinstance(items, ABCRangeIndex):
        return items.argsort(ascending=ascending)  # TODO: test coverage with key?
    elif not isinstance(items, ABCMultiIndex):
        items = extract_array(items)
    if mask is None:
        mask = np.asarray(isna(items))  # TODO: does this exclude MultiIndex too?
    if is_extension_array_dtype(items):
        # extension arrays implement NA-aware argsort themselves
        return items.argsort(ascending=ascending, kind=kind, na_position=na_position)
    else:
        items = np.asanyarray(items)
    idx = np.arange(len(items))
    non_nans = items[~mask]
    non_nan_idx = idx[~mask]
    nan_idx = np.nonzero(mask)[0]
    # descending order is implemented by reversing the input before the
    # argsort and reversing the resulting indexer afterwards
    if not ascending:
        non_nans = non_nans[::-1]
        non_nan_idx = non_nan_idx[::-1]
    indexer = non_nan_idx[non_nans.argsort(kind=kind)]
    if not ascending:
        indexer = indexer[::-1]
    # Finally, place the NaNs at the end or the beginning according to
    # na_position
    if na_position == "last":
        indexer = np.concatenate([indexer, nan_idx])
    elif na_position == "first":
        indexer = np.concatenate([nan_idx, indexer])
    else:
        raise ValueError(f"invalid na_position: {na_position}")
    return ensure_platform_int(indexer)
def nargminmax(values: ExtensionArray, method: str, axis: int = 0):
    """
    Implementation of np.argmin/argmax but for ExtensionArray and which
    handles missing values.
    Parameters
    ----------
    values : ExtensionArray
    method : {"argmax", "argmin"}
    axis : int, default 0
    Returns
    -------
    int
    """
    assert method in {"argmax", "argmin"}
    func = np.argmax if method == "argmax" else np.argmin
    mask = np.asarray(isna(values))
    arr_values = values._values_for_argsort()
    if arr_values.ndim > 1:
        # 2D case: without NAs defer to numpy; with NAs reduce row/column
        # by row/column through the NA-aware helper
        if mask.any():
            if axis == 1:
                zipped = zip(arr_values, mask)
            else:
                zipped = zip(arr_values.T, mask.T)
            return np.array([_nanargminmax(v, m, func) for v, m in zipped])
        return func(arr_values, axis=axis)
    return _nanargminmax(arr_values, mask, func)
def _nanargminmax(values: np.ndarray, mask: npt.NDArray[np.bool_], func) -> int:
"""
See nanargminmax.__doc__.
"""
idx = np.arange(values.shape[0])
non_nans = values[~mask]
non_nan_idx = idx[~mask]
return non_nan_idx[func(non_nans)]
def _ensure_key_mapped_multiindex(
    index: MultiIndex, key: Callable, level=None
) -> MultiIndex:
    """
    Returns a new MultiIndex in which key has been applied
    to all levels specified in level (or all levels if level
    is None). Used for key sorting for MultiIndex.
    Parameters
    ----------
    index : MultiIndex
        Index to which to apply the key function on the
        specified levels.
    key : Callable
        Function that takes an Index and returns an Index of
        the same shape. This key is applied to each level
        separately. The name of the level can be used to
        distinguish different levels for application.
    level : list-like, int or str, default None
        Level or list of levels to apply the key function to.
        If None, key function is applied to all levels. Other
        levels are left unchanged.
    Returns
    -------
    labels : MultiIndex
        Resulting MultiIndex with modified levels.
    """
    if level is not None:
        # normalize to a list of integer level numbers
        if isinstance(level, (str, int)):
            sort_levels = [level]
        else:
            sort_levels = level
        sort_levels = [index._get_level_number(lev) for lev in sort_levels]
    else:
        sort_levels = list(range(index.nlevels))  # satisfies mypy
    # apply the key only to the selected levels; pass the rest through
    mapped = [
        ensure_key_mapped(index._get_level_values(level), key)
        if level in sort_levels
        else index._get_level_values(level)
        for level in range(index.nlevels)
    ]
    return type(index).from_arrays(mapped)
def ensure_key_mapped(values, key: Callable | None, levels=None):
    """
    Applies a callable key function to the values function and checks
    that the resulting value has the same shape. Can be called on Index
    subclasses, Series, DataFrames, or ndarrays.
    Parameters
    ----------
    values : Series, DataFrame, Index subclass, or ndarray
    key : Optional[Callable], key to be called on the values array
    levels : Optional[List], if values is a MultiIndex, list of levels to
        apply the key to.
    """
    from pandas.core.indexes.api import Index
    if not key:
        return values
    if isinstance(values, ABCMultiIndex):
        return _ensure_key_mapped_multiindex(values, key, level=levels)
    # copy() so a misbehaving key cannot mutate the caller's data in place
    result = key(values.copy())
    if len(result) != len(values):
        raise ValueError(
            "User-provided `key` function must not change the shape of the array."
        )
    try:
        if isinstance(
            values, Index
        ):  # convert to a new Index subclass, not necessarily the same
            result = Index(result)
        else:
            type_of_values = type(values)
            result = type_of_values(result)  # try to revert to original type otherwise
    except TypeError:
        raise TypeError(
            f"User-provided `key` function returned an invalid type {type(result)} \
            which could not be converted to {type(values)}."
        )
    return result
def get_flattened_list(
    comp_ids: npt.NDArray[np.intp],
    ngroups: int,
    levels: Iterable[Index],
    labels: Iterable[np.ndarray],
) -> list[tuple]:
    """Map compressed group id -> key tuple.

    For each of the ``ngroups`` group ids, build the tuple of level values
    identifying that group (one entry per level).
    """
    comp_ids = comp_ids.astype(np.int64, copy=False)
    arrays: DefaultDict[int, list[int]] = defaultdict(list)
    for labs, level in zip(labels, levels):
        # map each group id to this level's code, then look the code up in
        # the level to get the actual key value
        table = hashtable.Int64HashTable(ngroups)
        table.map_keys_to_values(comp_ids, labs.astype(np.int64, copy=False))
        for i in range(ngroups):
            arrays[i].append(level[table.get_item(i)])
    return [tuple(array) for array in arrays.values()]
def get_indexer_dict(
    label_list: list[np.ndarray], keys: list[Index]
) -> dict[Hashable, npt.NDArray[np.intp]]:
    """
    Returns
    -------
    dict:
        Labels mapped to indexers.
    """
    shape = tuple(len(x) for x in keys)
    group_index = get_group_index(label_list, shape, sort=True, xnull=True)
    if np.all(group_index == -1):
        # Short-circuit, lib.indices_fast will return the same
        return {}
    ngroups = (
        ((group_index.size and group_index.max()) + 1)
        if is_int64_overflow_possible(shape)
        else np.prod(shape, dtype="i8")
    )
    # sort rows by group so each group's row positions are contiguous
    sorter = get_group_index_sorter(group_index, ngroups)
    sorted_labels = [lab.take(sorter) for lab in label_list]
    group_index = group_index.take(sorter)
    return lib.indices_fast(sorter, group_index, keys, sorted_labels)
# ----------------------------------------------------------------------
# sorting levels...cleverly?
def get_group_index_sorter(
    group_index: npt.NDArray[np.intp], ngroups: int | None = None
) -> npt.NDArray[np.intp]:
    """
    algos.groupsort_indexer implements `counting sort` and it is at least
    O(ngroups), where
        ngroups = prod(shape)
        shape = map(len, keys)
    that is, linear in the number of combinations (cartesian product) of unique
    values of groupby keys. This can be huge when doing multi-key groupby.
    np.argsort(kind='mergesort') is O(count x log(count)) where count is the
    length of the data-frame;
    Both algorithms are `stable` sort and that is necessary for correctness of
    groupby operations. e.g. consider:
        df.groupby(key)[col].transform('first')
    Parameters
    ----------
    group_index : np.ndarray[np.intp]
        signed integer dtype
    ngroups : int or None, default None
    Returns
    -------
    np.ndarray[np.intp]
    """
    if ngroups is None:
        ngroups = 1 + group_index.max()
    count = len(group_index)
    alpha = 0.0  # taking complexities literally; there may be
    beta = 1.0  # some room for fine-tuning these parameters
    # pick counting sort only when its O(ngroups) cost beats mergesort's
    # O(count * log(count)) cost
    do_groupsort = count > 0 and ((alpha + beta * ngroups) < (count * np.log(count)))
    if do_groupsort:
        sorter, _ = algos.groupsort_indexer(
            ensure_platform_int(group_index),
            ngroups,
        )
        # sorter _should_ already be intp, but mypy is not yet able to verify
    else:
        sorter = group_index.argsort(kind="mergesort")
    return ensure_platform_int(sorter)
def compress_group_index(
    group_index: npt.NDArray[np.int64], sort: bool = True
) -> tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]:
    """
    Group_index is offsets into cartesian product of all possible labels. This
    space can be huge, so this function compresses it, by computing offsets
    (comp_ids) into the list of unique labels (obs_group_ids).
    """
    size_hint = len(group_index)
    table = hashtable.Int64HashTable(size_hint)
    group_index = ensure_int64(group_index)
    # note, group labels come out ascending (ie, 1,2,3 etc)
    comp_ids, obs_group_ids = table.get_labels_groupby(group_index)
    if sort and len(obs_group_ids) > 0:
        # re-rank so comp_ids preserve the lexical order of the group ids
        obs_group_ids, comp_ids = _reorder_by_uniques(obs_group_ids, comp_ids)
    return ensure_int64(comp_ids), ensure_int64(obs_group_ids)
def _reorder_by_uniques(
uniques: npt.NDArray[np.int64], labels: npt.NDArray[np.intp]
) -> tuple[npt.NDArray[np.int64], npt.NDArray[np.intp]]:
"""
Parameters
----------
uniques : np.ndarray[np.int64]
labels : np.ndarray[np.intp]
Returns
-------
np.ndarray[np.int64]
np.ndarray[np.intp]
"""
# sorter is index where elements ought to go
sorter = uniques.argsort()
# reverse_indexer is where elements came from
reverse_indexer = np.empty(len(sorter), dtype=np.intp)
reverse_indexer.put(sorter, np.arange(len(sorter)))
mask = labels < 0
# move labels to right locations (ie, unsort ascending labels)
labels = reverse_indexer.take(labels)
np.putmask(labels, mask, -1)
# sort observed ids
uniques = uniques.take(sorter)
return uniques, labels
| 30.721379 | 88 | 0.633098 |
6e28ea1651e603a824b236666ddda0fdc4e1e949 | 18,105 | py | Python | c2cgeoportal/views/layers.py | craxxkid/c2cgeoportal | 60ca7d5d014d69b0a938f858271c911a30da77c3 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | c2cgeoportal/views/layers.py | craxxkid/c2cgeoportal | 60ca7d5d014d69b0a938f858271c911a30da77c3 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | c2cgeoportal/views/layers.py | craxxkid/c2cgeoportal | 60ca7d5d014d69b0a938f858271c911a30da77c3 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2012-2016, Camptocamp SA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
from pyramid.httpexceptions import HTTPInternalServerError, \
HTTPNotFound, HTTPBadRequest, HTTPForbidden
from pyramid.view import view_config
from sqlalchemy import func, distinct
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
from sqlalchemy.sql import and_, or_
from sqlalchemy.orm.util import class_mapper
from sqlalchemy.orm.properties import ColumnProperty
from geoalchemy2 import Geometry, func as ga_func
from geoalchemy2.shape import from_shape, to_shape
import geojson
from geojson.feature import FeatureCollection, Feature
from shapely.geometry import asShape
from shapely.ops import cascaded_union
from shapely.geos import TopologicalError
from papyrus.protocol import Protocol, create_filter
from c2cgeoportal.lib.caching import get_region, \
set_common_headers, NO_CACHE, PUBLIC_CACHE, PRIVATE_CACHE
from c2cgeoportal.lib.dbreflection import get_class, get_table
from c2cgeoportal.models import DBSessions, DBSession, Layer, RestrictionArea, Role
cache_region = get_region()
class Layers(object):
    def __init__(self, request):
        self.request = request
        # "layers" section of the application settings (may be absent)
        self.settings = request.registry.settings.get("layers", {})
        # optional configuration for the attribute-enumeration service
        self.layers_enum_config = self.settings.get("enum", None)
    def _get_geom_col_info(self, layer):
        """ Return information about the layer's geometry column, namely
        a ``(name, srid)`` tuple, where ``name`` is the name of the
        geometry column, and ``srid`` its srid.
        This function assumes that the names of geometry attributes
        in the mapped class are the same as those of geometry columns.
        """
        mapped_class = get_class(str(layer.geo_table))
        for p in class_mapper(mapped_class).iterate_properties:
            if not isinstance(p, ColumnProperty):
                continue  # pragma: no cover
            col = p.columns[0]
            # the first geometry-typed column wins
            if isinstance(col.type, Geometry):
                return col.name, col.type.srid
        raise HTTPInternalServerError(
            'Failed getting geometry column info for table "%s".' %
            str(layer.geo_table)
        )  # pragma: no cover
    def _get_layer(self, layer_id):
        """ Return a ``Layer`` object for ``layer_id``.

        Raises ``HTTPNotFound`` when the layer is unknown or has no geo
        table (i.e. is not editable through this API).
        """
        layer_id = int(layer_id)
        try:
            query = DBSession.query(Layer, Layer.geo_table)
            query = query.filter(Layer.id == layer_id)
            layer, geo_table = query.one()
        except NoResultFound:
            raise HTTPNotFound("Layer %d not found" % layer_id)
        except MultipleResultsFound:  # pragma: no cover
            raise HTTPInternalServerError(
                "Too many layers found with id %i" % layer_id
            )
        if not geo_table:  # pragma: no cover
            raise HTTPNotFound("Layer %d has no geo table" % layer_id)
        return layer
    def _get_layers_for_request(self):
        """ A generator function that yields ``Layer`` objects based
        on the layer ids found in the ``layer_id`` matchdict. """
        try:
            # "1,2,3" -> individual ids; empty segments are skipped
            layer_ids = (
                int(layer_id) for layer_id in
                self.request.matchdict["layer_id"].split(",") if layer_id)
            for layer_id in layer_ids:
                yield self._get_layer(layer_id)
        except ValueError:
            raise HTTPBadRequest(
                "A Layer id in '%s' is not an integer" %
                self.request.matchdict["layer_id"]
            )  # pragma: no cover
def _get_layer_for_request(self):
""" Return a ``Layer`` object for the first layer id found
in the ``layer_id`` matchdict. """
return next(self._get_layers_for_request())
def _get_protocol_for_layer(self, layer, **kwargs):
""" Returns a papyrus ``Protocol`` for the ``Layer`` object. """
cls = get_class(str(layer.geo_table))
geom_attr = self._get_geom_col_info(layer)[0]
return Protocol(DBSession, cls, geom_attr, **kwargs)
def _get_protocol_for_request(self, **kwargs):
""" Returns a papyrus ``Protocol`` for the first layer
id found in the ``layer_id`` matchdict. """
layer = self._get_layer_for_request()
return self._get_protocol_for_layer(layer, **kwargs)
    def _proto_read(self, layer):
        """ Read features for the layer based on the self.request.

        Public layers are read unrestricted; for private layers the
        features are clipped to the union of the restriction areas the
        user's role grants on this layer.
        """
        proto = self._get_protocol_for_layer(layer)
        if layer.public:
            return proto.read(self.request)
        user = self.request.user
        if user is None:
            raise HTTPForbidden()
        cls = proto.mapped_class
        geom_attr = proto.geom_attr
        # restriction areas granted to the user's role on this layer
        ras = DBSession.query(RestrictionArea.area, RestrictionArea.area.ST_SRID())
        ras = ras.join(RestrictionArea.roles)
        ras = ras.join(RestrictionArea.layers)
        ras = ras.filter(Role.id == user.role.id)
        ras = ras.filter(Layer.id == layer.id)
        collect_ra = []
        use_srid = -1
        for ra, srid in ras.all():
            if ra is None:
                # a NULL area means "no spatial restriction" -> full read
                return proto.read(self.request)
            else:
                use_srid = srid
                collect_ra.append(to_shape(ra))
        if len(collect_ra) == 0:  # pragma: no cover
            raise HTTPForbidden()
        filter1_ = create_filter(self.request, cls, geom_attr)
        # restrict to the union of all granted areas
        ra = cascaded_union(collect_ra)
        filter2_ = ga_func.ST_Contains(
            from_shape(ra, use_srid),
            getattr(cls, geom_attr)
        )
        filter_ = filter2_ if filter1_ is None else and_(filter1_, filter2_)
        return proto.read(self.request, filter=filter_)
@view_config(route_name="layers_read_many", renderer="geojson")
def read_many(self):
set_common_headers(self.request, "layers", NO_CACHE)
features = []
for layer in self._get_layers_for_request():
for f in self._proto_read(layer).features:
f.properties["__layer_id__"] = layer.id
features.append(f)
return FeatureCollection(features)
    @view_config(route_name="layers_read_one", renderer="geojson")
    def read_one(self):
        """Read a single feature by id; for private layers, verify the
        feature's geometry lies within a restriction area granted to the
        user's role."""
        set_common_headers(self.request, "layers", NO_CACHE)
        layer = self._get_layer_for_request()
        protocol = self._get_protocol_for_layer(layer)
        feature_id = self.request.matchdict.get("feature_id", None)
        feature = protocol.read(self.request, id=feature_id)
        if not isinstance(feature, Feature):
            # the protocol returned an error response -> pass it through
            return feature
        if layer.public:
            return feature
        if self.request.user is None:
            raise HTTPForbidden()
        geom = feature.geometry
        if not geom or isinstance(geom, geojson.geometry.Default):  # pragma: no cover
            return feature
        shape = asShape(geom)
        srid = self._get_geom_col_info(layer)[1]
        spatial_elt = from_shape(shape, srid=srid)
        # count restriction areas (for the user's role and this layer)
        # containing the feature; a NULL area means "no restriction"
        allowed = DBSession.query(func.count(RestrictionArea.id))
        allowed = allowed.join(RestrictionArea.roles)
        allowed = allowed.join(RestrictionArea.layers)
        allowed = allowed.filter(Role.id == self.request.user.role.id)
        allowed = allowed.filter(Layer.id == layer.id)
        allowed = allowed.filter(or_(
            RestrictionArea.area.is_(None),
            RestrictionArea.area.ST_Contains(spatial_elt)
        ))
        if allowed.scalar() == 0:
            raise HTTPForbidden()
        return feature
@view_config(route_name="layers_count", renderer="string")
def count(self):
set_common_headers(self.request, "layers", NO_CACHE)
protocol = self._get_protocol_for_request()
return protocol.count(self.request)
    @view_config(route_name="layers_create", renderer="geojson")
    def create(self):
        """Create feature(s) in the layer; each new geometry must be valid
        and contained in a read-write restriction area granted to the
        user's role."""
        set_common_headers(self.request, "layers", NO_CACHE)
        if self.request.user is None:
            raise HTTPForbidden()
        self.request.response.cache_control.no_cache = True
        layer = self._get_layer_for_request()
        def check_geometry(r, feature, o):
            # pre-create hook: authorize and validate each feature geometry
            geom = feature.geometry
            if geom and not isinstance(geom, geojson.geometry.Default):
                shape = asShape(geom)
                srid = self._get_geom_col_info(layer)[1]
                spatial_elt = from_shape(shape, srid=srid)
                # writable restriction areas containing the new geometry;
                # a NULL area means "no spatial restriction"
                allowed = DBSession.query(func.count(RestrictionArea.id))
                allowed = allowed.join(RestrictionArea.roles)
                allowed = allowed.join(RestrictionArea.layers)
                allowed = allowed.filter(RestrictionArea.readwrite.is_(True))
                allowed = allowed.filter(Role.id == self.request.user.role.id)
                allowed = allowed.filter(Layer.id == layer.id)
                allowed = allowed.filter(or_(
                    RestrictionArea.area.is_(None),
                    RestrictionArea.area.ST_Contains(spatial_elt)
                ))
                if allowed.scalar() == 0:
                    raise HTTPForbidden()
                # check if geometry is valid
                self._validate_geometry(spatial_elt)
        protocol = self._get_protocol_for_layer(layer, before_create=check_geometry)
        try:
            features = protocol.create(self.request)
            return features
        except TopologicalError, e:
            # invalid geometry -> report as a client error, not a 500
            self.request.response.status_int = 400
            return {"validation_error": str(e)}
@view_config(route_name="layers_update", renderer="geojson")
def update(self):
    """Update an existing feature by id.

    Requires an authenticated user.  ``check_geometry`` requires a
    read/write RestrictionArea (for the user's role and this layer) that
    covers both the feature's current geometry and, when one is supplied,
    the new geometry; the new geometry is also validated topologically.

    Returns the updated feature, or a 400 response with a
    ``validation_error`` message when geometry validation fails.
    """
    set_common_headers(self.request, "layers", NO_CACHE)
    if self.request.user is None:
        raise HTTPForbidden()
    self.request.response.cache_control.no_cache = True
    feature_id = self.request.matchdict.get("feature_id", None)
    layer = self._get_layer_for_request()

    def check_geometry(r, feature, o):
        # we need both the "original" and "new" geometry to be
        # within the restriction area
        geom_attr, srid = self._get_geom_col_info(layer)
        geom_attr = getattr(o, geom_attr)
        geom = feature.geometry
        allowed = DBSession.query(func.count(RestrictionArea.id))
        allowed = allowed.join(RestrictionArea.roles)
        allowed = allowed.join(RestrictionArea.layers)
        allowed = allowed.filter(RestrictionArea.readwrite.is_(True))
        allowed = allowed.filter(Role.id == self.request.user.role.id)
        allowed = allowed.filter(Layer.id == layer.id)
        allowed = allowed.filter(or_(
            RestrictionArea.area.is_(None),
            RestrictionArea.area.ST_Contains(geom_attr)
        ))
        spatial_elt = None
        if geom and not isinstance(geom, geojson.geometry.Default):
            shape = asShape(geom)
            spatial_elt = from_shape(shape, srid=srid)
            allowed = allowed.filter(or_(
                RestrictionArea.area.is_(None),
                RestrictionArea.area.ST_Contains(spatial_elt)
            ))
        if allowed.scalar() == 0:
            raise HTTPForbidden()
        # check is geometry is valid
        self._validate_geometry(spatial_elt)

    protocol = self._get_protocol_for_layer(layer, before_update=check_geometry)
    try:
        feature = protocol.update(self.request, feature_id)
        return feature
    # BUG FIX: ``except TopologicalError, e:`` is Python-2-only syntax;
    # ``as`` works on Python 2.6+ and Python 3.
    except TopologicalError as e:
        self.request.response.status_int = 400
        return {"validation_error": str(e)}
def _validate_geometry(self, geom):
    """Raise ``TopologicalError`` if *geom* is not simple or not valid.

    Validation only runs when the ``geometry_validation`` setting is
    enabled; a ``None`` geometry is always accepted.
    """
    validate = self.settings.get("geometry_validation", False)
    if validate and geom is not None:
        simple = DBSession.query(func.ST_IsSimple(geom)).scalar()
        if not simple:
            raise TopologicalError("Not simple")
        valid = DBSession.query(func.ST_IsValid(geom)).scalar()
        if not valid:
            # Surface the database's explanation of the invalidity.
            reason = DBSession.query(func.ST_IsValidReason(geom)).scalar()
            raise TopologicalError(reason)
@view_config(route_name="layers_delete")
def delete(self):
    """Delete a feature by id.

    Requires an authenticated user whose role has a read/write
    RestrictionArea for this layer that either has no area or contains
    the feature's current geometry (checked in ``security_cb`` before
    the protocol performs the delete).
    """
    set_common_headers(self.request, "layers", NO_CACHE)
    if self.request.user is None:
        raise HTTPForbidden()
    self.request.response.cache_control.no_cache = True
    feature_id = self.request.matchdict.get("feature_id", None)
    layer = self._get_layer_for_request()

    def security_cb(r, o):
        # ``o`` is the mapped object about to be deleted; read its
        # geometry column to test coverage by a restriction area.
        geom_attr = getattr(o, self._get_geom_col_info(layer)[0])
        allowed = DBSession.query(func.count(RestrictionArea.id))
        allowed = allowed.join(RestrictionArea.roles)
        allowed = allowed.join(RestrictionArea.layers)
        allowed = allowed.filter(RestrictionArea.readwrite.is_(True))
        allowed = allowed.filter(Role.id == self.request.user.role.id)
        allowed = allowed.filter(Layer.id == layer.id)
        allowed = allowed.filter(or_(
            RestrictionArea.area.is_(None),
            RestrictionArea.area.ST_Contains(geom_attr)
        ))
        if allowed.scalar() == 0:
            raise HTTPForbidden()

    protocol = self._get_protocol_for_layer(layer, before_delete=security_cb)
    return protocol.delete(self.request, feature_id)
@view_config(route_name="layers_metadata", renderer="xsd")
def metadata(self):
    """Return the XSD metadata for the requested layer.

    Private layers require an authenticated user.
    """
    set_common_headers(self.request, "layers", PRIVATE_CACHE)
    layer = self._get_layer_for_request()
    if self.request.user is None and not layer.public:
        raise HTTPForbidden()
    table_name = str(layer.geo_table)
    return self._metadata(table_name, layer.exclude_properties)
@cache_region.cache_on_arguments()
def _metadata(self, geo_table, exclude_properties):
    """Build (and cache, keyed on the arguments) the class for *geo_table*."""
    return get_class(geo_table, exclude_properties=exclude_properties)
@view_config(route_name="layers_enumerate_attribute_values", renderer="json")
def enumerate_attribute_values(self):
    """List the distinct values of one attribute of an enumerated layer.

    Layer and field names come from the route match; the actual lookup is
    delegated to the cached ``_enumerate_attribute_values``.
    """
    set_common_headers(self.request, "layers", PUBLIC_CACHE)
    if self.layers_enum_config is None:  # pragma: no cover
        raise HTTPInternalServerError("Missing configuration")
    general_dbsession_name = self.layers_enum_config.get("dbsession", "dbsession")
    layername = self.request.matchdict["layer_name"]
    fieldname = self.request.matchdict["field_name"]
    # TODO check if layer is public or not
    return self._enumerate_attribute_values(
        general_dbsession_name, layername, fieldname
    )
@cache_region.cache_on_arguments()
def _enumerate_attribute_values(self, general_dbsession_name, layername, fieldname):
    """Query the distinct values of *fieldname* for the configured layer.

    Resolves the database session, table and column from
    ``layers_enum_config`` (falling back to the layer-level table and the
    general session), then returns ``{"items": [{"label": v, "value": v}]}``
    for each distinct value, ordered by the attribute.
    """
    if layername not in self.layers_enum_config:  # pragma: no cover
        raise HTTPBadRequest("Unknown layer: %s" % layername)
    layerinfos = self.layers_enum_config[layername]
    if fieldname not in layerinfos["attributes"]:  # pragma: no cover
        raise HTTPBadRequest("Unknown attribute: %s" % fieldname)
    dbsession = DBSessions.get(
        layerinfos.get("dbsession", general_dbsession_name), None
    )
    if dbsession is None:  # pragma: no cover
        raise HTTPInternalServerError(
            "No dbsession found for layer '%s'" % layername
        )
    layer_table = layerinfos.get("table", None)
    attrinfos = layerinfos["attributes"][fieldname]
    # A null attribute config means "use all the defaults".
    attrinfos = {} if attrinfos is None else attrinfos
    table = attrinfos.get("table", layer_table)
    if table is None:  # pragma: no cover
        raise HTTPInternalServerError(
            "No config table found for layer '%s'" % layername
        )
    layertable = get_table(table, session=dbsession)
    # The column may be renamed in the config; default to the field name.
    column = attrinfos["column_name"] \
        if "column_name" in attrinfos else fieldname
    attribute = getattr(layertable.columns, column)
    # For instance if `separator` is a "," we consider that the column contains a
    # comma separate list of values e.g.: "value1,value2".
    if "separator" in attrinfos:
        separator = attrinfos["separator"]
        attribute = func.unnest(func.string_to_array(
            func.string_agg(attribute, separator), separator
        ))
    values = dbsession.query(distinct(attribute)).order_by(attribute).all()
    enum = {
        "items": [{"label": value[0], "value": value[0]} for value in values]
    }
    return enum
| 41.909722 | 88 | 0.646065 |
36b54ce6e976ba04021dea69a438033c123627a4 | 60 | py | Python | lang/py/cookbook/v2/source/cb2_19_7_exm_2.py | ch1huizong/learning | 632267634a9fd84a5f5116de09ff1e2681a6cc85 | [
"MIT"
] | null | null | null | lang/py/cookbook/v2/source/cb2_19_7_exm_2.py | ch1huizong/learning | 632267634a9fd84a5f5116de09ff1e2681a6cc85 | [
"MIT"
] | null | null | null | lang/py/cookbook/v2/source/cb2_19_7_exm_2.py | ch1huizong/learning | 632267634a9fd84a5f5116de09ff1e2681a6cc85 | [
"MIT"
] | null | null | null | weekly_sales = itertools.imap(sum, windows(daily_sales, 7))
| 30 | 59 | 0.783333 |
bcfd35594b162f609aa7c9d98f9a4ecde89e6195 | 4,876 | py | Python | grading_module/notes/was_the_original_ag/grader.py | minneapolis-edu/web-autograder | 96e8e976166576a396ac7b6f6fb978d62c20d665 | [
"Unlicense"
] | 1 | 2019-02-27T00:38:08.000Z | 2019-02-27T00:38:08.000Z | grading_module/notes/was_the_original_ag/grader.py | minneapolis-edu/web-autograder | 96e8e976166576a396ac7b6f6fb978d62c20d665 | [
"Unlicense"
] | 5 | 2020-02-11T23:32:47.000Z | 2022-03-02T02:54:52.000Z | grading_module/notes/was_the_original_ag/grader.py | minneapolis-edu/web-autograder | 96e8e976166576a396ac7b6f6fb978d62c20d665 | [
"Unlicense"
] | null | null | null | # Read the JSON file with grades
# Assume test and grades/file are correct
#
import json
import os
import re
'''
Example schema: test_file might be an array OR single string (barf, fixme)
{ "week" : 9 ,
"questions" : [
{
"question" : 1,
"java_file": "TicketProgram.java",
"test_file": "TicketTest.java",
"points": 5
},
{
"question" : 2,
"java_file": "AnotherProgram.java",
"test_file": ["SomeTicketTests.java", "YetMoreTests.java"]
"points": 15
},
]
}
'''
# scheme == json.load(grade_json_file)
def grade(project_location, test_set, scheme): # test set e.g. week_1
    """Grade a student's Maven project from its surefire test reports.

    For every question in *scheme*, reads the surefire report of each
    associated test file, accumulates tests run / errors / failures, and
    awards points proportional to the fraction of passing tests.

    :param project_location: root directory of the student's project
    :param test_set: report-name prefix, e.g. ``week_1``
    :param scheme: parsed grading JSON (see the schema in the module header)
    :return: ``(results, total_points)`` where ``results`` maps each
        ``java_file`` name to the points earned for that question
    """
    print('scheme is', scheme)
    # surefile report filenames look like this
    #   test_package.testfile.txt
    #   test_week_1.Test_Mail_Prices.txt
    # Example surefire report content (line index 3 holds the summary):
    '''
    -------------------------------------------------------------------------------
    Test set: week_1.TestNASAAstronaut
    -------------------------------------------------------------------------------
    Tests run: 1, Failures: 1, Errors: 0, Skipped: 0, Time elapsed: 0 s <<< FAILURE! - in test_week_1.TestNASAAstronaut
    testAstronautQualifications(week_1.TestNASAAstronaut)  Time elapsed: 0 s  <<< FAILURE!
    java.lang.AssertionError: Height = 60, swim = 75 should return true
    at test_week_1.TestNASAAstronaut.testAstronautQualifications(TestNASAAstronaut.java:41)
    '''
    # Compare to filenames in json
    results = {}
    total_points = 0
    for item in scheme["questions"]:
        test_filenames = item['test_file'] # This is either a String or list. Ensure it is list
        java_filename = item['java_file']
        if type(test_filenames) is str:
            test_filename_list = [ test_filenames ]
        else:
            test_filename_list = test_filenames
        points_avail = item['points']
        run = 0
        passing_tests = 0
        errors = 0
        failures = 0
        # Find this report
        print(test_filenames)
        for test_filename in test_filename_list:
            try:
                report_filename = '%s.%s.txt' % (test_set, test_filename)
                print(report_filename)
                report_location = os.path.join(project_location, 'target', 'surefire-reports', report_filename)
                print(report_location)
                with open(report_location) as f:
                    report = f.readlines()
                # Line 3 of a surefire report is the "Tests run: ..." summary.
                q_run, q_errors, q_failures = extract(report[3]) # ugh
                print('test file results: Points %d, run %d , errors %d, fails %d' % (points_avail, q_run, q_errors, q_failures))
                # So question is worth e.g. 5 points. 3 tests, 1 fails. Student gets 5/3 * 2 points for this
            except IOError:
                print("Can't find the test report file. " + report_location + " Either the tests haven't run, or there's a build error for this project. Returning None " + report_filename)
                # just be zeros
                q_run, q_errors, q_failures = (0, 0, 0) # ugh
                # Either the name in week_1.json is wrong
                # Or, a MVN build error. TODO check for MVN build errors. One hacky way to to see if target/classes has anything in or not.
                # If the tests haven't run, this file doesn't exist. Caller should check for this being none.
            run = run + q_run
            errors = errors + q_errors
            failures = failures + q_failures
            print('after adding test file results: Points %d, run %d , errors %d, fails %d' % (points_avail, run, errors, failures))
        # For all tests for this question,
        passing_tests = run - (errors + failures)
        print("passing tests for this question", passing_tests)
        if run == 0:
            points_for_question = 0
        else:
            points_for_question = ( points_avail / run ) * passing_tests
        print('points earned for this question', points_for_question )
        total_points += points_for_question
        print('total points now ', total_points)
        results[java_filename] = points_for_question
    print('RESULTS', results, total_points)
    return results, total_points
def extract(line):
    """Parse a surefire summary line into ``(run, errors, failures)`` ints.

    Example input::

        "Tests run: 4, Failures: 1, Errors: 2, Skipped: 0, ..."

    BUG FIX: the original bound the "Failures:" count to ``errors`` and the
    "Errors:" count to ``failures``.  The only caller sums the two, so
    totals were unaffected, but the names now match the report fields.
    Regexes are also raw strings so ``\\d`` is not a string escape.
    """
    run = re.search(r'(?<=Tests run: )\d+', line).group(0)
    errors = re.search(r'(?<=Errors: )\d+', line).group(0)
    failures = re.search(r'(?<=Failures: )\d+', line).group(0)
    return int(run), int(errors), int(failures)
| 32.291391 | 188 | 0.596185 |
bfcb86bfe4ee14dd7f4eb8ae0edc5e0090f9080e | 5,766 | py | Python | FusionIIIT/applications/programme_curriculum/models.py | Draco-D/Fusion | 065f5f9939d6f736b6b42c2650e5a05aef5dab52 | [
"bzip2-1.0.6"
] | 1 | 2021-08-05T10:31:35.000Z | 2021-08-05T10:31:35.000Z | FusionIIIT/applications/programme_curriculum/models.py | Draco-D/Fusion | 065f5f9939d6f736b6b42c2650e5a05aef5dab52 | [
"bzip2-1.0.6"
] | null | null | null | FusionIIIT/applications/programme_curriculum/models.py | Draco-D/Fusion | 065f5f9939d6f736b6b42c2650e5a05aef5dab52 | [
"bzip2-1.0.6"
] | null | null | null | from django.db import models
import datetime
from django.db.models.fields import IntegerField, PositiveIntegerField
# Create your models here.
# (stored value, human-readable label) pairs for Programme.category.
PROGRAMME_CATEGORY_CHOICES = [
    ('UG', 'Undergraduate'),
    ('PG', 'Postgraduate'),
    ('PHD', 'Doctor of Philosophy')
]

# (stored value, label) pairs for CourseSlot.type; value and label coincide.
COURSESLOT_TYPE_CHOICES = [
    ('Professional Core', 'Professional Core'),
    ('Professional Elective', 'Professional Elective'),
    ('Professional Lab', 'Professional Lab'),
    ('Engineering Science', 'Engineering Science'),
    ('Natural Science', 'Natural Science'),
    ('Humanities', 'Humanities'),
    ('Design', 'Design'),
    ('Manufacturing', 'Manufacturing'),
    ('Management Science', 'Management Science')
]
class Programme(models.Model):
    """An academic programme (e.g. an undergraduate or PhD programme)."""

    # Degree level; one of PROGRAMME_CATEGORY_CHOICES.
    category = models.CharField(max_length=3, choices=PROGRAMME_CATEGORY_CHOICES, null=False, blank=False)
    name = models.CharField(max_length=70, null=False, unique=True, blank=False)

    def __str__(self):
        return str(self.category + " - "+ self.name)

    @property
    def curriculums(self):
        """All Curriculum rows belonging to this programme."""
        return Curriculum.objects.filter(programme=self.id)

    def get_curriculums_objects(self):
        # Method form of ``curriculums`` (same queryset).
        return Curriculum.objects.filter(programme=self.id)

    # NOTE(review): naming is inverted relative to ``curriculums`` above —
    # here the *property* is ``get_discipline_objects`` and the plain
    # method is ``disciplines``.  Kept as-is for template compatibility.
    @property
    def get_discipline_objects(self):
        return Discipline.objects.filter(programmes=self.id)

    def disciplines(self):
        return Discipline.objects.filter(programmes=self.id)
class Discipline(models.Model):
    """A discipline (e.g. a department/stream) offered under programmes."""

    name = models.CharField(max_length=100, null=False, unique=True, blank=False)
    # A discipline can be offered by several programmes and vice versa.
    programmes = models.ManyToManyField(Programme)

    def __str__(self):
        return str(self.name)

    @property
    def batches(self):
        """All Batch rows belonging to this discipline."""
        return Batch.objects.filter(discipline=self.id)

    def get_batch_objects(self):
        # Method form of ``batches`` (same queryset).
        return Batch.objects.filter(discipline=self.id)
class Curriculum(models.Model):
    """A versioned curriculum of a programme.

    ``(name, version)`` is unique; ``working_curriculum`` flags versions
    that are currently in use.
    """

    programme = models.ForeignKey(Programme, on_delete=models.CASCADE, null=False)
    name = models.CharField(max_length=100, null=False, blank=False)
    version = models.PositiveIntegerField(default=1, null=False)
    working_curriculum = models.BooleanField(default=True, null=False)
    no_of_semester = models.PositiveIntegerField(default=1, null=False)
    # Minimum credits required to complete this curriculum.
    min_credit = models.PositiveIntegerField(default = 0)

    class Meta:
        unique_together = ('name', 'version',)

    def __str__(self):
        return str(self.name + " v" + str(self.version))

    @property
    def batches(self):
        """All Batch rows following this curriculum."""
        return Batch.objects.filter(curriculum=self.id)

    def get_batches(self):
        # Method form of ``batches`` (same queryset).
        return Batch.objects.filter(curriculum=self.id)

    @property
    def semesters(self):
        """Semesters of this curriculum, ordered by semester number."""
        return Semester.objects.filter(curriculum=self.id).order_by('semester_no')

    def get_semesters_objects(self):
        # Method form of ``semesters`` (same queryset).
        return Semester.objects.filter(curriculum=self.id).order_by('semester_no')
class Semester(models.Model):
    """One numbered semester inside a curriculum."""

    curriculum = models.ForeignKey(Curriculum, null=False, on_delete=models.CASCADE)
    semester_no = models.PositiveIntegerField(null=False)

    class Meta:
        unique_together = ('curriculum', 'semester_no',)

    def __str__(self):
        return str(Curriculum.__str__(self.curriculum) + ", sem-" + str(self.semester_no))

    @property
    def courseslots(self):
        """Course slots scheduled in this semester, in insertion order."""
        return CourseSlot.objects.filter(semester=self.id).order_by("id")

    def get_courseslots_objects(self):
        # Method form of ``courseslots`` (same queryset).
        return CourseSlot.objects.filter(semester=self.id).order_by("id")
class Course(models.Model):
    """A course with its credit structure and descriptive content."""

    code = models.CharField(max_length=10, null=False, unique=True, blank=False)
    name = models.CharField(max_length=100, null=False, unique=True, blank=False)
    credit = models.PositiveIntegerField(default=0, null=False, blank=False)
    # Weekly contact-hour breakdown.  These use the bare
    # ``PositiveIntegerField`` imported from django.db.models.fields.
    lecture_hours = PositiveIntegerField(null=True, )
    tutorial_hours = PositiveIntegerField(null=True)
    # NOTE(review): "pratical" is a typo for "practical", but renaming the
    # field would change the DB column — left unchanged.
    pratical_hours = PositiveIntegerField(null=True)
    discussion_hours = PositiveIntegerField(null=True)
    project_hours = PositiveIntegerField(null=True)
    pre_requisits = models.TextField(null=True)
    syllabus = models.TextField()
    evaluation_schema = models.TextField()
    ref_books = models.TextField()

    class Meta:
        unique_together = ('code', 'name',)

    def __str__(self):
        return str(self.code + " - " +self.name)

    @property
    def courseslots(self):
        """Course slots in which this course is offered."""
        return CourseSlot.objects.filter(courses=self.id)

    def get_courseslots_objects(self):
        # Method form of ``courseslots`` (same queryset).
        return CourseSlot.objects.filter(courses=self.id)
class Batch(models.Model):
    """A year-wise batch of students in a discipline, tied to a curriculum."""

    name = models.CharField(max_length=50, null=False, unique=True, blank=False)
    discipline = models.ForeignKey(Discipline, null=False, on_delete=models.CASCADE)
    # NOTE(review): ``datetime.date.today().year`` is evaluated once at
    # import time, so the default year is frozen until the process
    # restarts — a callable default would track the current year.
    year = models.PositiveIntegerField(default=datetime.date.today().year, null=False)
    curriculum = models.ForeignKey(Curriculum, null=True, on_delete=models.SET_NULL)

    class Meta:
        unique_together = ('name', 'discipline', 'year',)

    def __str__(self):
        return str(self.name)
class CourseSlot(models.Model):
    """A slot in a semester that offers one or more alternative courses."""

    semester = models.ForeignKey(Semester, null=False, on_delete=models.CASCADE)
    name = models.CharField(max_length=100, null=False, blank=False)
    # Slot category; one of COURSESLOT_TYPE_CHOICES.
    type = models.CharField(max_length=70, choices=COURSESLOT_TYPE_CHOICES, null=False)
    # for_batches = models.ManyToManyField(Batch)
    course_slot_info = models.TextField(null=True)
    courses = models.ManyToManyField(Course)
    # Registration head-count bounds for this slot.
    min_registration_limit = models.PositiveIntegerField(default = 0)
    max_registration_limit = models.PositiveIntegerField(default = 1000)

    def __str__(self):
        return str(Semester.__str__(self.semester) + ", " + self.name + ", id = " + str(self.id))

    @property
    def for_batches(self):
        """Batches that follow the curriculum owning this slot's semester."""
        return ((Semester.objects.get(id=self.semester.id)).curriculum).batches
0b67729a11d24d8a8956e32f2e65e6a2e9329971 | 1,787 | py | Python | django_semantic_ui/templatetags/dsu.py | franklintiel/django-semantic-ui | 2403fda64c71f31028a8db769cc92e8a7f0cb8f8 | [
"MIT"
] | 5 | 2018-12-16T23:40:40.000Z | 2021-06-13T23:37:10.000Z | django_semantic_ui/templatetags/dsu.py | franklintiel/django-semantic-ui | 2403fda64c71f31028a8db769cc92e8a7f0cb8f8 | [
"MIT"
] | 3 | 2019-04-08T07:56:49.000Z | 2021-02-01T13:34:24.000Z | django_semantic_ui/templatetags/dsu.py | franklintiel/django-semantic-ui | 2403fda64c71f31028a8db769cc92e8a7f0cb8f8 | [
"MIT"
] | 1 | 2019-01-08T10:02:16.000Z | 2019-01-08T10:02:16.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import template
from django_semantic_ui.utils import Utils, Constant
from django_semantic_ui.models import SemanticUI
# Template-tag registry for this module's {% ... %} tags.
register = template.Library()
# Module-level settings holder read by the tags below.
dsu = SemanticUI()
@register.simple_tag()
def dsu_javascript_url():
    """
    Template tag that renders the <script> tag loading the main
    semantic-ui javascript lib.
    Example: {% dsu_javascript_url %}
    :return: string
    """
    src = Utils.get_semantic_javascript_url(semantic_folder=dsu.semantic_folder)
    return Utils.render_tag(tag='script', attrs={'src': src})
@register.simple_tag()
def dsu_stylesheet_url():
    """
    Template tag that renders the <link> tag loading the main semantic-ui
    stylesheet.
    Example: {% dsu_stylesheet_url %}
    :return: string
    """
    href = Utils.get_semantic_stylesheet_url(semantic_folder=dsu.semantic_folder)
    link_attrs = {'href': href, 'rel': 'stylesheet', 'type': 'text/css'}
    return Utils.render_tag(tag='link', attrs=link_attrs, close=False)
@register.simple_tag()
def dsu_jquery_url():
    """
    Template tag that renders the <script> tag for the jQuery lib required
    by semantic-ui (this tag is optional).
    Example: {% dsu_jquery_url %}
    NOTE: You can change the jquery_url in your settings.py using the
    DSU_JQUERY_URL setting; integrity attributes are only emitted for the
    default CDN URL.
    :return: string
    """
    script_attrs = {'src': dsu.jquery_url}
    if dsu.jquery_url == Constant.JQUERY_URL:
        script_attrs['integrity'] = 'sha256-hVVnYaiADRTO2PzUGmuLJr8BLUSjGIZsDYGmIJLv2b8='
        script_attrs['crossorigin'] = 'anonymous'
    return Utils.render_tag(tag='script', attrs=script_attrs)
| 28.365079 | 113 | 0.643537 |
d2dd41e7ca7833b188816ed599bbf417544df047 | 5,978 | py | Python | userprofile/views.py | aneumeier/userprofile | 065f24b4cad82a4281a2e05968861d236b5d05ef | [
"MIT"
] | null | null | null | userprofile/views.py | aneumeier/userprofile | 065f24b4cad82a4281a2e05968861d236b5d05ef | [
"MIT"
] | null | null | null | userprofile/views.py | aneumeier/userprofile | 065f24b4cad82a4281a2e05968861d236b5d05ef | [
"MIT"
] | null | null | null | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
"""
Views
"""
from django.views.generic import TemplateView
from django.views.generic import FormView
from django.views.generic.detail import DetailView
from django.utils.decorators import method_decorator
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.contrib.auth.models import User
from django.contrib.auth.views import login
from django.contrib.auth import logout
from django.shortcuts import redirect
from django.conf import settings
import urlparse
from .utils import default_redirect
from .forms import APAuthenticationForm
from .models import Profile
from social.backends.utils import load_backends
class Home(TemplateView):
    """Landing page that exposes the configured social-auth backends."""

    template_name = "user/home.html"

    def get_context_data(self, **kwargs):
        context = super(Home, self).get_context_data(**kwargs)
        backends = load_backends(settings.AUTHENTICATION_BACKENDS)
        context['available_backends'] = backends
        return context
class Login(FormView):
    """
    Class-based login view.

    from: https://github.com/stefanfoulis/django-class-based-auth-views/ \
        blob/develop/class_based_auth_views/views.py
    This is a class based version of django.contrib.auth.views.login.
    Usage:
        in urls.py:
            url(r'^login/$',
                LoginView.as_view(
                    form_class=MyCustomAuthFormClass,
                    success_url='/my/custom/success/url/),
                name="login"),
    """
    redirect_field_name = 'next'
    form_class = APAuthenticationForm
    template_name = "user/login.html"
    success_url = "/user/"

    @method_decorator(csrf_protect)
    @method_decorator(never_cache)
    def dispatch(self, *args, **kwargs):
        # CSRF-protect and never cache the whole login flow.
        return super(Login, self).dispatch(*args, **kwargs)

    def form_valid(self, form):
        """
        The user has provided valid credentials (this was checked in
        AuthenticationForm.is_valid()). So now we
        can check the test cookie stuff and log him in.
        """
        self.check_and_delete_test_cookie()
        login(self.request, form.get_user())
        return super(Login, self).form_valid(form)

    def form_invalid(self, form):
        """
        The user has provided invalid credentials (this was checked in
        AuthenticationForm.is_valid()). So now we
        set the test cookie again and re-render the form with errors.
        """
        self.set_test_cookie()
        return super(Login, self).form_invalid(form)

    def get_success_url(self):
        """Resolve the post-login redirect, rejecting off-host targets."""
        if self.success_url:
            redirect_to = self.success_url
        else:
            redirect_to = self.request.REQUEST.get(
                self.redirect_field_name,
                ''
            )
        netloc = urlparse.urlparse(redirect_to)[1]
        if not redirect_to:
            redirect_to = settings.LOGIN_REDIRECT_URL
        # Security check -- don't allow redirection to a different host.
        elif netloc and netloc != self.request.get_host():
            redirect_to = settings.LOGIN_REDIRECT_URL
        return redirect_to

    def set_test_cookie(self):
        # Used to verify that the client accepts cookies before login.
        self.request.session.set_test_cookie()

    def check_and_delete_test_cookie(self):
        # Returns True (and clears the cookie) when the test cookie worked.
        if self.request.session.test_cookie_worked():
            self.request.session.delete_test_cookie()
            return True
        return False

    def get(self, request, *args, **kwargs):
        """
        Same as django.views.generic.edit.ProcessFormView.get(),
        but adds test cookie stuff.
        """
        self.set_test_cookie()
        return super(Login, self).get(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        # Expose the configured social-auth backends to the template.
        context = super(Login, self).get_context_data(**kwargs)
        context['available_backends'] = \
            load_backends(settings.AUTHENTICATION_BACKENDS)
        return context
class Logout(TemplateView):
    """Log the user out (GET or POST) and redirect or render the home page."""

    template_name = "user/home.html"
    redirect_field_name = "next"

    def get(self, *args, **kwargs):
        # Log out immediately; if that left us anonymous, redirect,
        # otherwise render the template.
        logout(self.request)
        if not self.request.user.is_authenticated():
            return redirect(self.get_redirect_url())
        context = self.get_context_data()
        return self.render_to_response(context)

    def post(self, *args, **kwargs):
        # POST always ends with a redirect.
        if self.request.user.is_authenticated():
            logout(self.request)
        return redirect(self.get_redirect_url())

    def get_context_data(self, **kwargs):
        # Pass the redirect field name/value through to the template.
        context = kwargs
        redirect_field_name = self.get_redirect_field_name()
        context.update({
            "redirect_field_name": redirect_field_name,
            "redirect_field_value":
                self.request.REQUEST.get(redirect_field_name),
        })
        return context

    def get_redirect_field_name(self):
        return self.redirect_field_name

    def get_redirect_url(self, fallback_url=None, **kwargs):
        # Fall back to LOGIN_URL when no explicit fallback is given.
        if fallback_url is None:
            fallback_url = settings.LOGIN_URL
        kwargs.setdefault(
            "redirect_field_name",
            self.get_redirect_field_name()
        )
        return default_redirect(self.request, fallback_url, **kwargs)
class RequireEmail(TemplateView):
    """Page shown when the social-auth pipeline still needs an email address."""

    template_name = "home.html"

    def get_context_data(self, **kwargs):
        context = super(RequireEmail, self).get_context_data(**kwargs)
        context['email_required'] = True
        # BUG FIX: the session lives on the request, not on the view;
        # ``self.session`` raised AttributeError (TemplateView defines no
        # such attribute).
        context['backend'] = self.request.session['partial_pipeline']['backend']
        context['available_backends'] = \
            load_backends(settings.AUTHENTICATION_BACKENDS)
        return context
class ProfileView(DetailView):
    """Show a Profile: a specific user's when ``pk`` is given, else the
    requesting user's own (created on demand)."""

    model = Profile

    def get_object(self):
        pk = self.request.GET.get('pk')
        if pk is not None:
            owner = User.objects.get(pk=pk)
            return self.model.objects.get(user=owner)
        profile, created = self.model.objects.get_or_create(
            user=self.request.user
        )
        if created:
            profile.save()
        return profile
| 30.5 | 74 | 0.65624 |
80587ec6d4d72f3c63807d07dbaea35fac808aa8 | 822 | py | Python | main/GOLD/gold_rates.py | TSG405/Unit-Converter | 98bdedc8130614ef41ba775b67488fb93df4b2da | [
"BSD-3-Clause"
] | 1 | 2021-04-25T05:33:53.000Z | 2021-04-25T05:33:53.000Z | main/GOLD/gold_rates.py | TSG405/Unit-Converter | 98bdedc8130614ef41ba775b67488fb93df4b2da | [
"BSD-3-Clause"
] | null | null | null | main/GOLD/gold_rates.py | TSG405/Unit-Converter | 98bdedc8130614ef41ba775b67488fb93df4b2da | [
"BSD-3-Clause"
] | null | null | null | import http.client
import json
import time
# Prompt the operator for their goldapi.io access token at startup.
API = input("BOT: ENTER YOUR GOLD-API TOKEN :--- \t")
print("\n")
# API-ACCESS FUNCTION
def gold_api(API):
    """Fetch the current XAU/INR gold price data from goldapi.io.

    :param API: goldapi.io access token.
    :return: decoded JSON response as a dict, or None when the request
        fails (an error message is printed instead of raising).
    """
    payload = ''
    headers = {
        'x-access-token': API,
        'Content-Type': 'application/json'
    }
    url = "www.goldapi.io"
    conn = None
    try:
        conn = http.client.HTTPSConnection(url)
        print("LOADING...")
        time.sleep(2)
        conn.request("GET", "/api/XAU/INR", payload, headers)
        res = conn.getresponse()
        return json.loads(res.read().decode("utf-8"))
    except Exception:
        # Deliberately best-effort: report the failure and return None.
        print("\nERROR!! Try checking the API-PIN, AND INTERNET CONNECTIVITY!")
    finally:
        # BUG FIX: the original never closed the connection, leaking the
        # socket on every call (success or failure).
        if conn is not None:
            conn.close()
# DRIVER CODE..
try:
    data = gold_api(API)
    gold_price = data['price']
    print("GOLD PRICE, for today [INR] :\t", gold_price)
except Exception:
    # ``data`` is None when the API call failed; subscripting it lands here.
    print("~Thank You")

# BUG FIX: the trailing credit line was a bare ``@ CODED BY TSG405, 2021``
# statement, which is a SyntaxError; it is now a comment:
# CODED BY TSG405, 2021
| 17.489362 | 77 | 0.607056 |
3274af8195107628875f00052f449102daa241f4 | 12,183 | py | Python | graphene_django_extras/utils.py | ZuluPro/graphene-django-extras | 184ceea7c4807b4db5e9ae81077f09e251cff80d | [
"MIT"
] | null | null | null | graphene_django_extras/utils.py | ZuluPro/graphene-django-extras | 184ceea7c4807b4db5e9ae81077f09e251cff80d | [
"MIT"
] | null | null | null | graphene_django_extras/utils.py | ZuluPro/graphene-django-extras | 184ceea7c4807b4db5e9ae81077f09e251cff80d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import inspect
import re
import six
from collections import OrderedDict
from django import VERSION as DJANGO_VERSION
from django.apps import apps
from django.contrib.contenttypes.fields import GenericForeignKey, GenericRel
from django.core.exceptions import ValidationError
from django.db.models import NOT_PROVIDED, QuerySet, Manager, Model, ManyToOneRel, ManyToManyRel
from django.db.models.base import ModelBase
from graphene.utils.str_converters import to_snake_case
from graphene_django.utils import is_valid_django_model
from graphql import GraphQLList, GraphQLNonNull
from graphql.language.ast import FragmentSpread
def get_reverse_fields(model):
    """Yield ``(attr_name, relation)`` pairs for the model's reverse relations.

    Covers reverse foreign keys (ManyToOneRel) and non-symmetrical reverse
    many-to-many relations (ManyToManyRel).
    """
    for attr_name, descriptor in model.__dict__.items():
        # Django >= 1.9 exposes the relation as ``rel``; Django < 1.9
        # used ``related``.
        relation = getattr(descriptor, 'rel', None) or \
            getattr(descriptor, 'related', None)
        if isinstance(relation, ManyToOneRel):
            yield (attr_name, relation)
        elif isinstance(relation, ManyToManyRel) and not relation.symmetrical:
            yield (attr_name, relation)
def _resolve_model(obj):
    """
    Resolve supplied `obj` to a Django model class.

    `obj` must be a Django model class itself, or a string
    representation of one. Useful in situations like GH #1225 where
    Django may not have resolved a string-based reference to a model in
    another model's foreign key definition.

    String representations should have the format:
        'appname.ModelName'

    :raises ImproperlyConfigured: if the app/model lookup returns nothing.
    :raises ValueError: if *obj* is neither a model class nor such a string.
    """
    if isinstance(obj, six.string_types) and len(obj.split('.')) == 2:
        # BUG FIX: ImproperlyConfigured was referenced below but never
        # imported at module level, so this branch raised NameError
        # instead of the intended exception.
        from django.core.exceptions import ImproperlyConfigured
        app_name, model_name = obj.split('.')
        resolved_model = apps.get_model(app_name, model_name)
        if resolved_model is None:
            msg = "Django did not return a model for {0}.{1}"
            raise ImproperlyConfigured(msg.format(app_name, model_name))
        return resolved_model
    elif inspect.isclass(obj) and issubclass(obj, Model):
        return obj
    raise ValueError("{0} is not a Django model".format(obj))
def to_kebab_case(name):
    """Convert *name* to kebab-case (e.g. ``"hello world"`` -> ``"hello-world"``)."""
    collapsed = name.title().replace(' ', '')
    dashed = re.sub('(.)([A-Z][a-z]+)', r'\1-\2', collapsed)
    return re.sub('([a-z0-9])([A-Z])', r'\1-\2', dashed).lower()
def get_related_model(field):
    """Return the model class on the other side of a relational *field*."""
    if DJANGO_VERSION >= (1, 9):
        return field.remote_field.model
    # Backward compatibility patch for Django versions lower than 1.9.x
    return _resolve_model(field.rel.to)
def get_model_fields(model):
    """Return ``(name, field)`` pairs for all of *model*'s fields.

    Combines concrete fields, local many-to-many fields, private/virtual
    fields and the reverse-relation field map, then de-duplicates local
    fields that also appear as reverse relations so the reverse django
    ``related_name`` is used for those.
    """
    # Backward compatibility patch for Django versions lower than 1.11.x
    if DJANGO_VERSION >= (1, 11):
        private_fields = model._meta.private_fields
    else:
        private_fields = model._meta.virtual_fields
    all_fields_list = list(model._meta.fields) + \
        list(model._meta.local_many_to_many) + \
        list(private_fields) + \
        list(model._meta.fields_map.values())
    # Make sure we don't duplicate local fields with "reverse" version
    # and get the real reverse django related_name
    reverse_fields = list(get_reverse_fields(model))
    exclude_fields = [field[1] for field in reverse_fields]
    local_fields = [
        (field.name, field)
        for field
        in all_fields_list if field not in exclude_fields
    ]
    all_fields = local_fields + reverse_fields
    return all_fields
def get_obj(app_label, model_name, object_id):
    """
    Resolve a model by app label / model name and fetch one instance by pk.

    :param app_label: Django app label, e.g. ``'auth'``
    :param model_name: Model name, e.g. ``'User'``
    :param object_id: Primary key of the instance to fetch
    :return: the instance, or None when the model or the row does not exist
    :raises ValidationError, TypeError, Exception: re-raised with their
        string message, matching the original behaviour.
    """
    # BUG FIX: the model lookup is resolved first.  Previously a failed
    # ``apps.get_model`` left ``model`` unbound, and evaluating the
    # ``except model.DoesNotExist`` clause then raised NameError instead
    # of returning None.
    try:
        model = apps.get_model('{}.{}'.format(app_label, model_name))
    except LookupError:
        return None
    try:
        assert is_valid_django_model(model), (
            'Model {}.{} do not exist.').format(app_label, model_name)
        return get_Object_or_None(model, pk=object_id)
    except model.DoesNotExist:
        return None
    except ValidationError as e:
        raise ValidationError(e.__str__())
    except TypeError as e:
        raise TypeError(e.__str__())
    except Exception as e:
        raise Exception(e.__str__())
def create_obj(model, new_obj_key=None, *args, **kwargs):
    """
    Function used by my on traditional Mutations to create objs

    :param model: A valid Django Model or a string with format:
        <app_label>.<model_name>
    :param new_obj_key: Key into kwargs that contains de data: new_person
    :param args: unused, kept for signature compatibility
    :param kwargs: Dict with model attributes values
    :return: instance of model after saved it; None when the model lookup
        fails (LookupError is swallowed); the exception message string on
        any unexpected error (NOTE: callers must check the return type).
    """
    try:
        if isinstance(model, six.string_types):
            model = apps.get_model(model)
        assert is_valid_django_model(model), (
            'You need to pass a valid Django Model or a string with format: '
            '<app_label>.<model_name> to "create_obj"'
            ' function, received "{}".').format(model)
        # When new_obj_key is given, the payload is nested under that key.
        data = kwargs.get(new_obj_key, None) if new_obj_key else kwargs
        new_obj = model(**data)
        new_obj.full_clean()
        new_obj.save()
        return new_obj
    except LookupError as e:
        # Unknown model string: fall through and implicitly return None.
        pass
    except ValidationError as e:
        raise ValidationError(e.__str__())
    except TypeError as e:
        raise TypeError(e.__str__())
    except Exception as e:
        # NOTE(review): returns the error text instead of raising.
        return e.__str__()
def clean_dict(d):
    """
    Recursively drop falsy values ([], {}, '', 0, None, ...) from nested
    dicts and lists; any other value passes through unchanged.
    """
    if isinstance(d, list):
        cleaned_items = (clean_dict(item) for item in d)
        return [item for item in cleaned_items if item]
    if isinstance(d, dict):
        cleaned_pairs = ((key, clean_dict(value)) for key, value in d.items())
        return OrderedDict((key, value) for key, value in cleaned_pairs if value)
    return d
def get_type(_type):
if isinstance(_type, (GraphQLList, GraphQLNonNull)):
return get_type(_type.of_type)
return _type
def get_fields(info):
fragments = info.fragments
field_asts = info.field_asts[0].selection_set.selections
_type = get_type(info.return_type)
for field_ast in field_asts:
field_name = field_ast.name.value
if isinstance(field_ast, FragmentSpread):
for field in fragments[field_name].selection_set.selections:
yield field.name.value
continue
yield field_name
def is_required(field):
try:
blank = getattr(field, 'blank', getattr(field, 'field', None))
default = getattr(field, 'default', getattr(field, 'field', None))
null = getattr(field, 'null', getattr(field, 'field', None))
if blank is None:
blank = True
elif not isinstance(blank, bool):
blank = getattr(blank, 'blank', True)
if default is None:
default = NOT_PROVIDED
elif default != NOT_PROVIDED:
default = getattr(default, 'default', default)
except AttributeError:
return False
return not blank and default == NOT_PROVIDED
def _get_queryset(klass):
"""
Returns a QuerySet from a Model, Manager, or QuerySet. Created to make
get_object_or_404 and get_list_or_404 more DRY.
Raises a ValueError if klass is not a Model, Manager, or QuerySet.
"""
if isinstance(klass, QuerySet):
return klass
elif isinstance(klass, Manager):
manager = klass
elif isinstance(klass, ModelBase):
manager = klass._default_manager
else:
if isinstance(klass, type):
klass__name = klass.__name__
else:
klass__name = klass.__class__.__name__
raise ValueError("Object is of type '{}', but must be a Django Model, "
"Manager, or QuerySet".format(klass__name))
return manager.all()
def get_Object_or_None(klass, *args, **kwargs):
"""
Uses get() to return an object, or None if the object does not exist.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the get() query.
Note: Like with get(), an MultipleObjectsReturned will be raised
if more than one object is found.
Ex: get_Object_or_None(User, db, id=1)
"""
queryset = _get_queryset(klass)
try:
if args:
return queryset.using(args[0]).get(**kwargs)
else:
return queryset.get(*args, **kwargs)
except queryset.model.DoesNotExist:
return None
# except queryset.model.MultipleObjectsReturned:
# return get_Objects_or_None(klass, *args, **kwargs)
def get_extra_filters(root, model):
extra_filters = {}
for field in model._meta.get_fields():
if field.is_relation and field.related_model == root._meta.model:
extra_filters.update({field.name: root})
return extra_filters
def get_related_fields(model):
return {
field.name: field
for field in model._meta.get_fields()
if field.is_relation and
not isinstance(field, (GenericForeignKey, GenericRel))
}
def find_field(field, fields_dict):
temp = fields_dict.get(
field.name.value,
fields_dict.get(
to_snake_case(field.name.value),
None)
)
return temp
def recursive_params(selection_set, fragments, available_related_fields,
select_related, prefetch_related):
for field in selection_set.selections:
if isinstance(field, FragmentSpread) and fragments:
a, b = recursive_params(
fragments[field.name.value].selection_set,
fragments,
available_related_fields,
select_related, prefetch_related
)
[select_related.append(x) for x in a if x not in select_related]
[prefetch_related.append(x)
for x in b if x not in prefetch_related]
continue
temp = available_related_fields.get(
field.name.value,
available_related_fields.get(
to_snake_case(field.name.value),
None)
)
if temp and temp.name not in [prefetch_related + select_related]:
if temp.many_to_many or temp.one_to_many:
prefetch_related.append(temp.name)
else:
select_related.append(temp.name)
elif getattr(field, 'selection_set', None):
a, b = recursive_params(
field.selection_set,
fragments,
available_related_fields,
select_related,
prefetch_related
)
[select_related.append(x) for x in a if x not in select_related]
[prefetch_related.append(x)
for x in b if x not in prefetch_related]
return select_related, prefetch_related
def queryset_factory(manager, fields_asts=None, fragments=None, **kwargs):
select_related = []
prefetch_related = []
available_related_fields = get_related_fields(manager.model)
for f in kwargs.keys():
temp = available_related_fields.get(f.split('__', 1)[0], None)
if temp:
if (temp.many_to_many or temp.one_to_many) and \
temp.name not in prefetch_related:
prefetch_related.append(temp.name)
else:
select_related.append(temp.name)
if fields_asts:
select_related, prefetch_related = recursive_params(
fields_asts[0].selection_set,
fragments,
available_related_fields,
select_related,
prefetch_related
)
if select_related and prefetch_related:
return _get_queryset(manager.select_related(
*select_related).prefetch_related(*prefetch_related))
elif not select_related and prefetch_related:
return _get_queryset(manager.prefetch_related(*prefetch_related))
elif select_related and not prefetch_related:
return _get_queryset(manager.select_related(*select_related))
return _get_queryset(manager)
def parse_validation_exc(validation_exc):
errors_list = []
for key, value in validation_exc.error_dict.items():
for exc in value:
errors_list.append({"field": key, "messages": exc.messages})
return errors_list
| 32.75 | 98 | 0.649183 |
a9292306679f05b64d4fd33f4afff7871ce39192 | 18,412 | py | Python | Lib/site-packages/google/cloud/language_v1/services/language_service/transports/grpc_asyncio.py | AbdelrahmanG/google_nl_api | 3252c1b6a24a5d763543efd15a799e97653a6cf3 | [
"0BSD"
] | null | null | null | Lib/site-packages/google/cloud/language_v1/services/language_service/transports/grpc_asyncio.py | AbdelrahmanG/google_nl_api | 3252c1b6a24a5d763543efd15a799e97653a6cf3 | [
"0BSD"
] | null | null | null | Lib/site-packages/google/cloud/language_v1/services/language_service/transports/grpc_asyncio.py | AbdelrahmanG/google_nl_api | 3252c1b6a24a5d763543efd15a799e97653a6cf3 | [
"0BSD"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.language_v1.types import language_service
from .base import LanguageServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import LanguageServiceGrpcTransport
class LanguageServiceGrpcAsyncIOTransport(LanguageServiceTransport):
"""gRPC AsyncIO backend transport for LanguageService.
Provides text analysis operations such as sentiment analysis
and entity recognition.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "language.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
def __init__(
self,
*,
host: str = "language.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def analyze_sentiment(
self,
) -> Callable[
[language_service.AnalyzeSentimentRequest],
Awaitable[language_service.AnalyzeSentimentResponse],
]:
r"""Return a callable for the analyze sentiment method over gRPC.
Analyzes the sentiment of the provided text.
Returns:
Callable[[~.AnalyzeSentimentRequest],
Awaitable[~.AnalyzeSentimentResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "analyze_sentiment" not in self._stubs:
self._stubs["analyze_sentiment"] = self.grpc_channel.unary_unary(
"/google.cloud.language.v1.LanguageService/AnalyzeSentiment",
request_serializer=language_service.AnalyzeSentimentRequest.serialize,
response_deserializer=language_service.AnalyzeSentimentResponse.deserialize,
)
return self._stubs["analyze_sentiment"]
@property
def analyze_entities(
self,
) -> Callable[
[language_service.AnalyzeEntitiesRequest],
Awaitable[language_service.AnalyzeEntitiesResponse],
]:
r"""Return a callable for the analyze entities method over gRPC.
Finds named entities (currently proper names and
common nouns) in the text along with entity types,
salience, mentions for each entity, and other
properties.
Returns:
Callable[[~.AnalyzeEntitiesRequest],
Awaitable[~.AnalyzeEntitiesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "analyze_entities" not in self._stubs:
self._stubs["analyze_entities"] = self.grpc_channel.unary_unary(
"/google.cloud.language.v1.LanguageService/AnalyzeEntities",
request_serializer=language_service.AnalyzeEntitiesRequest.serialize,
response_deserializer=language_service.AnalyzeEntitiesResponse.deserialize,
)
return self._stubs["analyze_entities"]
@property
def analyze_entity_sentiment(
self,
) -> Callable[
[language_service.AnalyzeEntitySentimentRequest],
Awaitable[language_service.AnalyzeEntitySentimentResponse],
]:
r"""Return a callable for the analyze entity sentiment method over gRPC.
Finds entities, similar to
[AnalyzeEntities][google.cloud.language.v1.LanguageService.AnalyzeEntities]
in the text and analyzes sentiment associated with each entity
and its mentions.
Returns:
Callable[[~.AnalyzeEntitySentimentRequest],
Awaitable[~.AnalyzeEntitySentimentResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "analyze_entity_sentiment" not in self._stubs:
self._stubs["analyze_entity_sentiment"] = self.grpc_channel.unary_unary(
"/google.cloud.language.v1.LanguageService/AnalyzeEntitySentiment",
request_serializer=language_service.AnalyzeEntitySentimentRequest.serialize,
response_deserializer=language_service.AnalyzeEntitySentimentResponse.deserialize,
)
return self._stubs["analyze_entity_sentiment"]
@property
def analyze_syntax(
self,
) -> Callable[
[language_service.AnalyzeSyntaxRequest],
Awaitable[language_service.AnalyzeSyntaxResponse],
]:
r"""Return a callable for the analyze syntax method over gRPC.
Analyzes the syntax of the text and provides sentence
boundaries and tokenization along with part of speech
tags, dependency trees, and other properties.
Returns:
Callable[[~.AnalyzeSyntaxRequest],
Awaitable[~.AnalyzeSyntaxResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "analyze_syntax" not in self._stubs:
self._stubs["analyze_syntax"] = self.grpc_channel.unary_unary(
"/google.cloud.language.v1.LanguageService/AnalyzeSyntax",
request_serializer=language_service.AnalyzeSyntaxRequest.serialize,
response_deserializer=language_service.AnalyzeSyntaxResponse.deserialize,
)
return self._stubs["analyze_syntax"]
@property
def classify_text(
self,
) -> Callable[
[language_service.ClassifyTextRequest],
Awaitable[language_service.ClassifyTextResponse],
]:
r"""Return a callable for the classify text method over gRPC.
Classifies a document into categories.
Returns:
Callable[[~.ClassifyTextRequest],
Awaitable[~.ClassifyTextResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "classify_text" not in self._stubs:
self._stubs["classify_text"] = self.grpc_channel.unary_unary(
"/google.cloud.language.v1.LanguageService/ClassifyText",
request_serializer=language_service.ClassifyTextRequest.serialize,
response_deserializer=language_service.ClassifyTextResponse.deserialize,
)
return self._stubs["classify_text"]
@property
def annotate_text(
self,
) -> Callable[
[language_service.AnnotateTextRequest],
Awaitable[language_service.AnnotateTextResponse],
]:
r"""Return a callable for the annotate text method over gRPC.
A convenience method that provides all the features
that analyzeSentiment, analyzeEntities, and
analyzeSyntax provide in one call.
Returns:
Callable[[~.AnnotateTextRequest],
Awaitable[~.AnnotateTextResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "annotate_text" not in self._stubs:
self._stubs["annotate_text"] = self.grpc_channel.unary_unary(
"/google.cloud.language.v1.LanguageService/AnnotateText",
request_serializer=language_service.AnnotateTextRequest.serialize,
response_deserializer=language_service.AnnotateTextResponse.deserialize,
)
return self._stubs["annotate_text"]
def close(self):
return self.grpc_channel.close()
__all__ = ("LanguageServiceGrpcAsyncIOTransport",)
| 43.838095 | 98 | 0.643222 |
9d79d8938ac10c77c73544f7f3a7cc305eaab842 | 1,019 | py | Python | zendesk/komand_zendesk/actions/show_user/schema.py | killstrelok/insightconnect-plugins | 911358925f4233ab273dbd8172e8b7b9188ebc01 | [
"MIT"
] | 1 | 2020-03-18T09:14:55.000Z | 2020-03-18T09:14:55.000Z | zendesk/komand_zendesk/actions/show_user/schema.py | killstrelok/insightconnect-plugins | 911358925f4233ab273dbd8172e8b7b9188ebc01 | [
"MIT"
] | 1 | 2021-02-23T23:57:37.000Z | 2021-02-23T23:57:37.000Z | zendesk/komand_zendesk/actions/show_user/schema.py | killstrelok/insightconnect-plugins | 911358925f4233ab273dbd8172e8b7b9188ebc01 | [
"MIT"
] | null | null | null | # GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Component:
DESCRIPTION = "Retrieve user information"
class Input:
USER_ID = "user_id"
class Output:
USER = "user"
class ShowUserInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"user_id": {
"type": "string",
"title": "User ID",
"description": "ID of user to show E.g. 20444826487",
"order": 1
}
},
"required": [
"user_id"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class ShowUserOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"user": {
"type": "object",
"title": "User Info",
"description": "User meta data",
"order": 1
}
},
"required": [
"user"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
| 16.435484 | 59 | 0.554465 |
7bab07a547f268e4e48f7f00cddec1af2abd0ead | 975 | py | Python | tests/sentry/api/endpoints/test_project_stats.py | seukjung/sentry-custom | c5f6bb2019aef3caff7f3e2b619f7a70f2b9b963 | [
"BSD-3-Clause"
] | null | null | null | tests/sentry/api/endpoints/test_project_stats.py | seukjung/sentry-custom | c5f6bb2019aef3caff7f3e2b619f7a70f2b9b963 | [
"BSD-3-Clause"
] | 8 | 2019-12-28T23:49:55.000Z | 2022-03-02T04:34:18.000Z | tests/sentry/api/endpoints/test_project_stats.py | seukjung/sentry-custom | c5f6bb2019aef3caff7f3e2b619f7a70f2b9b963 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
from django.core.urlresolvers import reverse
from sentry import tsdb
from sentry.testutils import APITestCase
class ProjectStatsTest(APITestCase):
def test_simple(self):
self.login_as(user=self.user)
project1 = self.create_project(name='foo')
project2 = self.create_project(name='bar')
tsdb.incr(tsdb.models.project_total_received, project1.id, count=3)
tsdb.incr(tsdb.models.project_total_received, project2.id, count=5)
url = reverse('sentry-api-0-project-stats', kwargs={
'organization_slug': project1.organization.slug,
'project_slug': project1.slug,
})
response = self.client.get(url, format='json')
assert response.status_code == 200, response.content
assert response.data[-1][1] == 3, response.data
for point in response.data[:-1]:
assert point[1] == 0
assert len(response.data) == 24
| 32.5 | 75 | 0.670769 |
1847781855d5cfa00a0b56ec8fb66cffb60b6f9a | 502 | py | Python | src/element_generation.py | cemulate/minecraft-hdl | a46da8d2a29aad9c2fc84037d677190c6db80dcd | [
"MIT"
] | 5 | 2015-09-11T04:13:01.000Z | 2021-11-17T14:35:28.000Z | src/element_generation.py | cemulate/minecraft-hdl | a46da8d2a29aad9c2fc84037d677190c6db80dcd | [
"MIT"
] | null | null | null | src/element_generation.py | cemulate/minecraft-hdl | a46da8d2a29aad9c2fc84037d677190c6db80dcd | [
"MIT"
] | 1 | 2021-03-15T17:31:27.000Z | 2021-03-15T17:31:27.000Z | from Input import *
import combinational_element_factory
import addition_element_factory
def generate_element(equation, use_input_color_key = None, use_output_color_key = None):
if type(equation) is SumOfProductsEquation:
return combinational_element_factory.generate(equation, use_input_color_key, use_output_color_key)
elif type(equation) is AdditionEquation:
return addition_element_factory.generate(equation, use_input_color_key, use_output_color_key) | 45.636364 | 106 | 0.796813 |
7ee5eed4b383fcb1fb617a103fdd258e4d14c512 | 707 | py | Python | src/openbiolink/graph_creation/file_processor/onto_mapping/ontoMapDoUmlsProcessor.py | jerryhluo/OpenBioLink | 6fc073af978daec0b0db5938b73beed37f57f495 | [
"MIT"
] | 97 | 2019-11-26T09:53:18.000Z | 2022-03-19T10:33:10.000Z | src/openbiolink/graph_creation/file_processor/onto_mapping/ontoMapDoUmlsProcessor.py | jerryhluo/OpenBioLink | 6fc073af978daec0b0db5938b73beed37f57f495 | [
"MIT"
] | 67 | 2019-12-09T21:01:52.000Z | 2021-12-21T15:19:41.000Z | src/openbiolink/graph_creation/file_processor/onto_mapping/ontoMapDoUmlsProcessor.py | jerryhluo/OpenBioLink | 6fc073af978daec0b0db5938b73beed37f57f495 | [
"MIT"
] | 20 | 2020-01-13T23:02:25.000Z | 2022-03-16T21:43:31.000Z | from openbiolink.graph_creation.file_processor.fileProcessor import FileProcessor
from openbiolink.graph_creation.metadata_infile.mapping.inMetaMapOntoDoUmls import InMetaMapOntoDoUmls
from openbiolink.graph_creation.types.infileType import InfileType
from openbiolink.graph_creation.types.readerType import ReaderType
class OntoMapDoUmlsProcessor(FileProcessor):
IN_META_CLASS = InMetaMapOntoDoUmls
def __init__(self):
self.use_cols = self.IN_META_CLASS.USE_COLS
super().__init__(
self.use_cols,
readerType=ReaderType.READER_ONTO_DO,
infileType=InfileType.IN_MAP_ONTO_DO_UMLS,
mapping_sep=self.IN_META_CLASS.MAPPING_SEP,
)
| 39.277778 | 102 | 0.779349 |
96b3f3dda7fe13ed5ef2f34c83781613614f91a7 | 5,055 | py | Python | fjarrsyn/tests/test_agent_fsimulate_sample_1.py | anderzzz/fjarrsyn | 742f86d727634e3612cf0b4e1ca78af9d0ccf525 | [
"BSD-3-Clause"
] | null | null | null | fjarrsyn/tests/test_agent_fsimulate_sample_1.py | anderzzz/fjarrsyn | 742f86d727634e3612cf0b4e1ca78af9d0ccf525 | [
"BSD-3-Clause"
] | null | null | null | fjarrsyn/tests/test_agent_fsimulate_sample_1.py | anderzzz/fjarrsyn | 742f86d727634e3612cf0b4e1ca78af9d0ccf525 | [
"BSD-3-Clause"
] | null | null | null | '''Integration test of finite simulation of a system
'''
import pytest
import os
from fjarrsyn.simulation.simulator import FiniteSystemRunner
from fjarrsyn.simulation.sampler import AgentSampler, SystemIO
from fjarrsyn.core.agent import Agent
from fjarrsyn.core.agent_ms import AgentManagementSystem
from fjarrsyn.core.message import Belief, Resource, Essence
from fjarrsyn.core.instructor import Interpreter
from fjarrsyn.core.scaffold_map import ResourceMap
from fjarrsyn.core.mover import Mover
REF_ENERGY = [20.0, 120.0]
REF_BELIEF = [(11874.41406, 0.625), (11241.60156, 0.625)]
def isclose(a, b, rel_tol=1e-9, abs_tol=0.0):
return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
def propagator(system):
for node in system:
node.agent_content()
class Thinker(Agent):
def contemplation(self, diameter_value, precision):
diameter_value += (12742.0 - diameter_value) * 0.25
precision = precision * 0.5
energy_cost = 20.0
return diameter_value, precision, -1.0 * energy_cost
def __call__(self):
self.interpret('Contemplate')
def __init__(self, name, diameter_init, energy_init, essence_value):
super().__init__(name, strict_engine=True)
belief = Belief('The diameter of the world', ('value', 'precision'))
belief.set_values([diameter_init, 10.0])
belief_dummy = Belief('Sky colour', ('colour_name',))
belief_dummy.set_values(['blue'])
self.set_message(belief_dummy)
resource = Resource('Dietary energy', ('value',))
essence = Essence('Persistence', ('value',))
resource.set_values(energy_init)
essence.set_values(essence_value)
self.set_scaffolds(resource, essence)
metabolism = ResourceMap('Dietary energy adjustment', 'delta', 'value', ('shift',))
interpreter = Interpreter('Contemplate', self.contemplation, belief, belief,
metabolism)
self.set_organ(interpreter)
def test_main():
agent_1 = Thinker('Alpha', 10000.0, 100.0, 2.0)
agent_2 = Thinker('Beta', 8000.0, 200.0, 5.0)
ams = AgentManagementSystem('Pair', [agent_1, agent_2])
agent_sampler_1 = AgentSampler('sampler_1',
resource_args=[('Dietary energy', 'value')],
essence_args=[('Persistence', 'value')],
belief_args=[('The diameter of the world', 'value'),
('Sky colour', 'colour_name')],
sample_steps=2)
agent_sampler_2 = AgentSampler('sampler_2',
essence_args=[('Persistence', 'value')],
sample_steps=3)
io = SystemIO([('tmp_1', agent_sampler_1, 'to_csv'),
('tmp_2', agent_sampler_2, 'to_json')])
mover = Mover('move_thinker', propagator)
runner = FiniteSystemRunner(4, mover, system_io=io)
runner(ams)
exist_1 = os.path.isfile('tmp_10.csv')
exist_2 = os.path.isfile('tmp_12.csv')
exist_3 = os.path.isfile('tmp_20.json')
exist_4 = os.path.isfile('tmp_23.json')
assert(exist_1)
assert(exist_2)
assert(exist_3)
assert(exist_4)
if exist_1:
data = open('tmp_10.csv').read()
assert ('belief:Sky colour:colour_name,blue' in data)
assert ('belief:The diameter of the world:value,10685.5' in data)
assert ('essence:Persistence:value,2.0' in data)
assert ('resource:Dietary energy:value,80.0' in data)
assert ('belief:The diameter of the world:value,9185.5' in data)
assert ('essence:Persistence:value,5.0' in data)
assert ('resource:Dietary energy:value,180.0' in data)
os.remove('tmp_10.csv')
if exist_2:
data = open('tmp_12.csv').read()
assert ('belief:Sky colour:colour_name,blue' in data)
assert ('belief:The diameter of the world:value,11585.21875' in data)
assert ('essence:Persistence:value,2.0' in data)
assert ('resource:Dietary energy:value,40.0' in data)
assert ('belief:The diameter of the world:value,10741.46875' in data)
assert ('essence:Persistence:value,5.0' in data)
assert ('resource:Dietary energy:value,140.0' in data)
os.remove('tmp_12.csv')
if exist_3:
data = open('tmp_20.json').read()
assert ('{"value":{"[0,"Alpha",' in data)
assert ('"essence:Persistence:value"]":2.0' in data)
assert ('"essence:Persistence:value"]":5.0' in data)
assert (not 'belief' in data)
assert (not 'resource' in data)
os.remove('tmp_20.json')
if exist_4:
data = open('tmp_23.json').read()
assert ('{"value":{"[3,"Alpha",' in data)
assert ('"essence:Persistence:value"]":2.0' in data)
assert ('"essence:Persistence:value"]":5.0' in data)
assert (not 'belief' in data)
assert (not 'resource' in data)
os.remove('tmp_23.json')
| 38.007519 | 91 | 0.618991 |
34f8a5dda0eae9b0d615bc3080ed246f99472dbb | 3,905 | py | Python | scrape_mars.py | rashhola/mission-to-mars | ca05299e45a67fb69f7b152c930dee4500c98464 | [
"Apache-2.0"
] | null | null | null | scrape_mars.py | rashhola/mission-to-mars | ca05299e45a67fb69f7b152c930dee4500c98464 | [
"Apache-2.0"
] | null | null | null | scrape_mars.py | rashhola/mission-to-mars | ca05299e45a67fb69f7b152c930dee4500c98464 | [
"Apache-2.0"
] | null | null | null | # Define a function called `scrape` that will execute all of your scraping code from the `mission_to_mars.ipynb` notebook and return one Python dictionary containing all of the scraped data.
from splinter import Browser
from bs4 import BeautifulSoup
from webdriver_manager.chrome import ChromeDriverManager
import pandas as pd
import pymongo
import time
# Function `scrape` will execute all of scraping code from `mission_to_mars.ipynb`
# Return one Python dictionary containing all of the scraped data.
def scrape():
# Set the executable path and initialize the chrome browser in splinter
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=True)
mars_data = {}
news = mars_news(browser)
mars_data['newstitle'] = news[0]
mars_data['parent news'] = news[1]
mars_data['image'] = mars_image(browser)
mars_data['facts'] = mars_facts(browser)
mars_data['hemis'] = mars_hemispheres(browser)
return mars_data
# Scrapes NASA Mars News Site
# Pulls out latest news title and paragraph description
def mars_news(browser):
    """Scrape the NASA Mars news site for the latest headline.

    Returns a two-element list: [title, teaser paragraph], both plain strings.
    """
    url = 'https://mars.nasa.gov/news/'
    browser.visit(url)
    time.sleep(5)
    html = browser.html
    sitesoup = BeautifulSoup(html, 'html.parser')
    siteelement = sitesoup.select_one('ul.item_list li.slide')
    newstitle = siteelement.find("div", class_='content_title').get_text()
    # Bug fix: get_text() already returns the teaser as a plain string.
    # The old code did `parentnews[0].text`, i.e. indexed a single character
    # of that string and called .text on it -> AttributeError every run.
    news_description = siteelement.find('div', class_="article_teaser_body").get_text()
    news = [newstitle, news_description]
    return news
# Scrapes JPL Mars Space Image Site
# Pulls out featured image of Mars
def mars_image(browser):
    """Scrape the JPL space-images page and return the featured image URL."""
    url = 'https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/index.html'
    browser.visit(url)
    time.sleep(5)
    soup = BeautifulSoup(browser.html, 'html.parser')
    # The featured picture is the (only) header image on the page.
    header_images = soup.findAll('img', class_="headerimage fade-in")
    relative_src = header_images[0].attrs['src']
    return 'https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/' + relative_src
# Scrapes Space Facts Site
# Pulls out table with Mars facts and converts the table from Pandas to HTML format
def mars_facts(browser):
    """Scrape the space-facts Mars page and return its facts table as HTML.

    Reads table #1 with pandas.read_html, renames the comparison column to
    'Attributes', drops the Earth column, and renders the result via to_html
    (no header, no index).
    """
    goin2mars = 'https://space-facts.com/mars/'
    # NOTE(review): pd.read_html fetches the URL itself, so this visit looks
    # redundant -- kept to preserve existing behaviour.
    browser.visit(goin2mars)
    mars_data = pd.read_html(goin2mars)[1]
    cleaned_mars_data = mars_data.rename(columns= {'Mars - Earth Comparison': 'Attributes'}).drop(columns = ["Earth"])
    # (removed a no-op bare `cleaned_mars_data` expression statement)
    cleaned_mars_data_table = cleaned_mars_data.to_html(header=False, index=False)
    return cleaned_mars_data_table
# Scrapes Astrogeology USGS Site
# Pulls out high resolution images for each of Mar's hemispheres
# Results of image titles and urls are in list of dictionary format
def mars_hemispheres(browser):
    """Scrape the USGS Astrogeology site for full-resolution hemisphere images.

    Returns a list of {'title': <hemisphere name>, 'imgUrl': <download href>}
    dicts, one per hemisphere result on the search page.
    """
    url = "https://astrogeology.usgs.gov"
    hemisphereUrl = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
    browser.visit(hemisphereUrl)
    time.sleep(1)
    html_hemisphere = browser.html
    soup_hemisphere = BeautifulSoup(html_hemisphere, 'html.parser')
    list_imgs_url = []
    # Bug fix: the old loop reassigned `imgtitles` (the h3 result list) to a
    # single title *string* on every pass and re-ran findAll each iteration.
    # Query the static search-page soup once, then index into the results.
    titles = soup_hemisphere.findAll('h3')
    links = soup_hemisphere.findAll('a', class_='itemLink product-item')
    for index in range(len(titles)):
        hemisphere = {}
        title_text = titles[index].text
        imgUrl = url + links[index]['href']
        browser.visit(imgUrl)
        time.sleep(1)
        soup = BeautifulSoup(browser.html, 'html.parser')
        # The full-resolution link lives in the detail page's downloads box.
        imagelinktitle = soup.find('div', class_='downloads').find('a')['href']
        hemisphere["title"] = title_text
        hemisphere["imgUrl"] = imagelinktitle
        list_imgs_url.append(hemisphere)
    return list_imgs_url
| 34.557522 | 191 | 0.708323 |
5254a9becec456d5b8305f823f8626cba71191bc | 25,431 | py | Python | desktop/core/ext-py/future-0.16.0/tests/test_future/test_bytes.py | kokosing/hue | 2307f5379a35aae9be871e836432e6f45138b3d9 | [
"Apache-2.0"
] | 3 | 2018-01-29T14:16:02.000Z | 2019-02-05T21:33:05.000Z | desktop/core/ext-py/future-0.16.0/tests/test_future/test_bytes.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 7 | 2019-11-28T21:48:38.000Z | 2020-08-02T18:06:40.000Z | desktop/core/ext-py/future-0.16.0/tests/test_future/test_bytes.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 6 | 2020-05-29T21:46:30.000Z | 2020-12-15T20:32:19.000Z | # -*- coding: utf-8 -*-
"""
Tests for the backported bytes object
"""
from __future__ import absolute_import, unicode_literals, print_function
from future.builtins import *
from future import utils
from numbers import Integral
from future.tests.base import unittest, expectedFailurePY2
# Sample data for round-trip encode/decode experiments.
# NOTE(review): neither constant is referenced by the TestBytes cases visible
# in this module -- presumably shared fixtures; confirm before removing.
TEST_UNICODE_STR = u'ℝεα∂@ßʟ℮ ☂ℯṧт υηḯ¢☺ḓ℮'
# Tk icon as a .gif:
TEST_BYTE_STR = b'GIF89a\x0e\x00\x0b\x00\x80\xff\x00\xff\x00\x00\xc0\xc0\xc0!\xf9\x04\x01\x00\x00\x01\x00,\x00\x00\x00\x00\x0e\x00\x0b\x00@\x02\x1f\x0c\x8e\x10\xbb\xcan\x90\x99\xaf&\xd8\x1a\xce\x9ar\x06F\xd7\xf1\x90\xa1c\x9e\xe8\x84\x99\x89\x97\xa2J\x01\x00;\x1a\x14\x00;;\xba\nD\x14\x00\x00;;'
class TestBytes(unittest.TestCase):
    """Behavioural tests for the Py3-style ``bytes`` backport in
    ``future.builtins``: construction, operators, methods, hashing and
    interaction with native Py2 byte-strings / unicode strings.
    """
    def test_bytes_encoding_arg(self):
        """
        The bytes class has changed in Python 3 to accept an
        additional argument in the constructor: encoding.
        It would be nice to support this without breaking the
        isinstance(..., bytes) test below.
        """
        u = u'Unicode string: \u5b54\u5b50'
        b = bytes(u, encoding='utf-8')
        self.assertEqual(b, u.encode('utf-8'))
        nu = str(u)
        b = bytes(nu, encoding='utf-8')
        self.assertEqual(b, u.encode('utf-8'))
    def test_bytes_encoding_arg_issue_193(self):
        """
        This used to be True: bytes(str(u'abc'), 'utf8') == b"b'abc'"
        """
        u = u'abc'
        b = bytes(str(u), 'utf8')
        self.assertNotEqual(b, b"b'abc'")
        self.assertEqual(b, b'abc')
        self.assertEqual(b, bytes(b'abc'))
    def test_bytes_encoding_arg_non_kwarg(self):
        """
        As above, but with a positional argument
        """
        u = u'Unicode string: \u5b54\u5b50'
        b = bytes(u, 'utf-8')
        self.assertEqual(b, u.encode('utf-8'))
        nu = str(u)
        b = bytes(nu, 'utf-8')
        self.assertEqual(b, u.encode('utf-8'))
    def test_bytes_string_no_encoding(self):
        with self.assertRaises(TypeError):
            bytes(u'ABC')
    def test_bytes_int(self):
        """
        In Py3, bytes(int) -> bytes object of size given by the parameter initialized with null
        """
        self.assertEqual(bytes(5), b'\x00\x00\x00\x00\x00')
        # Test using newint:
        self.assertEqual(bytes(int(5)), b'\x00\x00\x00\x00\x00')
        self.assertTrue(isinstance(bytes(int(5)), bytes))
        # Negative counts are not allowed in Py3:
        with self.assertRaises(ValueError):
            bytes(-1)
        with self.assertRaises(ValueError):
            bytes(int(-1))
    @unittest.skipIf(utils.PY3, 'test not needed on Py3: all ints are long')
    def test_bytes_long(self):
        """
        As above, but explicitly feeding in a long on Py2. Note that
        checks like:
            isinstance(n, int)
        are fragile on Py2, because isinstance(10L, int) is False.
        """
        m = long(5)
        n = long(-1)
        self.assertEqual(bytes(m), b'\x00\x00\x00\x00\x00')
        # Negative counts are not allowed in Py3:
        with self.assertRaises(ValueError):
            bytes(n)
    def test_bytes_empty(self):
        """
        bytes() -> b''
        """
        self.assertEqual(bytes(), b'')
    def test_bytes_iterable_of_ints(self):
        self.assertEqual(bytes([65, 66, 67]), b'ABC')
        self.assertEqual(bytes([int(120), int(121), int(122)]), b'xyz')
    def test_bytes_bytes(self):
        self.assertEqual(bytes(b'ABC'), b'ABC')
    def test_bytes_is_bytes(self):
        b = bytes(b'ABC')
        self.assertTrue(bytes(b) is b)
        self.assertEqual(repr(bytes(b)), "b'ABC'")
    def test_bytes_fromhex(self):
        self.assertEqual(bytes.fromhex('bb 0f'), b'\xbb\x0f')
        self.assertEqual(bytes.fromhex('1234'), b'\x124')
        self.assertEqual(bytes.fromhex('12ffa0'), b'\x12\xff\xa0')
        b = b'My bytestring'
        self.assertEqual(bytes(b).fromhex('bb 0f'), b'\xbb\x0f')
    def test_isinstance_bytes(self):
        self.assertTrue(isinstance(bytes(b'blah'), bytes))
    def test_isinstance_bytes_subclass(self):
        """
        Issue #89
        """
        value = bytes(b'abc')
        class Magic(bytes):
            pass
        self.assertTrue(isinstance(value, bytes))
        self.assertFalse(isinstance(value, Magic))
    def test_isinstance_oldbytestrings_bytes(self):
        """
        Watch out for this. Byte-strings produced in various places in Py2
        are of type 'str'. With 'from future.builtins import bytes', 'bytes'
        is redefined to be a subclass of 'str', not just an alias for 'str'.
        """
        self.assertTrue(isinstance(b'blah', bytes))   # not with the redefined bytes obj
        self.assertTrue(isinstance(u'blah'.encode('utf-8'), bytes))   # not with the redefined bytes obj
    def test_bytes_getitem(self):
        b = bytes(b'ABCD')
        self.assertEqual(b[0], 65)
        self.assertEqual(b[-1], 68)
        self.assertEqual(b[0:1], b'A')
        self.assertEqual(b[:], b'ABCD')
    @expectedFailurePY2
    def test_b_literal_creates_newbytes_object(self):
        """
        It would nice if the b'' literal syntax could be coaxed into producing
        bytes objects somehow ... ;)
        """
        b = b'ABCD'
        self.assertTrue(isinstance(b, bytes))
        self.assertEqual(b[0], 65)
        self.assertTrue(repr(b).startswith('b'))
    def test_repr(self):
        b = bytes(b'ABCD')
        self.assertTrue(repr(b).startswith('b'))
    def test_str(self):
        b = bytes(b'ABCD')
        self.assertTrue(str(b), "b'ABCD'")
    def test_bytes_setitem(self):
        b = b'ABCD'
        with self.assertRaises(TypeError):
            b[0] = b'B'
    def test_bytes_iteration(self):
        b = bytes(b'ABCD')
        for item in b:
            self.assertTrue(isinstance(item, Integral))
        self.assertEqual(list(b), [65, 66, 67, 68])
    def test_bytes_plus_unicode_string(self):
        b = bytes(b'ABCD')
        u = u'EFGH'
        with self.assertRaises(TypeError):
            b + u
        with self.assertRaises(TypeError):
            u + b
    def test_bytes_plus_bytes(self):
        b1 = bytes(b'ABCD')
        b2 = b1 + b1
        self.assertEqual(b2, b'ABCDABCD')
        self.assertTrue(isinstance(b2, bytes))
        b3 = b1 + b'ZYXW'
        self.assertEqual(b3, b'ABCDZYXW')
        self.assertTrue(isinstance(b3, bytes))
        b4 = b'ZYXW' + b1
        self.assertEqual(b4, b'ZYXWABCD')
        self.assertTrue(isinstance(b4, bytes))
    def test_find_not_found(self):
        self.assertEqual(-1, bytes(b'ABCDE').find(b':'))
    def test_find_found(self):
        self.assertEqual(2, bytes(b'AB:CD:E').find(b':'))
    def test_rfind_not_found(self):
        self.assertEqual(-1, bytes(b'ABCDE').rfind(b':'))
    def test_rfind_found(self):
        self.assertEqual(5, bytes(b'AB:CD:E').rfind(b':'))
    def test_bytes_join_bytes(self):
        b = bytes(b' * ')
        strings = [b'AB', b'EFGH', b'IJKL']
        result = b.join(strings)
        self.assertEqual(result, b'AB * EFGH * IJKL')
        self.assertTrue(isinstance(result, bytes))
    def test_bytes_join_others(self):
        b = bytes(b' ')
        with self.assertRaises(TypeError):
            b.join([42])
        with self.assertRaises(TypeError):
            b.join(b'blah')
        with self.assertRaises(TypeError):
            b.join(bytes(b'blah'))
    def test_bytes_join_unicode_strings(self):
        b = bytes(b'ABCD')
        strings = [u'EFGH', u'IJKL']
        with self.assertRaises(TypeError):
            b.join(strings)
    def test_bytes_replace(self):
        b = bytes(b'ABCD')
        c = b.replace(b'A', b'F')
        self.assertEqual(c, b'FBCD')
        self.assertTrue(isinstance(c, bytes))
        with self.assertRaises(TypeError):
            b.replace(b'A', u'F')
        with self.assertRaises(TypeError):
            b.replace(u'A', b'F')
    def test_bytes_partition(self):
        b1 = bytes(b'ABCD')
        parts = b1.partition(b'B')
        self.assertEqual(parts, (b'A', b'B', b'CD'))
        self.assertTrue(all([isinstance(p, bytes) for p in parts]))
        b2 = bytes(b'ABCDABCD')
        parts = b2.partition(b'B')
        self.assertEqual(parts, (b'A', b'B', b'CDABCD'))
    def test_bytes_rpartition(self):
        b2 = bytes(b'ABCDABCD')
        parts = b2.rpartition(b'B')
        self.assertEqual(parts, (b'ABCDA', b'B', b'CD'))
        self.assertTrue(all([isinstance(p, bytes) for p in parts]))
    def test_bytes_contains_something(self):
        b = bytes(b'ABCD')
        self.assertTrue(b'A' in b)
        self.assertTrue(65 in b)
        self.assertTrue(b'AB' in b)
        self.assertTrue(bytes([65, 66]) in b)
        self.assertFalse(b'AC' in b)
        self.assertFalse(bytes([65, 67]) in b)
        self.assertFalse(b'Z' in b)
        self.assertFalse(99 in b)
        with self.assertRaises(TypeError):
            u'A' in b
    def test_bytes_index(self):
        b = bytes(b'ABCD')
        self.assertEqual(b.index(b'B'), 1)
        self.assertEqual(b.index(67), 2)
    def test_startswith(self):
        b = bytes(b'abcd')
        self.assertTrue(b.startswith(b'a'))
        self.assertTrue(b.startswith((b'a', b'b')))
        self.assertTrue(b.startswith(bytes(b'ab')))
        self.assertFalse(b.startswith((b'A', b'B')))
        with self.assertRaises(TypeError) as cm:
            b.startswith(65)
        with self.assertRaises(TypeError) as cm:
            b.startswith([b'A'])
        exc = str(cm.exception)
        # self.assertIn('bytes', exc)
        # self.assertIn('tuple', exc)
    def test_endswith(self):
        b = bytes(b'abcd')
        self.assertTrue(b.endswith(b'd'))
        self.assertTrue(b.endswith((b'c', b'd')))
        self.assertTrue(b.endswith(bytes(b'cd')))
        self.assertFalse(b.endswith((b'A', b'B')))
        with self.assertRaises(TypeError) as cm:
            b.endswith(65)
        with self.assertRaises(TypeError) as cm:
            b.endswith([b'D'])
        exc = str(cm.exception)
        # self.assertIn('bytes', exc)
        # self.assertIn('tuple', exc)
    def test_decode(self):
        b = bytes(b'abcd')
        s = b.decode('utf-8')
        self.assertEqual(s, 'abcd')
        self.assertTrue(isinstance(s, str))
    def test_encode(self):
        b = bytes(b'abcd')
        with self.assertRaises(AttributeError) as cm:
            b.encode('utf-8')
    def test_eq(self):
        """
        Equals: ==
        """
        b = bytes(b'ABCD')
        self.assertEqual(b, b'ABCD')
        self.assertTrue(b == b'ABCD')
        self.assertEqual(b'ABCD', b)
        self.assertEqual(b, b)
        self.assertFalse(b == b'ABC')
        self.assertFalse(b == bytes(b'ABC'))
        self.assertFalse(b == u'ABCD')
        self.assertFalse(b == str('ABCD'))
        # Fails:
        # self.assertFalse(u'ABCD' == b)
        self.assertFalse(str('ABCD') == b)
        self.assertFalse(b == list(b))
        self.assertFalse(b == str(b))
        self.assertFalse(b == u'ABC')
        self.assertFalse(bytes(b'Z') == 90)
    def test_ne(self):
        b = bytes(b'ABCD')
        self.assertFalse(b != b)
        self.assertFalse(b != b'ABCD')
        self.assertTrue(b != b'ABCDEFG')
        self.assertTrue(b != bytes(b'ABCDEFG'))
        self.assertTrue(b'ABCDEFG' != b)
        # self.assertTrue(b'ABCD' != u'ABCD')
        self.assertTrue(b != u'ABCD')
        self.assertTrue(b != u'ABCDE')
        self.assertTrue(bytes(b'') != str(u''))
        self.assertTrue(str(u'') != bytes(b''))
        self.assertTrue(b != list(b))
        self.assertTrue(b != str(b))
    # The backported str and bytes must hash into distinct dict slots so both
    # can coexist as keys, while native equivalents overwrite their backported
    # counterparts.
    def test_hash(self):
        d = {}
        b = bytes(b'ABCD')
        native_b = b'ABCD'
        s = str('ABCD')
        native_s = u'ABCD'
        d[b] = b
        d[s] = s
        self.assertEqual(len(d), 2)
        # This should overwrite d[s] but not d[b]:
        d[native_s] = native_s
        self.assertEqual(len(d), 2)
        # This should overwrite d[native_s] again:
        d[s] = s
        self.assertEqual(len(d), 2)
        self.assertEqual(set(d.keys()), set([s, b]))
    @unittest.expectedFailure
    def test_hash_with_native_types(self):
        # Warning: initializing the dict with native Py2 types throws the
        # hashing out:
        d = {u'ABCD': u'ABCD', b'ABCD': b'ABCD'}
        # On Py2: len(d) == 1
        b = bytes(b'ABCD')
        s = str('ABCD')
        d[s] = s
        d[b] = b
        # Fails:
        # NOTE(review): assertEqual is called with a single argument here,
        # which raises TypeError; the decorator masks that as an expected
        # failure either way.
        self.assertEqual(len(d) > 1)
    def test_add(self):
        b = bytes(b'ABC')
        c = bytes(b'XYZ')
        d = b + c
        self.assertTrue(isinstance(d, bytes))
        self.assertEqual(d, b'ABCXYZ')
        f = b + b'abc'
        self.assertTrue(isinstance(f, bytes))
        self.assertEqual(f, b'ABCabc')
        g = b'abc' + b
        self.assertTrue(isinstance(g, bytes))
        self.assertEqual(g, b'abcABC')
    def test_cmp(self):
        b = bytes(b'ABC')
        with self.assertRaises(TypeError):
            b > 3
        with self.assertRaises(TypeError):
            b > u'XYZ'
        with self.assertRaises(TypeError):
            b <= 3
        with self.assertRaises(TypeError):
            b >= int(3)
        with self.assertRaises(TypeError):
            b < 3.3
        with self.assertRaises(TypeError):
            b > (3.3 + 3j)
        with self.assertRaises(TypeError):
            b >= (1, 2)
        with self.assertRaises(TypeError):
            b <= [1, 2]
    def test_mul(self):
        b = bytes(b'ABC')
        c = b * 4
        self.assertTrue(isinstance(c, bytes))
        self.assertEqual(c, b'ABCABCABCABC')
        d = b * int(4)
        self.assertTrue(isinstance(d, bytes))
        self.assertEqual(d, b'ABCABCABCABC')
        if utils.PY2:
            e = b * long(4)
            self.assertTrue(isinstance(e, bytes))
            self.assertEqual(e, b'ABCABCABCABC')
    def test_rmul(self):
        b = bytes(b'XYZ')
        c = 3 * b
        self.assertTrue(isinstance(c, bytes))
        self.assertEqual(c, b'XYZXYZXYZ')
        d = b * int(3)
        self.assertTrue(isinstance(d, bytes))
        self.assertEqual(d, b'XYZXYZXYZ')
        if utils.PY2:
            e = long(3) * b
            self.assertTrue(isinstance(e, bytes))
            self.assertEqual(e, b'XYZXYZXYZ')
    def test_slice(self):
        b = bytes(b'ABCD')
        c1 = b[:]
        self.assertTrue(isinstance(c1, bytes))
        self.assertTrue(c1 == b)
        # The following is not true, whereas it is true normally on Py2 and
        # Py3. Does this matter?:
        # self.assertTrue(c1 is b)
        c2 = b[10:]
        self.assertTrue(isinstance(c2, bytes))
        self.assertTrue(c2 == bytes(b''))
        self.assertTrue(c2 == b'')
        c3 = b[:0]
        self.assertTrue(isinstance(c3, bytes))
        self.assertTrue(c3 == bytes(b''))
        self.assertTrue(c3 == b'')
        c4 = b[:1]
        self.assertTrue(isinstance(c4, bytes))
        self.assertTrue(c4 == bytes(b'A'))
        self.assertTrue(c4 == b'A')
        c5 = b[:-1]
        self.assertTrue(isinstance(c5, bytes))
        self.assertTrue(c5 == bytes(b'ABC'))
        self.assertTrue(c5 == b'ABC')
    def test_bytes_frozenset(self):
        _ALWAYS_SAFE = bytes(b'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                             b'abcdefghijklmnopqrstuvwxyz'
                             b'0123456789'
                             b'_.-')    # from Py3.3's urllib.parse
        s = frozenset(_ALWAYS_SAFE)
        self.assertTrue(65 in s)
        self.assertFalse(64 in s)
        # Convert back to bytes
        b1 = bytes(s)
        self.assertTrue(65 in b1)
        self.assertEqual(set(b1), set(_ALWAYS_SAFE))
    def test_bytes_within_range(self):
        """
        Python 3 does this:
        >>> bytes([255, 254, 256])
        ValueError
        ...
        ValueError: bytes must be in range(0, 256)

        Ensure our bytes() constructor has the same behaviour
        """
        b1 = bytes([254, 255])
        self.assertEqual(b1, b'\xfe\xff')
        with self.assertRaises(ValueError):
            b2 = bytes([254, 255, 256])
    def test_bytes_hasattr_encode(self):
        """
        This test tests whether hasattr(b, 'encode') is False, like it is on Py3.
        """
        b = bytes(b'abcd')
        self.assertFalse(hasattr(b, 'encode'))
        self.assertTrue(hasattr(b, 'decode'))
    def test_quote_from_bytes(self):
        """
        This test was failing in the backported urllib.parse module in quote_from_bytes
        """
        empty = bytes([])
        self.assertEqual(empty, b'')
        self.assertTrue(type(empty), bytes)
        empty2 = bytes(())
        self.assertEqual(empty2, b'')
        self.assertTrue(type(empty2), bytes)
        safe = bytes(u'Philosopher guy: 孔子. More text here.'.encode('utf-8'))
        safe = bytes([c for c in safe if c < 128])
        self.assertEqual(safe, b'Philosopher guy: . More text here.')
        self.assertTrue(type(safe), bytes)
    def test_rstrip(self):
        b = bytes(b'abcd')
        c = b.rstrip(b'd')
        self.assertEqual(c, b'abc')
        self.assertEqual(type(c), type(b))
    def test_maketrans(self):
        """
        Issue #51.
        Test is from Py3.3.5.
        """
        transtable = b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\x7f\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff'
        self.assertEqual(bytes.maketrans(b'', b''), transtable)
        transtable = b'\000\001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037 !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`xyzdefghijklmnopqrstuvwxyz{|}~\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374\375\376\377'
        self.assertEqual(bytes.maketrans(b'abc', b'xyz'), transtable)
        transtable = b'\000\001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037 !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374xyz'
        self.assertEqual(bytes.maketrans(b'\375\376\377', b'xyz'), transtable)
        self.assertRaises(ValueError, bytes.maketrans, b'abc', b'xyzq')
        self.assertRaises(TypeError, bytes.maketrans, 'abc', 'def')
    # NOTE: the tests below are kept disabled pending bytes %-formatting
    # (PEP 461) support in the backported bytes type.
    # def test_mod(self):
    #     """
    #     From Py3.5 test suite (post-PEP 461).
    #
    #     The bytes mod code is in _PyBytes_Format() in bytesobject.c in Py3.5.
    #     """
    #     b = b'hello, %b!'
    #     orig = b
    #     b = b % b'world'
    #     self.assertEqual(b, b'hello, world!')
    #     self.assertEqual(orig, b'hello, %b!')
    #     self.assertFalse(b is orig)
    #     b = b'%s / 100 = %d%%'
    #     a = b % (b'seventy-nine', 79)
    #     self.assertEqual(a, b'seventy-nine / 100 = 79%')
    # def test_imod(self):
    #     """
    #     From Py3.5 test suite (post-PEP 461)
    #     """
    #     # if (3, 0) <= sys.version_info[:2] < (3, 5):
    #     #     raise unittest.SkipTest('bytes % not yet implemented on Py3.0-3.4')
    #     b = bytes(b'hello, %b!')
    #     orig = b
    #     b %= b'world'
    #     self.assertEqual(b, b'hello, world!')
    #     self.assertEqual(orig, b'hello, %b!')
    #     self.assertFalse(b is orig)
    #     b = bytes(b'%s / 100 = %d%%')
    #     b %= (b'seventy-nine', 79)
    #     self.assertEqual(b, b'seventy-nine / 100 = 79%')
    # def test_mod_pep_461(self):
    #     """
    #     Test for the PEP 461 functionality (resurrection of %s formatting for
    #     bytes).
    #     """
    #     b1 = bytes(b'abc%b')
    #     b2 = b1 % b'def'
    #     self.assertEqual(b2, b'abcdef')
    #     self.assertTrue(isinstance(b2, bytes))
    #     self.assertEqual(type(b2), bytes)
    #     b3 = b1 % bytes(b'def')
    #     self.assertEqual(b3, b'abcdef')
    #     self.assertTrue(isinstance(b3, bytes))
    #     self.assertEqual(type(b3), bytes)
    #
    #     # %s is supported for backwards compatibility with Py2's str
    #     b4 = bytes(b'abc%s')
    #     b5 = b4 % b'def'
    #     self.assertEqual(b5, b'abcdef')
    #     self.assertTrue(isinstance(b5, bytes))
    #     self.assertEqual(type(b5), bytes)
    #     b6 = b4 % bytes(b'def')
    #     self.assertEqual(b6, b'abcdef')
    #     self.assertTrue(isinstance(b6, bytes))
    #     self.assertEqual(type(b6), bytes)
    #
    #     self.assertEqual(bytes(b'%c') % 48, b'0')
    #     self.assertEqual(bytes(b'%c') % b'a', b'a')
    #
    #     # For any numeric code %x, formatting of
    #     #     b"%x" % val
    #     # is supposed to be equivalent to
    #     #     ("%x" % val).encode("ascii")
    #     for code in b'xdiouxXeEfFgG':
    #         bytechar = bytes([code])
    #         pct_str = u"%" + bytechar.decode('ascii')
    #         for val in range(300):
    #             self.assertEqual(bytes(b"%" + bytechar) % val,
    #                              (pct_str % val).encode("ascii"))
    #
    #     with self.assertRaises(TypeError):
    #         bytes(b'%b') % 3.14
    #     # Traceback (most recent call last):
    #     # ...
    #     # TypeError: b'%b' does not accept 'float'
    #
    #     with self.assertRaises(TypeError):
    #         bytes(b'%b') % 'hello world!'
    #     # Traceback (most recent call last):
    #     # ...
    #     # TypeError: b'%b' does not accept 'str'
    #
    #     self.assertEqual(bytes(b'%a') % 3.14, b'3.14')
    #
    #     self.assertEqual(bytes(b'%a') % b'abc', b"b'abc'")
    #     self.assertEqual(bytes(b'%a') % bytes(b'abc'), b"b'abc'")
    #
    #     self.assertEqual(bytes(b'%a') % 'def', b"'def'")
    #
    #     # PEP 461 was updated after an Py3.5 alpha release to specify that %r is now supported
    #     # for compatibility: http://legacy.python.org/dev/peps/pep-0461/#id16
    #     assert bytes(b'%r' % b'abc') == bytes(b'%a' % b'abc')
    #
    #     # with self.assertRaises(TypeError):
    #     #     bytes(b'%r' % 'abc')
    @expectedFailurePY2
    def test_multiple_inheritance(self):
        """
        Issue #96 (for newbytes instead of newobject)
        """
        import collections
        class Base(bytes):
            pass
        class Foo(Base, collections.Container):
            def __contains__(self, item):
                return False
    @expectedFailurePY2
    def test_with_metaclass_and_bytes(self):
        """
        Issue #91 (for newdict instead of newobject)
        """
        from future.utils import with_metaclass
        class MetaClass(type):
            pass
        class TestClass(with_metaclass(MetaClass, bytes)):
            pass
    def test_surrogateescape_decoding(self):
        """
        Tests whether surrogateescape decoding works correctly.
        """
        pairs = [(u'\udcc3', b'\xc3'),
                 (u'\udcff', b'\xff')]
        for (s, b) in pairs:
            decoded = bytes(b).decode('utf-8', 'surrogateescape')
            self.assertEqual(s, decoded)
            self.assertTrue(isinstance(decoded, str))
            self.assertEqual(b, decoded.encode('utf-8', 'surrogateescape'))
    def test_issue_171_part_a(self):
        b1 = str(u'abc \u0123 do re mi').encode(u'utf_8')
        b2 = bytes(u'abc \u0123 do re mi', u'utf_8')
        b3 = bytes(str(u'abc \u0123 do re mi'), u'utf_8')
    @expectedFailurePY2
    def test_issue_171_part_b(self):
        """
        Tests whether:
        >>> nativebytes = bytes ; nativestr = str ; from builtins import *
        >>> nativebytes(bytes(b'asdf'))[0] == b'a' == b'asdf'
        """
        nativebytes = type(b'')
        nativestr = type('')
        b = nativebytes(bytes(b'asdf'))
        self.assertEqual(b, b'asdf')
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| 35.667602 | 765 | 0.56789 |
5b45fb614c1426a29b5b74a46fc5d163a74a8862 | 566 | py | Python | T04/ex06.py | mariogarcc/comphy | 3ab05a07dfa2eb8a1165fca1bdfd9bda6c8e27d3 | [
"CC0-1.0"
] | null | null | null | T04/ex06.py | mariogarcc/comphy | 3ab05a07dfa2eb8a1165fca1bdfd9bda6c8e27d3 | [
"CC0-1.0"
] | null | null | null | T04/ex06.py | mariogarcc/comphy | 3ab05a07dfa2eb8a1165fca1bdfd9bda6c8e27d3 | [
"CC0-1.0"
] | null | null | null | from package import redact_ex
from package import romberg_integrate
import numpy as np
# Exercise statement displayed by the course helper `redact_ex`.
EXERCISE_06 = """\
Make a program that computes the integral of a function f(x)
in an interval [a, b] using the Romberg's rule for integration.
Apply that to the case f(x) = (x**2 + x + 1) * cos(x) and a = 0, b = pi/2\
"""
# Print the exercise header, numbered 6.
redact_ex(EXERCISE_06, 6)
def f(x):
    """Integrand for exercise 6: (x^2 + x + 1) * cos(x)."""
    polynomial_part = x**2 + x + 1
    return polynomial_part * np.cos(x)
# Integrate f over [0, pi/2] with Romberg quadrature to ~1e-12 tolerance
# and report the result.
interval = [0, np.pi/2]
integral_ab = romberg_integrate(f, interval, prec = 1e-12)
# Fix: dataset-extraction junk (" | 22.64 | 74 | 0.667845 |") had been fused
# onto the end of this call, making the statement invalid Python.
print(f"Integrating f(x) in {interval!s} yields (Romberg):",
      integral_ab, sep = '\n')
1e55b592bb5296c5292f8039866703ae268c4af3 | 592 | py | Python | raytorch/texture/metal.py | bostonrwalker/raytorch | 75cafa976e00cd59ff2be25959931acea6367fb7 | [
"MIT"
] | 2 | 2020-10-19T13:55:40.000Z | 2021-01-08T12:53:45.000Z | raytorch/texture/metal.py | bostonrwalker/raytorch | 75cafa976e00cd59ff2be25959931acea6367fb7 | [
"MIT"
] | null | null | null | raytorch/texture/metal.py | bostonrwalker/raytorch | 75cafa976e00cd59ff2be25959931acea6367fb7 | [
"MIT"
] | null | null | null | from raytorch.texture.fresnel_texture import FresnelTexture
from raytorch.core import RGB
import math
class Metal(FresnelTexture):
    """Metallic Fresnel texture: partly reflective everywhere, never
    transmissive, with reflectivity rising steeply toward grazing angles."""
    def __init__(self, surface_col=None, normal_map_path=None):
        surface_col = RGB(.3, .3, .3) if surface_col is None else surface_col
        def reflectivity(theta):
            # Normalised incidence angle (theta / 90 degrees) to the 4th power.
            t4 = (theta / (math.pi / 2.)) ** 4
            reflected = 0.4 + 0.6 * t4
            transmitted = 0.
            absorbed = 0.6 * (1. - t4)
            return reflected, transmitted, absorbed
        super().__init__(surface_col=surface_col, reflectivity_func=reflectivity, normal_map_path=normal_map_path)
| 29.6 | 114 | 0.614865 |
940305ae286296a1d5507aa4e2fdc0ee38d78046 | 14,649 | py | Python | ambari-agent/src/main/python/ambari_agent/HostInfo.py | wbear2/ambari | a1891193984da47015cd5483b5b95e040677d7df | [
"Apache-2.0"
] | 5 | 2018-06-03T05:19:40.000Z | 2021-04-16T17:10:49.000Z | ambari-agent/src/main/python/ambari_agent/HostInfo.py | wbear2/ambari | a1891193984da47015cd5483b5b95e040677d7df | [
"Apache-2.0"
] | null | null | null | ambari-agent/src/main/python/ambari_agent/HostInfo.py | wbear2/ambari | a1891193984da47015cd5483b5b95e040677d7df | [
"Apache-2.0"
] | 6 | 2019-05-07T13:24:39.000Z | 2021-02-15T14:12:37.000Z | #!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import os
import glob
import logging
import pwd
import re
import time
import subprocess
import threading
import shlex
import platform
from PackagesAnalyzer import PackagesAnalyzer
from HostCheckReportFileHandler import HostCheckReportFileHandler
from Hardware import Hardware
from ambari_commons import OSCheck, OSConst
import socket
# Root logger of the agent process.
logger = logging.getLogger()
# OS info, resolved once at import time.
OS_VERSION = OSCheck().get_os_major_version()
OS_TYPE = OSCheck.get_os_type()
OS_FAMILY = OSCheck.get_os_family()
# Path of the init-script wrapper used to query service status.
SERVICE_CMD = "/sbin/service"
# on ubuntu iptables service is called ufw; Debian-family systems also keep
# the service wrapper under /usr/sbin instead of /sbin
if OS_FAMILY == OSConst.DEBIAN_FAMILY:
  SERVICE_CMD = "/usr/sbin/service"
class HostInfo:
# List of project names to be used to find alternatives folders etc.
DEFAULT_PROJECT_NAMES = [
"hadoop*", "hadoop", "hbase", "hcatalog", "hive", "ganglia", "nagios",
"oozie", "sqoop", "hue", "zookeeper", "mapred", "hdfs", "flume",
"storm", "hive-hcatalog", "tez", "falcon", "ambari_qa", "hadoop_deploy",
"rrdcached", "hcat", "ambari-qa", "sqoop-ambari-qa", "sqoop-ambari_qa",
"webhcat", "hadoop-hdfs", "hadoop-yarn", "hadoop-mapreduce"
]
# List of live services checked for on the host, takes a map of plan strings
DEFAULT_LIVE_SERVICES = [
{OSConst.REDHAT_FAMILY: "ntpd", OSConst.SUSE_FAMILY: "ntp", OSConst.DEBIAN_FAMILY: "ntp"}
]
# Set of default users (need to be replaced with the configured user names)
DEFAULT_USERS = [
"nagios", "hive", "ambari-qa", "oozie", "hbase", "hcat", "mapred",
"hdfs", "rrdcached", "zookeeper", "flume", "sqoop", "sqoop2",
"hue", "yarn"
]
# Filters used to identify processed
PROC_FILTER = [
"hadoop", "zookeeper"
]
# Additional path patterns to find existing directory
DIRNAME_PATTERNS = [
"/tmp/hadoop-", "/tmp/hsperfdata_"
]
# Default set of directories that are checked for existence of files and folders
DEFAULT_DIRS = [
"/etc", "/var/run", "/var/log", "/usr/lib", "/var/lib", "/var/tmp", "/tmp", "/var", "/hadoop"
]
# Packages that are used to find repos (then repos are used to find other packages)
PACKAGES = [
"hadoop", "zookeeper", "webhcat", "*-manager-server-db", "*-manager-daemons"
]
# Additional packages to look for (search packages that start with these)
ADDITIONAL_PACKAGES = [
"rrdtool", "rrdtool-python", "nagios", "ganglia", "gmond", "gweb", "libconfuse", "ambari-log4j",
"hadoop", "zookeeper", "oozie", "webhcat"
]
# ignore packages from repos whose names start with these strings
IGNORE_PACKAGES_FROM_REPOS = [
"ambari", "installed"
]
# ignore required packages
IGNORE_PACKAGES = [
"epel-release"
]
# ignore repos from the list of repos to be cleaned
IGNORE_REPOS = [
"ambari", "HDP-UTILS"
]
# default timeout for async invoked processes
TIMEOUT_SECONDS = 60
RESULT_UNAVAILABLE = "unable_to_determine"
DEFAULT_SERVICE_NAME = "ntpd"
SERVICE_STATUS_CMD = "%s %s status" % (SERVICE_CMD, DEFAULT_SERVICE_NAME)
event = threading.Event()
current_umask = -1
  def __init__(self, config=None):
    """Create the helpers used by the host checks.

    :param config: agent configuration forwarded to the host-check report
                   writer (may be None).
    """
    self.packages = PackagesAnalyzer()
    self.reportFileHandler = HostCheckReportFileHandler(config)
def dirType(self, path):
if not os.path.exists(path):
return 'not_exist'
elif os.path.islink(path):
return 'sym_link'
elif os.path.isdir(path):
return 'directory'
elif os.path.isfile(path):
return 'file'
return 'unknown'
def hadoopVarRunCount(self):
if not os.path.exists('/var/run/hadoop'):
return 0
pids = glob.glob('/var/run/hadoop/*/*.pid')
return len(pids)
def hadoopVarLogCount(self):
if not os.path.exists('/var/log/hadoop'):
return 0
logs = glob.glob('/var/log/hadoop/*/*.log')
return len(logs)
def etcAlternativesConf(self, projects, etcResults):
if not os.path.exists('/etc/alternatives'):
return []
projectRegex = "'" + '|'.join(projects) + "'"
files = [f for f in os.listdir('/etc/alternatives') if re.match(projectRegex, f)]
for conf in files:
result = {}
filePath = os.path.join('/etc/alternatives', conf)
if os.path.islink(filePath):
realConf = os.path.realpath(filePath)
result['name'] = conf
result['target'] = realConf
etcResults.append(result)
def checkLiveServices(self, services, result):
osType = OSCheck.get_os_family()
for service in services:
svcCheckResult = {}
if isinstance(service, dict):
serviceName = service[osType]
else:
serviceName = service
service_check_live = shlex.split(self.SERVICE_STATUS_CMD)
service_check_live[1] = serviceName
svcCheckResult['name'] = serviceName
svcCheckResult['status'] = "UNKNOWN"
svcCheckResult['desc'] = ""
try:
osStat = subprocess.Popen(service_check_live, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = osStat.communicate()
if 0 != osStat.returncode:
svcCheckResult['status'] = "Unhealthy"
svcCheckResult['desc'] = out
if len(out) == 0:
svcCheckResult['desc'] = err
else:
svcCheckResult['status'] = "Healthy"
except Exception, e:
svcCheckResult['status'] = "Unhealthy"
svcCheckResult['desc'] = repr(e)
result.append(svcCheckResult)
def checkUsers(self, users, results):
    """Look up each name in *users* in /etc/passwd.

    Appends a dict per matching account with 'name', 'homeDir' and a
    'status' of "Available" or "Invalid home directory" (home path
    missing) to *results*.
    """
    f = open('/etc/passwd', 'r')
    try:
        for userLine in f:
            fields = userLine.split(":")
            if fields[0] in users:
                result = {}
                # passwd layout: name:passwd:uid:gid:gecos:home:shell
                homeDir = fields[5]
                result['name'] = fields[0]
                result['homeDir'] = fields[5]
                result['status'] = "Available"
                if not os.path.exists(homeDir):
                    result['status'] = "Invalid home directory"
                results.append(result)
    finally:
        # The original leaked the file handle; always close it.
        f.close()
def osdiskAvailableSpace(self, path):
    """Return mount/space info for *path* parsed from `df -kPT`.

    Best-effort: any failure (df missing, parse error, helper unavailable)
    yields an empty dict rather than propagating.
    """
    diskInfo = {}
    try:
        df = subprocess.Popen(["df", "-kPT", path], stdout=subprocess.PIPE)
        dfdata = df.communicate()[0]
        # Last line of df output is the row for *path*.
        return Hardware.extractMountInfo(dfdata.splitlines()[-1])
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # still propagate; everything else keeps the best-effort contract.
        pass
    return diskInfo
def checkFolders(self, basePaths, projectNames, existingUsers, dirs):
    """Find existing <base>/<project> paths and describe them.

    Paths equal to a known user's home directory (from *existingUsers*,
    dicts with a 'homeDir' key) are skipped.  Each found path is appended
    to *dirs* as {'type': self.dirType(path), 'name': path}.
    """
    foldersToIgnore = []
    for user in existingUsers:
        foldersToIgnore.append(user['homeDir'])
    try:
        for dirName in basePaths:
            for project in projectNames:
                path = os.path.join(dirName.strip(), project.strip())
                if not path in foldersToIgnore and os.path.exists(path):
                    obj = {}
                    obj['type'] = self.dirType(path)
                    obj['name'] = path
                    dirs.append(obj)
    except Exception:
        # Best-effort scan; narrowed from a bare 'except:'.
        pass
def javaProcs(self, list):
    """Append info dicts for running non-Ambari Java processes to *list*.

    Scans /proc; each entry carries 'pid', 'command', 'user' and a
    'hadoop' flag set when the command line matches any self.PROC_FILTER
    pattern.  Best-effort: any failure leaves *list* as-is.
    NOTE(review): written for Python 2 — /proc/<pid>/cmdline is read in
    binary and compared against str patterns; confirm before running
    under Python 3.
    """
    try:
        pids = [pid for pid in os.listdir('/proc') if pid.isdigit()]
        for pid in pids:
            cmd = open(os.path.join('/proc', pid, 'cmdline'), 'rb').read()
            # cmdline args are NUL-separated; flatten for substring checks.
            cmd = cmd.replace('\0', ' ')
            if not 'AmbariServer' in cmd:
                if 'java' in cmd:
                    proc_info = {}  # renamed from 'dict' (shadowed builtin)
                    proc_info['pid'] = int(pid)
                    proc_info['hadoop'] = False
                    for pattern in self.PROC_FILTER:  # renamed from 'filter'
                        if pattern in cmd:
                            proc_info['hadoop'] = True
                    proc_info['command'] = cmd.strip()
                    for line in open(os.path.join('/proc', pid, 'status')):
                        if line.startswith('Uid:'):
                            uid = int(line.split()[1])
                            proc_info['user'] = pwd.getpwuid(uid).pw_name
                    list.append(proc_info)
    except Exception:
        # Narrowed from a bare 'except:'; /proc rows can vanish mid-scan.
        pass
def getReposToRemove(self, repos, ignoreList):
    """Return the repos that match no pattern in *ignoreList*.

    Matching is delegated to self.packages.nameMatch(pattern, repo).
    """
    reposToRemove = []
    for repo in repos:
        addToRemoveList = True
        for ignoreRepo in ignoreList:
            if self.packages.nameMatch(ignoreRepo, repo):
                addToRemoveList = False
                # Original used 'continue' here, needlessly testing the
                # remaining patterns after a match was already found.
                break
        if addToRemoveList:
            reposToRemove.append(repo)
    return reposToRemove
def getUMask(self):
    """Return the process umask, caching it on self.current_umask.

    os.umask can only be *read* by setting it, so on first call we set a
    throwaway value, capture the previous mask, and immediately restore it.
    -1 is the "not read yet" sentinel.
    """
    if self.current_umask == -1:
        self.current_umask = os.umask(self.current_umask)
        os.umask(self.current_umask)
    return self.current_umask
def getFirewallObject(self):
    """Return the firewall checker matching the detected OS.

    Ubuntu → ufw checks; Fedora >= 18 → firewalld; SUSE family →
    SuSEfirewall2; anything else → generic iptables service checks.
    """
    if OS_TYPE == OSConst.OS_UBUNTU:
        return UbuntuFirewallChecks()
    elif OS_TYPE == OSConst.OS_FEDORA and int(OS_VERSION) >= 18:
        return Fedora18FirewallChecks()
    elif OS_FAMILY == OSConst.SUSE_FAMILY:
        return SuseFirewallChecks()
    else:
        return FirewallChecks()
def getFirewallObjectTypes(self):
    """Return every firewall-checker class.

    Exists so test code can iterate over all checker implementations.
    """
    # To support test code, so tests can loop through the types
    return (FirewallChecks,
            UbuntuFirewallChecks,
            Fedora18FirewallChecks,
            SuseFirewallChecks)
def checkIptables(self):
    """Return True when the OS-appropriate firewall service reports running."""
    return self.getFirewallObject().check_iptables()
def register(self, dict, componentsMapped=True, commandsInProgress=True):
    """Populate *dict* with various details about the host.

    componentsMapped: indicates if any components are mapped to this host
    commandsInProgress: indicates if any commands are in progress

    When either flag is set the expensive checks (repos, packages,
    alternatives, users, stack folders) are skipped and placeholder values
    are written instead.  The reporting timestamp is always recorded last.
    (Parameter name 'dict' shadows the builtin but is kept for caller
    compatibility.)
    """
    dict['hostHealth'] = {}
    java = []
    self.javaProcs(java)
    dict['hostHealth']['activeJavaProcs'] = java
    liveSvcs = []
    self.checkLiveServices(self.DEFAULT_LIVE_SERVICES, liveSvcs)
    dict['hostHealth']['liveServices'] = liveSvcs
    dict['umask'] = str(self.getUMask())
    dict['iptablesIsRunning'] = self.checkIptables()
    dict['reverseLookup'] = self.checkReverseLookup()
    # If commands are in progress or components are already mapped to this host
    # Then do not perform certain expensive host checks
    if componentsMapped or commandsInProgress:
        dict['existingRepos'] = [self.RESULT_UNAVAILABLE]
        dict['installedPackages'] = []
        dict['alternatives'] = []
        dict['stackFoldersAndFiles'] = []
        dict['existingUsers'] = []
    else:
        etcs = []
        self.etcAlternativesConf(self.DEFAULT_PROJECT_NAMES, etcs)
        dict['alternatives'] = etcs
        existingUsers = []
        self.checkUsers(self.DEFAULT_USERS, existingUsers)
        dict['existingUsers'] = existingUsers
        dirs = []
        self.checkFolders(self.DEFAULT_DIRS, self.DEFAULT_PROJECT_NAMES, existingUsers, dirs)
        dict['stackFoldersAndFiles'] = dirs
        installedPackages = []
        availablePackages = []
        self.packages.allInstalledPackages(installedPackages)
        self.packages.allAvailablePackages(availablePackages)
        repos = []
        self.packages.getInstalledRepos(self.PACKAGES, installedPackages + availablePackages,
                                        self.IGNORE_PACKAGES_FROM_REPOS, repos)
        packagesInstalled = self.packages.getInstalledPkgsByRepo(repos, self.IGNORE_PACKAGES, installedPackages)
        additionalPkgsInstalled = self.packages.getInstalledPkgsByNames(
            self.ADDITIONAL_PACKAGES, installedPackages)
        allPackages = list(set(packagesInstalled + additionalPkgsInstalled))
        dict['installedPackages'] = self.packages.getPackageDetails(installedPackages, allPackages)
        repos = self.getReposToRemove(repos, self.IGNORE_REPOS)
        dict['existingRepos'] = repos
        self.reportFileHandler.writeHostCheckFile(dict)
    # The time stamp must be recorded at the end
    dict['hostHealth']['agentTimeStampAtReporting'] = int(time.time() * 1000)
def checkReverseLookup(self):
    """
    Check if host fqdn resolves to current host ip
    """
    try:
        host_name = socket.gethostname()
        host_ip = socket.gethostbyname(host_name)
        host_fqdn = socket.getfqdn()
        fqdn_ip = socket.gethostbyname(host_fqdn)
        return host_ip == fqdn_ip
    except socket.error:
        # Resolution failures count as a failed reverse lookup.
        pass
    return False
class FirewallChecks(object):
    """Base firewall probe: runs '<service_cmd> <name> status' and treats
    exit code 0 as "firewall running"."""

    def __init__(self):
        self.FIREWALL_SERVICE_NAME = "iptables"
        self.SERVICE_CMD = SERVICE_CMD
        self.SERVICE_SUBCMD = "status"

    def get_command(self):
        """Return the status command line as a single string."""
        return "%s %s %s" % (self.SERVICE_CMD, self.FIREWALL_SERVICE_NAME, self.SERVICE_SUBCMD)

    def check_result(self, retcode, out, err):
        """Interpret the command result; exit code 0 means running."""
        return retcode == 0

    def check_iptables(self):
        """Run the status command and interpret its result."""
        retcode, out, err = self.run_os_command(self.get_command())
        return self.check_result(retcode, out, err)

    def get_running_result(self):
        # To support test code. Expected output from run_os_command.
        return (0, "", "")

    def get_stopped_result(self):
        # To support test code. Expected output from run_os_command.
        return (3, "", "")

    def run_os_command(self, cmd):
        """Execute *cmd* (string or argv list); return (rc, stdout, stderr).

        A missing/unrunnable binary is reported as the "stopped" result.
        """
        if isinstance(cmd, str):
            cmd = shlex.split(cmd)
        try:
            process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE,
                                       stderr=subprocess.PIPE)
            (stdoutdata, stderrdata) = process.communicate()
            return process.returncode, stdoutdata, stderrdata
        except OSError:
            return self.get_stopped_result()
class UbuntuFirewallChecks(FirewallChecks):
    """ufw-based firewall probe for Ubuntu (status text, not exit code)."""

    def __init__(self):
        super(UbuntuFirewallChecks, self).__init__()
        self.FIREWALL_SERVICE_NAME = "ufw"
        self.SERVICE_CMD = 'service'

    def check_result(self, retcode, out, err):
        # On ubuntu, the status command returns 0 whether running or not
        return out and len(out) > 0 and out.strip() != "ufw stop/waiting"

    def get_running_result(self):
        # To support test code. Expected output from run_os_command.
        return (0, "ufw start/running", "")

    def get_stopped_result(self):
        # To support test code. Expected output from run_os_command.
        return (0, "ufw stop/waiting", "")
class Fedora18FirewallChecks(FirewallChecks):
    """firewalld-based probe for Fedora 18+ (systemd)."""

    def __init__(self):
        # NOTE(review): does not call super().__init__(), so SERVICE_CMD and
        # SERVICE_SUBCMD are never set; safe only because get_command() below
        # overrides the base implementation — confirm before using other
        # base-class helpers.
        self.FIREWALL_SERVICE_NAME = "firewalld.service"

    def get_command(self):
        return "systemctl is-active firewalld.service"
class SuseFirewallChecks(FirewallChecks):
    """SuSEfirewall2-based probe for the SUSE family."""

    def __init__(self):
        # NOTE(review): like Fedora18FirewallChecks, skips super().__init__();
        # get_command() below makes the base attributes unnecessary.
        self.FIREWALL_SERVICE_NAME = "SuSEfirewall2"

    def get_command(self):
        return "/sbin/SuSEfirewall2 status"
def main(argv=None):
    """CLI entry point: run all host checks and print the collected report."""
    h = HostInfo()
    struct = {}
    h.register(struct)
    # Parenthesized form is valid on both Python 2 and 3 (original used the
    # Python-2-only 'print struct' statement).
    print(struct)


if __name__ == '__main__':
    main()
| 31.503226 | 110 | 0.666598 |
47d4a8cdde1da0995d14e7c745c60edb626725bf | 7,615 | py | Python | phaseblock.py | ding-lab/SomaticHaplotype | 1cec343d1129c4f5052e5f5250bb7d3e7ff25a36 | [
"MIT"
] | null | null | null | phaseblock.py | ding-lab/SomaticHaplotype | 1cec343d1129c4f5052e5f5250bb7d3e7ff25a36 | [
"MIT"
] | null | null | null | phaseblock.py | ding-lab/SomaticHaplotype | 1cec343d1129c4f5052e5f5250bb7d3e7ff25a36 | [
"MIT"
] | null | null | null | import os
import pickle
import pysam
import vcf
from SomaticHaplotype import *
################################################################################
# bam functions
################################################################################

def extract_read_info(read):
    """Classify a BAM read for phasing purposes.

    Returns the string sentinel "read is bad quality" for duplicate/QC-fail/
    secondary/non-proper-pair reads, "read is not part of phase block" when
    the PS tag is absent, otherwise a dict of the read's tags with 'MI'
    (molecule id) defaulted to None.
    """
    if read.is_duplicate or read.is_qcfail or read.is_secondary or not read.is_proper_pair:
        return "read is bad quality"
    elif read.has_tag("PS"):  # read is part of a phase block
        tags_dict = {x: y for (x, y) in read.get_tags()}
        if "MI" not in tags_dict:
            tags_dict["MI"] = None
        return tags_dict
    else:  # read is not part of a phase block
        return "read is not part of phase block"
def _add_haplotype_support(block, read_info, read):
    """Record one read's haplotype-1/2 support on *block*; abort on anything else."""
    if read_info["HP"] == 1:
        block.add_SupportH1()
        block.add_MoleculeH1(read_info["MI"])
    elif read_info["HP"] == 2:
        block.add_SupportH2()
        block.add_MoleculeH2(read_info["MI"])
    else:
        sys.exit("Whoa no H1 or H2 support for\n" + str(read))


def extract_phase_blocks_from_bam(bam_filename, chr = None, start_bp = None, end_bp = None):
    """Build a phase-block dictionary from a coordinate-sorted, indexed BAM.

    Returns a dict with read-quality counters and a 'phase_blocks' mapping of
    "<chrom>:<PS>" ids to PhaseBlock objects whose extents and haplotype
    support are accumulated read-by-read over the requested region.
    """
    samfile = pysam.AlignmentFile(bam_filename, "rb")
    phase_block_dict = {"n_total_reads" : 0, "n_reads_bad_quality" : 0,
                        "n_reads_good_quality" : 0, "n_reads_phased" : 0, "n_reads_not_phased" : 0,
                        "phase_blocks" : {}}
    for read in samfile.fetch(chr, start_bp, end_bp):
        phase_block_dict["n_total_reads"] += 1
        read_info = extract_read_info(read)
        if read_info == "read is bad quality":
            phase_block_dict["n_reads_bad_quality"] += 1
        elif read_info == "read is not part of phase block":
            phase_block_dict["n_reads_not_phased"] += 1
            phase_block_dict["n_reads_good_quality"] += 1
        else:
            phase_block_dict["n_reads_phased"] += 1
            phase_block_dict["n_reads_good_quality"] += 1
            pb_id = read.reference_name + ":" + str(read_info["PS"])
            if pb_id in phase_block_dict["phase_blocks"]:
                block = phase_block_dict["phase_blocks"][pb_id]
                # Grow the block's extent to cover this read.
                if int(read.reference_start) < block.return_Start():
                    block.update_Start(read.reference_start)
                if int(read.reference_end) > block.return_End():
                    block.update_End(read.reference_end)
            else:
                block = PhaseBlock(pb_id = pb_id,
                                   chromosome = read.reference_name,
                                   start_bp = int(read.reference_start),
                                   end_bp = int(read.reference_end))
                phase_block_dict["phase_blocks"][pb_id] = block
            block.add_SingleEndRead()
            _add_haplotype_support(block, read_info, read)
    samfile.close()
    return phase_block_dict
################################################################################
# VCF functions
################################################################################

def extract_variants_from_VCF(vcf_filename, sample_id, chr = None, start_bp = None, end_bp = None):
    """Build a dictionary of Variant objects present in a tabix-indexed VCF.

    Keys are Variant keys; values are lists (multiple records can share a
    key).  NOTE(review): despite the original comment, no phased-heterozygote
    filtering happens here — every fetched record is kept.
    """
    this_vcf = vcf.Reader( filename = vcf_filename )
    variant_dict = {}  # dictionary to hold all variants
    for record in this_vcf.fetch( str(chr) , start_bp, end_bp ):  # loop over each record in VCF
        this_variant = Variant(record, sample_id)
        if this_variant.return_VariantKey() in variant_dict:  # check if variant already in dictionary
            variant_dict[this_variant.return_VariantKey()].append(this_variant)
        else:
            variant_dict[this_variant.return_VariantKey()] = [this_variant]
    return variant_dict
################################################################################
# Combined bam phase block and VCF variants functions
################################################################################

def add_variants_to_phase_blocks(bam_phase_block_dictionary, vcf_variants_dictionary):
    """Attach VCF variants to the BAM-derived phase blocks, in place.

    Non-phased-heterozygote variants and variants whose phase block id is
    absent from the BAM are collected under two special 'phase_blocks' keys;
    everything else is added to its PhaseBlock, whose first/last variant
    positions are then finalized.
    """
    bam_phase_block_dictionary["phase_blocks"]["variant_not_phased_heterozygote"] = {}
    bam_phase_block_dictionary["phase_blocks"]["variant_phase_block_not_in_bam"] = {}
    for variant_key in vcf_variants_dictionary:
        for variant in vcf_variants_dictionary[variant_key]:
            variant_pbid = variant.return_VariantPhaseBlock()
            if not variant.return_IsPhasedHeterozygote():
                bam_phase_block_dictionary["phase_blocks"]["variant_not_phased_heterozygote"] \
                    .setdefault(variant_key, []).append(variant)
            elif variant_pbid in bam_phase_block_dictionary["phase_blocks"]:
                bam_phase_block_dictionary["phase_blocks"][variant_pbid].add_Variant(variant)
            else:
                bam_phase_block_dictionary["phase_blocks"]["variant_phase_block_not_in_bam"] \
                    .setdefault(variant_key, []).append(variant)
    for pbid in bam_phase_block_dictionary["phase_blocks"]:
        if pbid not in ["variant_not_phased_heterozygote", "variant_phase_block_not_in_bam"]:
            bam_phase_block_dictionary["phase_blocks"][pbid].add_FirstVariantPosition()
            bam_phase_block_dictionary["phase_blocks"][pbid].add_LastVariantPosition()
################################################################################
# main
################################################################################

def main(args):
    """Run the phaseblock subcommand: parse the range, build dictionaries,
    and pickle both to <output_directory>/<output_prefix>.phaseblock.pkl."""
    # parse the genomic range argument, formatted as chr[:start[-end]]
    chrom = args.range.split(":")[0]
    try:
        start = int(args.range.split(":")[1].split("-")[0])
    except (IndexError, ValueError):
        start = None
    try:
        end = int(args.range.split(":")[1].split("-")[1])
    except (IndexError, ValueError):
        end = None

    # bam phase block dictionary
    bam_phase_block_dictionary = extract_phase_blocks_from_bam(
        args.bam,
        chr = chrom,
        start_bp = start,
        end_bp = end)

    # vcf variant dictionary
    vcf_variants_dictionary = extract_variants_from_VCF(
        args.vcf,
        args.vcf_id,
        chr = chrom,
        start_bp = start,
        end_bp = end)

    # add variants to bam phase block dictionary
    add_variants_to_phase_blocks(bam_phase_block_dictionary, vcf_variants_dictionary)

    os.makedirs(args.output_directory, exist_ok = True)
    output_file_path = os.path.join(args.output_directory, args.output_prefix + ".phaseblock.pkl")
    # 'with' guarantees the pickle file is closed even on error.
    with open(output_file_path, 'wb') as output_file:
        pickle.dump(bam_phase_block_dictionary, output_file, pickle.HIGHEST_PROTOCOL)
        pickle.dump(vcf_variants_dictionary, output_file, pickle.HIGHEST_PROTOCOL)


if __name__ == '__main__':
    # NOTE(review): 'args' is not defined in this module; presumably it is
    # expected from the 'from SomaticHaplotype import *' star import — confirm.
    main(args)
| 42.541899 | 116 | 0.65371 |
ab758462490f700ba63d9b9af639c9ecfd167433 | 12,006 | py | Python | tpRigToolkit/tools/controlrig/dccs/maya/server.py | tpRigToolkit/tpRigToolkit-tools-controlrig | 46e15377d64418bcc4bc8944c3f9537603fa0e9d | [
"MIT"
] | null | null | null | tpRigToolkit/tools/controlrig/dccs/maya/server.py | tpRigToolkit/tpRigToolkit-tools-controlrig | 46e15377d64418bcc4bc8944c3f9537603fa0e9d | [
"MIT"
] | null | null | null | tpRigToolkit/tools/controlrig/dccs/maya/server.py | tpRigToolkit/tpRigToolkit-tools-controlrig | 46e15377d64418bcc4bc8944c3f9537603fa0e9d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains control rig server implementation for Maya
"""
from __future__ import print_function, division, absolute_import
from tpDcc import dcc
from tpDcc.core import server
from tpDcc.libs.curves.core import lib
from tpDcc.dccs.maya.core import filtertypes, shape as shape_utils
from tpRigToolkit.libs.controlrig.core import controllib
class ControlRigServer(server.DccServer, object):
    """DCC-side server exposing control-rig commands (color, mirror, scale,
    curve replacement, ...) to the ControlRig client tool.

    Every handler takes a *data* request dict and fills the *reply* dict
    with at least 'success', plus 'result'/'msg' where applicable.
    """

    PORT = 13144

    def __init__(self, *args, **kwargs):
        super(ControlRigServer, self).__init__(*args, **kwargs)

        # Force register DCC commands
        lib.CurvesLib.load()

    def update_selected_nodes(self, data, reply):
        """Return the live DCC selection, or the still-existing nodes from *data*."""
        nodes = data.get('nodes', list())
        deselect = data.get('deselect', True)

        selected_nodes = dcc.selected_nodes(full_path=True) or list()
        if selected_nodes:
            valid_nodes = selected_nodes
        else:
            valid_nodes = list()
            for node in nodes:
                if not node or not dcc.node_exists(node):
                    continue
                valid_nodes.append(node)

        if selected_nodes and deselect:
            dcc.clear_selection()

        reply['success'] = True
        reply['result'] = valid_nodes

    def filter_transforms_with_shapes(self, data, reply):
        """Keep only transforms in *data['nodes']* that carry NURBS curve shapes."""
        nodes = data.get('nodes', list())
        children = data.get('hierarchy', False)

        valid_nodes = list()
        if not nodes:
            reply['success'] = True
            return valid_nodes
        for node in nodes:
            if not node or not dcc.node_exists(node):
                continue
            valid_nodes.append(node)
        if not valid_nodes:
            reply['success'] = True
            return valid_nodes

        transforms_with_shapes = filtertypes.filter_transforms_with_shapes(
            valid_nodes, children=children, shape_type='nurbsCurve') or list()

        reply['success'] = True
        reply['result'] = transforms_with_shapes

    @dcc.undo_decorator()
    def update_display_state(self, data, reply):
        """Set the shapes' display override on the given (or selected) nodes."""
        nodes = data.get('nodes', list())
        display_index = data.get('display_index', 0)  # 0 = Normal; 1 = Template; 2 = Reference

        nodes = nodes or dcc.selected_nodes(full_path=True)
        if nodes:
            for obj in nodes:
                dcc.clean_construction_history(obj)
                shapes = dcc.list_children_shapes(obj, all_hierarchy=True)
                for shape in shapes:
                    dcc.set_attribute_value(shape, 'overrideEnabled', True)
                    dcc.set_attribute_value(shape, 'overrideDisplayType', display_index)
                    if display_index == 0:
                        # Normal display does not need the override at all.
                        dcc.set_attribute_value(shape, 'overrideEnabled', False)

        reply['success'] = True

    @dcc.undo_decorator()
    def set_index_color(self, data, reply):
        """Apply an index-based override color to the given (or selected) nodes."""
        nodes = data.get('nodes', list())
        index = data.get('index', 0)

        nodes = nodes or dcc.selected_nodes()
        if nodes:
            for obj in nodes:
                shapes = dcc.list_children_shapes(obj, all_hierarchy=True)
                if not shapes:
                    continue
                for shape in shapes:
                    if not dcc.attribute_exists(shape, 'overrideEnabled'):
                        continue
                    if not dcc.attribute_exists(shape, 'overrideColor'):
                        continue
                    if dcc.attribute_exists(shape, 'overrideRGBColors'):
                        # Index colors require RGB mode to be off.
                        dcc.set_attribute_value(shape, 'overrideRGBColors', False)
                    dcc.set_attribute_value(shape, 'overrideEnabled', True)
                    dcc.set_attribute_value(shape, 'overrideColor', index)
                    if index == 0:
                        dcc.set_attribute_value(shape, 'overrideEnabled', False)

        reply['success'] = True

    @dcc.undo_decorator()
    def set_rgb_color(self, data, reply):
        """Apply an RGB override color to the given (or selected) nodes."""
        nodes = data.get('nodes', list())
        color = data.get('color', list())

        if not nodes:
            nodes = dcc.selected_nodes()
        if nodes:
            for obj in nodes:
                shapes = dcc.list_children_shapes(obj, all_hierarchy=True)
                if not shapes:
                    continue
                if dcc.attribute_exists(obj, 'color'):
                    # Rig controls expose a custom 'color' attribute; drive it too.
                    dcc.set_attribute_value(obj, 'color', [color[0], color[1], color[2]])
                    for shape in shapes:
                        override_enabled = dcc.get_attribute_value(shape, 'overrideEnabled')
                        if override_enabled:
                            # Toggle off/on so the override refreshes.
                            dcc.set_attribute_value(shape, 'overrideEnabled', False)
                        dcc.set_attribute_value(shape, 'overrideEnabled', True)
                        try:
                            dcc.set_attribute_value(
                                shape, 'overrideColorRGB', [color[0], color[1], color[2]])
                        except Exception:
                            pass
                else:
                    for shape in shapes:
                        if not dcc.attribute_exists(shape, 'overrideEnabled'):
                            continue
                        if not dcc.attribute_exists(shape, 'overrideRGBColors'):
                            continue
                        dcc.set_attribute_value(shape, 'overrideRGBColors', True)
                        dcc.set_attribute_value(shape, 'overrideEnabled', True)
                        dcc.set_attribute_value(
                            shape, 'overrideColorRGB', [color[0], color[1], color[2]])

        reply['success'] = True

    def get_joint_radius(self, data, reply):
        """Return the radius of the first selected joint (1.0 when none)."""
        result = 1.0

        joint_nodes = dcc.selected_nodes_of_type('joint')
        if joint_nodes:
            result = dcc.get_attribute_value(joint_nodes[0], 'radius')

        reply['success'] = True
        reply['result'] = result

    @dcc.undo_decorator()
    def create_control(self, data, reply):
        """Create a control curve from *data['control_data']*."""
        control_data = data['control_data']
        select_created_control = data.get('select_created_control', False)
        if not control_data:
            reply['success'] = False
            return

        curves = controllib.create_control_curve(**control_data)
        if not curves:
            reply['success'] = False
            return

        if select_created_control:
            dcc.select_node(curves[0], replace_selection=False)

        reply['success'] = True
        reply['result'] = curves

    @dcc.undo_decorator()
    def create_control_text(self, data, reply):
        """Create text-shaped control curves from *data['text']*/*data['font']*."""
        text = data['text']
        font = data['font']
        if not text:
            reply['msg'] = 'Impossible to create control text because no text defined'
            reply['success'] = False
            return
        if not font:
            reply['msg'] = 'Impossible to create control text because no font defined'
            reply['success'] = False
            return

        ccs = controllib.create_text_control(text=text, font=font)

        reply['success'] = True
        reply['result'] = ccs

    @dcc.undo_decorator()
    def replace_control_curves(self, data, reply):
        """Swap the curve shapes of each target control for the given control type."""
        target_objects = data['target_objects']
        control_type = data['control_type']
        controls_path = data['controls_path']
        keep_color = data['keep_color']

        new_controls = list()
        for control_name in target_objects:
            new_control = controllib.replace_control_curves(
                control_name, control_type=control_type, controls_path=controls_path, keep_color=keep_color)
            new_controls.append(new_control)

        reply['result'] = new_controls
        reply['success'] = True

    @dcc.undo_decorator()
    @dcc.suspend_refresh_decorator()
    def mirror_control(self, data, reply):
        """Mirror the given (or selected) controls across the requested plane."""
        mirror_plane = data['mirror_plane']
        mirror_color = data['mirror_color']
        from_name = data['from_name']
        to_name = data['to_name']
        mirror_mode = data['mirror_mode']
        mirror_replace = data['mirror_replace']
        keep_mirror_color = data['keep_mirror_color']
        # 'XY' / 'YZ' / 'ZX': the first letter names the mirror axis.
        mirror_axis = mirror_plane[0]

        nodes = data.get('nodes', list())
        if not nodes:
            nodes = dcc.selected_nodes()
        if not nodes:
            reply['msg'] = 'No nodes selected to mirror'
            reply['success'] = False
            return

        mirrored_controls = controllib.mirror_controls(
            nodes, mirror_axis=mirror_axis, mirror_mode=mirror_mode, mirror_color=mirror_color,
            mirror_replace=mirror_replace, from_name=from_name, to_name=to_name, keep_color=keep_mirror_color)

        reply['result'] = mirrored_controls
        reply['success'] = True

    def get_control_color(self, data, reply):
        """Return the RGB color (0-255 range) of the first selected curve shape."""
        filter_type = data['filter_type'] or filtertypes.CURVE_FILTER_TYPE

        curve_transforms = filtertypes.filter_by_type(
            filter_type, search_hierarchy=False, selection_only=True, dag=False, remove_maya_defaults=False,
            transforms_only=True, keep_order=True)
        if not curve_transforms:
            reply['msg'] = 'Impossible to get control color. Please select at least one curve object (transform)'
            reply['success'] = False
            return

        first_shape_node = shape_utils.filter_shapes_in_list(curve_transforms)[0]
        control_color = dcc.node_rgb_color(first_shape_node, linear=True)

        # We return the color in 0 to 255 range
        convert = True
        for color_channel in control_color:
            if color_channel > 1.0:
                # Already in 0-255 range; do not scale twice.
                convert = False
                break
        if convert:
            if control_color and isinstance(control_color, (list, tuple)):
                control_color = [color_channel * 255 for color_channel in control_color]

        reply['result'] = control_color
        reply['success'] = True

    @dcc.undo_decorator()
    def select_controls_by_color(self, data, reply):
        """Select scene nodes whose shapes match the given (or sampled) RGB color."""
        filter_type = data['filter_type'] or filtertypes.CURVE_FILTER_TYPE
        control_color = data['rgb_color']

        curve_transforms = None
        if not control_color:
            # No color supplied: sample it from the selection, falling back to
            # the whole scene.
            curve_transforms = filtertypes.filter_by_type(
                filter_type, search_hierarchy=False, selection_only=True, dag=False, remove_maya_defaults=False,
                transforms_only=True, keep_order=True)
            if not curve_transforms:
                curve_transforms = filtertypes.filter_by_type(
                    filter_type, search_hierarchy=False, selection_only=False, dag=False, remove_maya_defaults=False,
                    transforms_only=True, keep_order=True)
            if not curve_transforms:
                reply['msg'] = 'No curve objects found in the scene with the given color'
                reply['success'] = False
                return

        if not control_color:
            first_shape_node = shape_utils.filter_shapes_in_list(curve_transforms)[0]
            control_color = dcc.node_rgb_color(first_shape_node, linear=True)
        if not control_color:
            reply['msg'] = 'No color given to select objects based in its value'
            reply['success'] = False
            return

        nodes = dcc.select_nodes_by_rgb_color(node_rgb_color=control_color)

        reply['result'] = nodes
        reply['success'] = True

    def scale_control(self, data, reply):
        """Scale the shapes of the given (or selected) controls by *data['value']*."""
        nodes = data.get('nodes', list())
        if not nodes:
            nodes = dcc.selected_nodes()
        if not nodes:
            reply['msg'] = 'No controls selected to scale'
            reply['success'] = False
            return

        value = data.get('value', 1.0)
        undo = data.get('undo', True)
        if undo:
            controllib.scale_controls(value, controls=nodes)
        else:
            # Fast path used while dragging: skip the undo chunk entirely.
            for node in nodes:
                dcc.scale_transform_shapes(node, value)

        reply['success'] = True
| 37.285714 | 113 | 0.589872 |
e0bbf24236d6f5d82917dffb2c364cefc8c0b80a | 807 | py | Python | manage.py | samsoluoch/Instagram | ea6305c0592c8efe173cf3e6b5f1c477650678db | [
"MIT"
] | 1 | 2019-10-02T04:22:23.000Z | 2019-10-02T04:22:23.000Z | manage.py | samsoluoch/Instagram | ea6305c0592c8efe173cf3e6b5f1c477650678db | [
"MIT"
] | 8 | 2020-06-05T20:52:19.000Z | 2022-03-12T00:15:13.000Z | manage.py | samsoluoch/Instagram | ea6305c0592c8efe173cf3e6b5f1c477650678db | [
"MIT"
] | 2 | 2021-12-07T02:51:32.000Z | 2021-12-13T13:30:27.000Z | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings before management imports run.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Instagram.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)
| 35.086957 | 77 | 0.643123 |
289ae5733586fbf0ef57bf8e2acbe2591a6d3ec3 | 2,647 | py | Python | yt_dlp/extractor/musescore.py | nxtreaming/yt-dlp | 385ffb467b2285e85a2a5495b90314ba1f8e0700 | [
"Unlicense"
] | 11 | 2022-01-06T22:09:50.000Z | 2022-03-12T22:26:22.000Z | yt_dlp/extractor/musescore.py | nxtreaming/yt-dlp | 385ffb467b2285e85a2a5495b90314ba1f8e0700 | [
"Unlicense"
] | 4 | 2022-02-25T08:20:18.000Z | 2022-03-17T16:16:20.000Z | yt_dlp/extractor/musescore.py | nxtreaming/yt-dlp | 385ffb467b2285e85a2a5495b90314ba1f8e0700 | [
"Unlicense"
] | 3 | 2022-02-19T08:59:13.000Z | 2022-03-06T16:11:21.000Z | from .common import InfoExtractor
class MuseScoreIE(InfoExtractor):
    """Extractor for musescore.com score pages (audio-only MP3 renditions)."""

    _VALID_URL = r'https?://(?:www\.)?musescore\.com/(?:user/\d+|[^/]+)(?:/scores)?/(?P<id>[^#&?]+)'
    _TESTS = [{
        'url': 'https://musescore.com/user/73797/scores/142975',
        'info_dict': {
            'id': '142975',
            'ext': 'mp3',
            'title': 'WA Mozart Marche Turque (Turkish March fingered)',
            'description': 'md5:7ede08230e4eaabd67a4a98bb54d07be',
            'thumbnail': r're:https?://(?:www\.)?musescore\.com/.*\.png[^$]+',
            'uploader': 'PapyPiano',
            'creator': 'Wolfgang Amadeus Mozart',
        }
    }, {
        'url': 'https://musescore.com/user/36164500/scores/6837638',
        'info_dict': {
            'id': '6837638',
            'ext': 'mp3',
            'title': 'Sweet Child O\' Mine – Guns N\' Roses sweet child',
            'description': 'md5:4dca71191c14abc312a0a4192492eace',
            'thumbnail': r're:https?://(?:www\.)?musescore\.com/.*\.png[^$]+',
            'uploader': 'roxbelviolin',
            'creator': 'Guns N´Roses Arr. Roxbel Violin',
        }
    }, {
        'url': 'https://musescore.com/classicman/fur-elise',
        'info_dict': {
            'id': '33816',
            'ext': 'mp3',
            'title': 'Für Elise – Beethoven',
            'description': 'md5:49515a3556d5ecaf9fa4b2514064ac34',
            'thumbnail': r're:https?://(?:www\.)?musescore\.com/.*\.png[^$]+',
            'uploader': 'ClassicMan',
            'creator': 'Ludwig van Beethoven (1770–1827)',
        }
    }, {
        'url': 'https://musescore.com/minh_cuteee/scores/6555384',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        webpage = self._download_webpage(url, None)
        # Vanity URLs redirect; the canonical og:url carries the numeric id.
        url = self._og_search_url(webpage) or url
        video_id = self._match_id(url)  # renamed from 'id' (shadowed builtin)
        # Undocumented jmuse endpoint; the authorization token is a fixed
        # public value observed in the site's own requests.
        mp3_url = self._download_json(
            f'https://musescore.com/api/jmuse?id={video_id}&index=0&type=mp3&v2=1', video_id,
            headers={'authorization': '63794e5461e4cfa046edfbdddfccc1ac16daffd2'})['info']['url']
        formats = [{
            'url': mp3_url,
            'ext': 'mp3',
            'vcodec': 'none',
        }]

        return {
            'id': video_id,
            'formats': formats,
            'title': self._og_search_title(webpage),
            'description': self._og_search_description(webpage),
            'thumbnail': self._og_search_thumbnail(webpage),
            'uploader': self._html_search_meta('musescore:author', webpage, 'uploader'),
            'creator': self._html_search_meta('musescore:composer', webpage, 'composer'),
        }
| 40.723077 | 123 | 0.529656 |
4f422b32c89c55d43e7e102db2c9453f8b38dfb1 | 11,164 | py | Python | colour/utilities/tests/test_data_structures.py | timgates42/colour | cea486394e3925718a0f3dd427edc9dd5b674f0c | [
"BSD-3-Clause"
] | null | null | null | colour/utilities/tests/test_data_structures.py | timgates42/colour | cea486394e3925718a0f3dd427edc9dd5b674f0c | [
"BSD-3-Clause"
] | null | null | null | colour/utilities/tests/test_data_structures.py | timgates42/colour | cea486394e3925718a0f3dd427edc9dd5b674f0c | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Defines the unit tests for the :mod:`colour.utilities.data_structures` module.
"""
import numpy as np
import operator
import pickle
import unittest
from colour.utilities import (Structure, Lookup, CaseInsensitiveMapping,
LazyCaseInsensitiveMapping)
# Standard colour-science module metadata.
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Production'
# Public API: the test case classes defined in this module.
__all__ = [
'TestStructure', 'TestLookup', 'TestCaseInsensitiveMapping',
'TestLazyCaseInsensitiveMapping'
]
class TestStructure(unittest.TestCase):
    """
    Defines :class:`colour.utilities.data_structures.Structure` class unit
    tests methods.
    """

    def test_Structure(self):
        """
        Tests :class:`colour.utilities.data_structures.Structure` class.
        """

        structure = Structure(John='Doe', Jane='Doe')
        self.assertIn('John', structure)
        self.assertTrue(hasattr(structure, 'John'))

        # Attribute and item access must stay in sync in both directions.
        setattr(structure, 'John', 'Nemo')
        self.assertEqual(structure['John'], 'Nemo')

        structure['John'] = 'Vador'
        self.assertEqual(structure['John'], 'Vador')

        del structure['John']
        self.assertNotIn('John', structure)
        self.assertFalse(hasattr(structure, 'John'))

        structure.John = 'Doe'
        self.assertIn('John', structure)
        self.assertTrue(hasattr(structure, 'John'))

        del structure.John
        self.assertNotIn('John', structure)
        self.assertFalse(hasattr(structure, 'John'))

        structure = Structure(John=None, Jane=None)
        self.assertIsNone(structure.John)
        self.assertIsNone(structure['John'])

        structure.update(**{'John': 'Doe', 'Jane': 'Doe'})
        self.assertEqual(structure.John, 'Doe')
        self.assertEqual(structure['John'], 'Doe')

    def test_Structure_pickle(self):
        """
        Tests :class:`colour.utilities.data_structures.Structure` class
        pickling.
        """

        structure = Structure(John='Doe', Jane='Doe')

        data = pickle.dumps(structure)
        data = pickle.loads(data)
        self.assertEqual(structure, data)

        data = pickle.dumps(structure, pickle.HIGHEST_PROTOCOL)
        data = pickle.loads(data)
        self.assertEqual(structure, data)
class TestLookup(unittest.TestCase):
    """
    Defines :class:`colour.utilities.data_structures.Lookup` class unit tests
    methods.
    """

    def test_required_methods(self):
        """
        Tests presence of required methods.
        """

        required_methods = ('keys_from_value', 'first_key_from_value')

        for method in required_methods:
            self.assertIn(method, dir(Lookup))

    def test_keys_from_value(self):
        """
        Tests :meth:`colour.utilities.data_structures.Lookup.keys_from_value`
        method.
        """

        lookup = Lookup(John='Doe', Jane='Doe', Luke='Skywalker')
        self.assertListEqual(['Jane', 'John'],
                             sorted(lookup.keys_from_value('Doe')))

        # Array values must compare element-wise, not by identity.
        lookup = Lookup(
            A=np.array([0, 1, 2]),
            B=np.array([0, 1, 2]),
            C=np.array([1, 2, 3]))
        self.assertListEqual(['A', 'B'],
                             sorted(
                                 lookup.keys_from_value(np.array([0, 1, 2]))))

    def test_first_key_from_value(self):
        """
        Tests :meth:`colour.utilities.data_structures.\
Lookup.first_key_from_value` method.
        """

        lookup = Lookup(first_name='John', last_name='Doe', gender='male')
        self.assertEqual('first_name', lookup.first_key_from_value('John'))

        lookup = Lookup(
            A=np.array([0, 1, 2]),
            B=np.array([1, 2, 3]),
            C=np.array([2, 3, 4]))
        self.assertEqual('A', lookup.first_key_from_value(np.array([0, 1, 2])))

    def test_raise_exception_first_key_from_value(self):
        """
        Tests :meth:`colour.utilities.data_structures.\
Lookup.first_key_from_value` method raised exception.
        """

        self.assertRaises(IndexError, Lookup().first_key_from_value, 'John')
class TestCaseInsensitiveMapping(unittest.TestCase):
    """
    Defines :class:`colour.utilities.data_structures.CaseInsensitiveMapping`
    class unit tests methods.
    """

    def test_required_attributes(self):
        """
        Tests presence of required attributes.
        """

        for required_attribute in ('data', ):
            self.assertIn(required_attribute, dir(CaseInsensitiveMapping))

    def test_required_methods(self):
        """
        Tests presence of required methods.
        """

        for required_method in ('__init__', '__setitem__', '__getitem__',
                                '__delitem__', '__contains__', '__iter__',
                                '__len__', '__eq__', '__ne__', '__repr__',
                                'copy', 'lower_items'):
            self.assertIn(required_method, dir(CaseInsensitiveMapping))

    def test_data(self):
        """
        Tests :meth:`colour.utilities.data_structures.\
CaseInsensitiveMapping.data` property.
        """

        mapping = CaseInsensitiveMapping({'John': 'Doe', 'Jane': 'Doe'})
        # Keys are stored lowercased alongside their original casing.
        self.assertDictEqual(mapping.data, {
            'jane': ('Jane', 'Doe'),
            'john': ('John', 'Doe')
        })

    def test__setitem__(self):
        """
        Tests :meth:`colour.utilities.data_structures.\
CaseInsensitiveMapping.__setitem__` method.
        """

        mapping = CaseInsensitiveMapping()
        mapping['John'] = 'Doe'
        for key in ('John', 'john'):
            self.assertEqual(mapping[key], 'Doe')

    def test__getitem__(self):
        """
        Tests :meth:`colour.utilities.data_structures.\
CaseInsensitiveMapping.__getitem__` method.
        """

        mapping = CaseInsensitiveMapping(John='Doe', Jane='Doe')
        for key in ('John', 'john', 'Jane', 'jane'):
            self.assertEqual(mapping[key], 'Doe')

    def test__delitem__(self):
        """
        Tests :meth:`colour.utilities.data_structures.\
CaseInsensitiveMapping.__delitem__` method.
        """

        mapping = CaseInsensitiveMapping(John='Doe', Jane='Doe')
        # Deleting through either casing removes the item entirely.
        del mapping['john']
        self.assertNotIn('John', mapping)
        del mapping['Jane']
        self.assertNotIn('jane', mapping)
        self.assertEqual(0, len(mapping))

    def test__contains__(self):
        """
        Tests :meth:`colour.utilities.data_structures.\
CaseInsensitiveMapping.__contains__` method.
        """

        mapping = CaseInsensitiveMapping(John='Doe', Jane='Doe')
        for key in ('John', 'john', 'Jane', 'jane'):
            self.assertIn(key, mapping)

    def test__iter__(self):
        """
        Tests :meth:`colour.utilities.data_structures.\
CaseInsensitiveMapping.__iter__` method.
        """

        mapping = CaseInsensitiveMapping(John='Doe', Jane='Doe')
        # Iteration yields the original (cased) keys.
        self.assertListEqual(sorted(mapping), ['Jane', 'John'])

    def test__len__(self):
        """
        Tests :meth:`colour.utilities.data_structures.\
CaseInsensitiveMapping.__len__` method.
        """

        self.assertEqual(0, len(CaseInsensitiveMapping()))
        self.assertEqual(2,
                         len(CaseInsensitiveMapping(John='Doe', Jane='Doe')))

    def test__eq__(self):
        """
        Tests :meth:`colour.utilities.data_structures.\
CaseInsensitiveMapping.__eq__` method.
        """

        mapping_a = CaseInsensitiveMapping(John='Doe', Jane='Doe')
        mapping_b = CaseInsensitiveMapping(John='Doe', Jane='Doe')
        mapping_c = CaseInsensitiveMapping(john='Doe', jane='Doe')

        self.assertEqual(mapping_a, mapping_b)
        # Equality ignores key casing.
        self.assertEqual(mapping_b, mapping_c)

    def test_raise_exception__eq__(self):
        """
        Tests :meth:`colour.utilities.data_structures.\
CaseInsensitiveMapping.__eq__` method raised exception.
        """

        self.assertRaises(ValueError, operator.eq,
                          CaseInsensitiveMapping(John='Doe', Jane='Doe'),
                          ['John', 'Doe', 'Jane', 'Doe'])

    def test__ne__(self):
        """
        Tests :meth:`colour.utilities.data_structures.\
CaseInsensitiveMapping.__ne__` method.
        """

        self.assertNotEqual(
            CaseInsensitiveMapping(John='Doe', Jane='Doe'),
            CaseInsensitiveMapping(Gi='Doe', Jane='Doe'))

    def test_raise_exception__ne__(self):
        """
        Tests :meth:`colour.utilities.data_structures.\
CaseInsensitiveMapping.__ne__` method raised exception.
        """

        self.assertRaises(ValueError, operator.ne,
                          CaseInsensitiveMapping(John='Doe', Jane='Doe'),
                          ['John', 'Doe', 'Jane', 'Doe'])

    def test_copy(self):
        """
        Tests :meth:`colour.utilities.data_structures.\
CaseInsensitiveMapping.copy` method.
        """

        mapping = CaseInsensitiveMapping(John='Doe', Jane='Doe')
        mapping_copy = mapping.copy()

        # Equal content but a distinct object.
        self.assertEqual(mapping, mapping_copy)
        self.assertNotEqual(id(mapping), id(mapping_copy))

    def test_lower_items(self):
        """
        Tests :meth:`colour.utilities.data_structures.\
CaseInsensitiveMapping.lower_items` method.
        """

        mapping = CaseInsensitiveMapping(John='Doe', Jane='Doe')
        self.assertListEqual(
            sorted(mapping.lower_items()), [('jane', 'Doe'), ('john', 'Doe')])
class TestLazyCaseInsensitiveMapping(unittest.TestCase):
    """
    Defines :class:`colour.utilities.data_structures.\
LazyCaseInsensitiveMapping` class unit tests methods.
    """

    def test_required_attributes(self):
        """
        Tests presence of required attributes.
        """

        for required_attribute in ():
            self.assertIn(required_attribute, dir(LazyCaseInsensitiveMapping))

    def test_required_methods(self):
        """
        Tests presence of required methods.
        """

        for required_method in ('__getitem__', ):
            self.assertIn(required_method, dir(LazyCaseInsensitiveMapping))

    def test__getitem__(self):
        """
        Tests :meth:`colour.utilities.data_structures.\
LazyCaseInsensitiveMapping.__getitem__` method.
        """

        mapping = LazyCaseInsensitiveMapping(John='Doe', Jane=lambda: 'Doe')
        # Plain values and lazily-evaluated callables both resolve to the
        # same value, regardless of key casing.
        for key in ('John', 'john', 'Jane', 'jane'):
            self.assertEqual(mapping[key], 'Doe')
# Allow running this test module directly with ``python <module>.py``.
if __name__ == '__main__':
    unittest.main()
| 29.148825 | 79 | 0.609549 |
47fe8bc2edff88cb2c56cb18378a398a0b47fffb | 38,444 | py | Python | python/ray/tests/test_autoscaler.py | eisber/ray | 94a286ef1d8ad5a3093b7f996a811727fa0e2d3e | [
"Apache-2.0"
] | 4 | 2019-10-18T17:44:58.000Z | 2021-04-14T14:37:21.000Z | python/ray/tests/test_autoscaler.py | eisber/ray | 94a286ef1d8ad5a3093b7f996a811727fa0e2d3e | [
"Apache-2.0"
] | 1 | 2022-03-30T17:52:44.000Z | 2022-03-30T17:52:44.000Z | python/ray/tests/test_autoscaler.py | eisber/ray | 94a286ef1d8ad5a3093b7f996a811727fa0e2d3e | [
"Apache-2.0"
] | 1 | 2020-06-26T07:54:25.000Z | 2020-06-26T07:54:25.000Z | import shutil
import tempfile
import threading
import time
import unittest
import yaml
import copy
import ray
import ray.services as services
from ray.autoscaler.autoscaler import StandardAutoscaler, LoadMetrics, \
fillout_defaults, validate_config
from ray.autoscaler.tags import TAG_RAY_NODE_TYPE, TAG_RAY_NODE_STATUS, \
STATUS_UP_TO_DATE, STATUS_UPDATE_FAILED
from ray.autoscaler.node_provider import NODE_PROVIDERS, NodeProvider
from ray.test_utils import RayTestTimeoutException
import pytest
class MockNode:
    """A fake cluster node tracked in memory by ``MockProvider``.

    Nodes start in the ``"pending"`` state and carry a tag dictionary plus
    fixed external / derived internal IP addresses.
    """

    def __init__(self, node_id, tags):
        self.node_id = node_id
        self.state = "pending"
        self.tags = tags
        self.external_ip = "1.2.3.4"
        self.internal_ip = "172.0.0.{}".format(self.node_id)

    def matches(self, tags):
        """Return True when every key/value pair in ``tags`` is present."""
        return all(
            key in self.tags and self.tags[key] == value
            for key, value in tags.items())
class MockProcessRunner:
    """Stub process runner that records commands instead of executing them.

    Commands containing any token from ``fail_cmds`` raise instead of being
    recorded, simulating a failing setup command.
    """

    def __init__(self, fail_cmds=None):
        # NOTE: the previous default was the mutable literal ``[]``, which
        # is shared across all instances created with the default; use a
        # ``None`` sentinel instead (behavior-compatible for callers).
        self.calls = []
        self.fail_cmds = fail_cmds if fail_cmds is not None else []

    def check_call(self, cmd, *args, **kwargs):
        """Record ``cmd``; raise if it contains a failing token."""
        for token in self.fail_cmds:
            if token in str(cmd):
                raise Exception("Failing command on purpose")
        self.calls.append(cmd)

    def assert_has_call(self, ip, pattern):
        """Return True if ``pattern`` occurs in a recorded call mentioning
        ``ip``; raise otherwise."""
        out = ""
        for cmd in self.calls:
            msg = " ".join(cmd)
            if ip in msg:
                out += msg
                out += "\n"
        if pattern in out:
            return True
        else:
            raise Exception("Did not find [{}] in [{}] for {}".format(
                pattern, out, ip))

    def assert_not_has_call(self, ip, pattern):
        """Return True if ``pattern`` occurs in no recorded call mentioning
        ``ip``; raise otherwise."""
        out = ""
        for cmd in self.calls:
            msg = " ".join(cmd)
            if ip in msg:
                out += msg
                out += "\n"
        if pattern in out:
            raise Exception("Found [{}] in [{}] for {}".format(
                pattern, out, ip))
        else:
            return True

    def clear_history(self):
        """Forget all recorded calls."""
        self.calls = []
class MockProvider(NodeProvider):
    """In-memory node provider used to exercise the autoscaler.

    ``ready_to_create`` gates node creation so tests can simulate slow
    launches; ``throw`` / ``fail_creates`` inject provider failures;
    ``cache_stopped`` makes termination stop (and reuse) nodes instead of
    terminating them.
    """

    def __init__(self, cache_stopped=False):
        self.mock_nodes = {}
        self.next_id = 0
        self.throw = False
        self.fail_creates = False
        self.ready_to_create = threading.Event()
        self.ready_to_create.set()
        self.cache_stopped = cache_stopped

    def non_terminated_nodes(self, tag_filters):
        if self.throw:
            raise Exception("oops")
        return [
            node.node_id for node in self.mock_nodes.values()
            if node.matches(tag_filters)
            and node.state not in ["stopped", "terminated"]
        ]

    def non_terminated_node_ips(self, tag_filters):
        if self.throw:
            raise Exception("oops")
        return [
            node.internal_ip for node in self.mock_nodes.values()
            if node.matches(tag_filters)
            and node.state not in ["stopped", "terminated"]
        ]

    def is_running(self, node_id):
        return self.mock_nodes[node_id].state == "running"

    def is_terminated(self, node_id):
        return self.mock_nodes[node_id].state in ["stopped", "terminated"]

    def node_tags(self, node_id):
        return self.mock_nodes[node_id].tags

    def internal_ip(self, node_id):
        return self.mock_nodes[node_id].internal_ip

    def external_ip(self, node_id):
        return self.mock_nodes[node_id].external_ip

    def create_node(self, node_config, tags, count):
        # Block until the test releases creation, then honor the
        # injected-failure flag.
        self.ready_to_create.wait()
        if self.fail_creates:
            return
        remaining = count
        if self.cache_stopped:
            # Restart cached stopped nodes before creating fresh ones.
            for node in self.mock_nodes.values():
                if remaining > 0 and node.state == "stopped":
                    remaining -= 1
                    node.state = "pending"
                    node.tags.update(tags)
        for _ in range(remaining):
            self.mock_nodes[self.next_id] = MockNode(self.next_id,
                                                     tags.copy())
            self.next_id += 1

    def set_node_tags(self, node_id, tags):
        self.mock_nodes[node_id].tags.update(tags)

    def terminate_node(self, node_id):
        new_state = "stopped" if self.cache_stopped else "terminated"
        self.mock_nodes[node_id].state = new_state

    def finish_starting_nodes(self):
        # Simulate the cloud provider completing all pending launches.
        for node in self.mock_nodes.values():
            if node.state == "pending":
                node.state = "running"
# Minimal autoscaler configuration shared by the tests below; individual
# tests copy it and tweak fields (e.g. min/max_workers) as needed.
SMALL_CLUSTER = {
    "cluster_name": "default",
    "min_workers": 2,
    "max_workers": 2,
    "initial_workers": 0,
    "autoscaling_mode": "default",
    "target_utilization_fraction": 0.8,
    "idle_timeout_minutes": 5,
    "provider": {
        "type": "mock",
        "region": "us-east-1",
        "availability_zone": "us-east-1a",
    },
    "docker": {
        "image": "example",
        "container_name": "mock",
    },
    "auth": {
        "ssh_user": "ubuntu",
        "ssh_private_key": "/dev/null",
    },
    "head_node": {
        "TestProp": 1,
    },
    "worker_nodes": {
        "TestProp": 2,
    },
    "file_mounts": {},
    "initialization_commands": ["init_cmd"],
    "setup_commands": ["setup_cmd"],
    "head_setup_commands": ["head_setup_cmd"],
    "worker_setup_commands": ["worker_setup_cmd"],
    "head_start_ray_commands": ["start_ray_head"],
    "worker_start_ray_commands": ["start_ray_worker"],
}
class LoadMetricsTest(unittest.TestCase):
    """Unit tests for ``LoadMetrics`` usage accounting.

    ``lm.update(ip, static_resources, available_resources, resource_load)``
    records a heartbeat for a node; ``approx_workers_used()`` folds those
    heartbeats into an approximate count of busy workers.
    """

    def testUpdate(self):
        # A half-used 2-CPU node counts as 0.5 workers; fully used as 1.0.
        lm = LoadMetrics()
        lm.update("1.1.1.1", {"CPU": 2}, {"CPU": 1}, {})
        assert lm.approx_workers_used() == 0.5
        lm.update("1.1.1.1", {"CPU": 2}, {"CPU": 0}, {})
        assert lm.approx_workers_used() == 1.0
        lm.update("2.2.2.2", {"CPU": 2}, {"CPU": 0}, {})
        assert lm.approx_workers_used() == 2.0

    def testLoadMessages(self):
        lm = LoadMetrics()
        lm.update("1.1.1.1", {"CPU": 2}, {"CPU": 1}, {})
        self.assertEqual(lm.approx_workers_used(), 0.5)
        # A non-empty waiting resource load rounds the node up to fully busy.
        lm.update("1.1.1.1", {"CPU": 2}, {"CPU": 1}, {"CPU": 1})
        self.assertEqual(lm.approx_workers_used(), 1.0)

        # Both nodes count as busy since there is a queue on one.
        lm.update("2.2.2.2", {"CPU": 2}, {"CPU": 2}, {})
        self.assertEqual(lm.approx_workers_used(), 2.0)
        lm.update("2.2.2.2", {"CPU": 2}, {"CPU": 0}, {})
        self.assertEqual(lm.approx_workers_used(), 2.0)
        lm.update("2.2.2.2", {"CPU": 2}, {"CPU": 1}, {})
        self.assertEqual(lm.approx_workers_used(), 2.0)

        # No queue anymore, so we're back to exact accounting.
        lm.update("1.1.1.1", {"CPU": 2}, {"CPU": 0}, {})
        self.assertEqual(lm.approx_workers_used(), 1.5)

        # A queue on any node inflates the estimate to all connected nodes.
        lm.update("2.2.2.2", {"CPU": 2}, {"CPU": 1}, {"GPU": 1})
        self.assertEqual(lm.approx_workers_used(), 2.0)
        lm.update("3.3.3.3", {"CPU": 2}, {"CPU": 1}, {})
        lm.update("4.3.3.3", {"CPU": 2}, {"CPU": 1}, {})
        lm.update("5.3.3.3", {"CPU": 2}, {"CPU": 1}, {})
        lm.update("6.3.3.3", {"CPU": 2}, {"CPU": 1}, {})
        lm.update("7.3.3.3", {"CPU": 2}, {"CPU": 1}, {})
        lm.update("8.3.3.3", {"CPU": 2}, {"CPU": 1}, {})
        self.assertEqual(lm.approx_workers_used(), 8.0)

        lm.update("2.2.2.2", {"CPU": 2}, {"CPU": 1}, {})  # no queue anymore
        self.assertEqual(lm.approx_workers_used(), 4.5)

    def testPruneByNodeIp(self):
        # Pruning drops metrics for IPs outside the active set.
        lm = LoadMetrics()
        lm.update("1.1.1.1", {"CPU": 1}, {"CPU": 0}, {})
        lm.update("2.2.2.2", {"CPU": 1}, {"CPU": 0}, {})
        lm.prune_active_ips({"1.1.1.1", "4.4.4.4"})
        assert lm.approx_workers_used() == 1.0

    def testBottleneckResource(self):
        # The most-utilized resource (here 14/16 GPU) drives the estimate.
        lm = LoadMetrics()
        lm.update("1.1.1.1", {"CPU": 2}, {"CPU": 0}, {})
        lm.update("2.2.2.2", {"CPU": 2, "GPU": 16}, {"CPU": 2, "GPU": 2}, {})
        assert lm.approx_workers_used() == 1.88

    def testHeartbeat(self):
        # Both update() and mark_active() register a heartbeat timestamp.
        lm = LoadMetrics()
        lm.update("1.1.1.1", {"CPU": 2}, {"CPU": 1}, {})
        lm.mark_active("2.2.2.2")
        assert "1.1.1.1" in lm.last_heartbeat_time_by_ip
        assert "2.2.2.2" in lm.last_heartbeat_time_by_ip
        assert "3.3.3.3" not in lm.last_heartbeat_time_by_ip

    def testDebugString(self):
        lm = LoadMetrics()
        lm.update("1.1.1.1", {"CPU": 2}, {"CPU": 0}, {})
        lm.update("2.2.2.2", {"CPU": 2, "GPU": 16}, {"CPU": 2, "GPU": 2}, {})
        # NOTE(review): memory appears to be reported in ~50 MiB units
        # (20 units -> "1.05 GiB" below) — inferred from the expected
        # string, confirm against LoadMetrics.info_string().
        lm.update("3.3.3.3", {
            "memory": 20,
            "object_store_memory": 40
        }, {
            "memory": 0,
            "object_store_memory": 20
        }, {})
        debug = lm.info_string()
        assert ("ResourceUsage=2.0/4.0 CPU, 14.0/16.0 GPU, "
                "1.05 GiB/1.05 GiB memory, "
                "1.05 GiB/2.1 GiB object_store_memory") in debug
        assert "NumNodesConnected=3" in debug
        assert "NumNodesUsed=2.88" in debug
class AutoscalingTest(unittest.TestCase):
    def setUp(self):
        # Register the mock provider factory and create a scratch directory
        # for the YAML cluster configuration files the tests write.
        NODE_PROVIDERS["mock"] = \
            lambda: (None, self.create_provider)
        self.provider = None
        self.tmpdir = tempfile.mkdtemp()
    def tearDown(self):
        # Undo setUp: drop the mock provider registration, remove the
        # scratch directory, and shut down any Ray instance a test started.
        del NODE_PROVIDERS["mock"]
        shutil.rmtree(self.tmpdir)
        ray.shutdown()
def waitFor(self, condition, num_retries=50):
for _ in range(num_retries):
if condition():
return
time.sleep(.1)
raise RayTestTimeoutException(
"Timed out waiting for {}".format(condition))
def waitForNodes(self, expected, comparison=None, tag_filters={}):
MAX_ITER = 50
for i in range(MAX_ITER):
n = len(self.provider.non_terminated_nodes(tag_filters))
if comparison is None:
comparison = self.assertEqual
try:
comparison(n, expected)
return
except Exception:
if i == MAX_ITER - 1:
raise
time.sleep(.1)
    def create_provider(self, config, cluster_name):
        # Factory registered under NODE_PROVIDERS["mock"] in setUp; returns
        # the MockProvider instance the individual test prepared.
        assert self.provider
        return self.provider
def write_config(self, config):
path = self.tmpdir + "/simple.yaml"
with open(path, "w") as f:
f.write(yaml.dump(config))
return path
def testInvalidConfig(self):
invalid_config = "/dev/null"
with pytest.raises(ValueError):
StandardAutoscaler(
invalid_config, LoadMetrics(), update_interval_s=0)
def testValidation(self):
"""Ensures that schema validation is working."""
config = copy.deepcopy(SMALL_CLUSTER)
try:
validate_config(config)
except Exception:
self.fail("Test config did not pass validation test!")
config["blah"] = "blah"
with pytest.raises(ValueError):
validate_config(config)
del config["blah"]
config["provider"]["blah"] = "blah"
with pytest.raises(ValueError):
validate_config(config)
del config["provider"]["blah"]
del config["provider"]
with pytest.raises(ValueError):
validate_config(config)
def testValidateDefaultConfig(self):
config = {}
config["provider"] = {
"type": "aws",
"region": "us-east-1",
"availability_zone": "us-east-1a",
}
config = fillout_defaults(config)
try:
validate_config(config)
except Exception:
self.fail("Default config did not pass validation test!")
def testScaleUp(self):
config_path = self.write_config(SMALL_CLUSTER)
self.provider = MockProvider()
runner = MockProcessRunner()
autoscaler = StandardAutoscaler(
config_path,
LoadMetrics(),
max_failures=0,
process_runner=runner,
update_interval_s=0)
assert len(self.provider.non_terminated_nodes({})) == 0
autoscaler.update()
self.waitForNodes(2)
autoscaler.update()
self.waitForNodes(2)
def testManualAutoscaling(self):
config = SMALL_CLUSTER.copy()
config["min_workers"] = 0
config["max_workers"] = 50
cores_per_node = 2
config["worker_nodes"] = {"Resources": {"CPU": cores_per_node}}
config_path = self.write_config(config)
self.provider = MockProvider()
runner = MockProcessRunner()
autoscaler = StandardAutoscaler(
config_path,
LoadMetrics(),
max_launch_batch=5,
max_concurrent_launches=5,
max_failures=0,
process_runner=runner,
update_interval_s=0)
assert len(self.provider.non_terminated_nodes({})) == 0
autoscaler.update()
self.waitForNodes(0)
autoscaler.request_resources({"CPU": cores_per_node * 10})
for _ in range(5): # Maximum launch batch is 5
time.sleep(0.01)
autoscaler.update()
self.waitForNodes(10)
autoscaler.request_resources({"CPU": cores_per_node * 30})
for _ in range(4): # Maximum launch batch is 5
time.sleep(0.01)
autoscaler.update()
self.waitForNodes(30)
def testTerminateOutdatedNodesGracefully(self):
config = SMALL_CLUSTER.copy()
config["min_workers"] = 5
config["max_workers"] = 5
config_path = self.write_config(config)
self.provider = MockProvider()
self.provider.create_node({}, {TAG_RAY_NODE_TYPE: "worker"}, 10)
runner = MockProcessRunner()
autoscaler = StandardAutoscaler(
config_path,
LoadMetrics(),
max_failures=0,
process_runner=runner,
update_interval_s=0)
self.waitForNodes(10)
# Gradually scales down to meet target size, never going too low
for _ in range(10):
autoscaler.update()
self.waitForNodes(5, comparison=self.assertLessEqual)
self.waitForNodes(4, comparison=self.assertGreaterEqual)
# Eventually reaches steady state
self.waitForNodes(5)
def testDynamicScaling(self):
config_path = self.write_config(SMALL_CLUSTER)
self.provider = MockProvider()
runner = MockProcessRunner()
autoscaler = StandardAutoscaler(
config_path,
LoadMetrics(),
max_launch_batch=5,
max_concurrent_launches=5,
max_failures=0,
process_runner=runner,
update_interval_s=0)
self.waitForNodes(0)
autoscaler.update()
self.waitForNodes(2)
# Update the config to reduce the cluster size
new_config = SMALL_CLUSTER.copy()
new_config["max_workers"] = 1
self.write_config(new_config)
autoscaler.update()
self.waitForNodes(1)
# Update the config to reduce the cluster size
new_config["min_workers"] = 10
new_config["max_workers"] = 10
self.write_config(new_config)
autoscaler.update()
self.waitForNodes(6)
autoscaler.update()
self.waitForNodes(10)
def testInitialWorkers(self):
config = SMALL_CLUSTER.copy()
config["min_workers"] = 0
config["max_workers"] = 20
config["initial_workers"] = 10
config_path = self.write_config(config)
self.provider = MockProvider()
runner = MockProcessRunner()
autoscaler = StandardAutoscaler(
config_path,
LoadMetrics(),
max_launch_batch=5,
max_concurrent_launches=5,
max_failures=0,
process_runner=runner,
update_interval_s=0)
self.waitForNodes(0)
autoscaler.update()
self.waitForNodes(5) # expected due to batch sizes and concurrency
autoscaler.update()
self.waitForNodes(10)
autoscaler.update()
def testAggressiveAutoscaling(self):
config = SMALL_CLUSTER.copy()
config["min_workers"] = 0
config["max_workers"] = 20
config["initial_workers"] = 10
config["idle_timeout_minutes"] = 0
config["autoscaling_mode"] = "aggressive"
config_path = self.write_config(config)
self.provider = MockProvider()
self.provider.create_node({}, {TAG_RAY_NODE_TYPE: "head"}, 1)
head_ip = self.provider.non_terminated_node_ips(
tag_filters={TAG_RAY_NODE_TYPE: "head"}, )[0]
runner = MockProcessRunner()
lm = LoadMetrics()
lm.local_ip = head_ip
autoscaler = StandardAutoscaler(
config_path,
lm,
max_launch_batch=5,
max_concurrent_launches=5,
max_failures=0,
process_runner=runner,
update_interval_s=0)
self.waitForNodes(1)
autoscaler.update()
self.waitForNodes(6) # expected due to batch sizes and concurrency
autoscaler.update()
self.waitForNodes(11)
# Connect the head and workers to end the bringup phase
addrs = self.provider.non_terminated_node_ips(
tag_filters={TAG_RAY_NODE_TYPE: "worker"}, )
addrs += head_ip
for addr in addrs:
lm.update(addr, {"CPU": 2}, {"CPU": 0}, {})
lm.update(addr, {"CPU": 2}, {"CPU": 2}, {})
assert autoscaler.bringup
autoscaler.update()
assert not autoscaler.bringup
autoscaler.update()
self.waitForNodes(1)
# All of the nodes are down. Simulate some load on the head node
lm.update(head_ip, {"CPU": 2}, {"CPU": 0}, {})
autoscaler.update()
self.waitForNodes(6) # expected due to batch sizes and concurrency
autoscaler.update()
self.waitForNodes(11)
def testDelayedLaunch(self):
config_path = self.write_config(SMALL_CLUSTER)
self.provider = MockProvider()
runner = MockProcessRunner()
autoscaler = StandardAutoscaler(
config_path,
LoadMetrics(),
max_launch_batch=5,
max_concurrent_launches=5,
max_failures=0,
process_runner=runner,
update_interval_s=0)
assert len(self.provider.non_terminated_nodes({})) == 0
# Update will try to create, but will block until we set the flag
self.provider.ready_to_create.clear()
autoscaler.update()
assert autoscaler.num_launches_pending.value == 2
assert len(self.provider.non_terminated_nodes({})) == 0
# Set the flag, check it updates
self.provider.ready_to_create.set()
self.waitForNodes(2)
assert autoscaler.num_launches_pending.value == 0
# Update the config to reduce the cluster size
new_config = SMALL_CLUSTER.copy()
new_config["max_workers"] = 1
self.write_config(new_config)
autoscaler.update()
assert len(self.provider.non_terminated_nodes({})) == 1
def testDelayedLaunchWithFailure(self):
config = SMALL_CLUSTER.copy()
config["min_workers"] = 10
config["max_workers"] = 10
config_path = self.write_config(config)
self.provider = MockProvider()
runner = MockProcessRunner()
autoscaler = StandardAutoscaler(
config_path,
LoadMetrics(),
max_launch_batch=5,
max_concurrent_launches=8,
max_failures=0,
process_runner=runner,
update_interval_s=0)
assert len(self.provider.non_terminated_nodes({})) == 0
# update() should launch a wave of 5 nodes (max_launch_batch)
# Force this first wave to block.
rtc1 = self.provider.ready_to_create
rtc1.clear()
autoscaler.update()
# Synchronization: wait for launchy thread to be blocked on rtc1
if hasattr(rtc1, "_cond"): # Python 3.5
waiters = rtc1._cond._waiters
else: # Python 2.7
waiters = rtc1._Event__cond._Condition__waiters
self.waitFor(lambda: len(waiters) == 1)
assert autoscaler.num_launches_pending.value == 5
assert len(self.provider.non_terminated_nodes({})) == 0
# Call update() to launch a second wave of 3 nodes,
# as 5 + 3 = 8 = max_concurrent_launches.
# Make this wave complete immediately.
rtc2 = threading.Event()
self.provider.ready_to_create = rtc2
rtc2.set()
autoscaler.update()
self.waitForNodes(3)
assert autoscaler.num_launches_pending.value == 5
# The first wave of 5 will now tragically fail
self.provider.fail_creates = True
rtc1.set()
self.waitFor(lambda: autoscaler.num_launches_pending.value == 0)
assert len(self.provider.non_terminated_nodes({})) == 3
# Retry the first wave, allowing it to succeed this time
self.provider.fail_creates = False
autoscaler.update()
self.waitForNodes(8)
assert autoscaler.num_launches_pending.value == 0
# Final wave of 2 nodes
autoscaler.update()
self.waitForNodes(10)
assert autoscaler.num_launches_pending.value == 0
def testUpdateThrottling(self):
config_path = self.write_config(SMALL_CLUSTER)
self.provider = MockProvider()
runner = MockProcessRunner()
autoscaler = StandardAutoscaler(
config_path,
LoadMetrics(),
max_launch_batch=5,
max_concurrent_launches=5,
max_failures=0,
process_runner=runner,
update_interval_s=10)
autoscaler.update()
self.waitForNodes(2)
assert autoscaler.num_launches_pending.value == 0
new_config = SMALL_CLUSTER.copy()
new_config["max_workers"] = 1
self.write_config(new_config)
autoscaler.update()
# not updated yet
# note that node termination happens in the main thread, so
# we do not need to add any delay here before checking
assert len(self.provider.non_terminated_nodes({})) == 2
assert autoscaler.num_launches_pending.value == 0
def testLaunchConfigChange(self):
config_path = self.write_config(SMALL_CLUSTER)
self.provider = MockProvider()
autoscaler = StandardAutoscaler(
config_path, LoadMetrics(), max_failures=0, update_interval_s=0)
autoscaler.update()
self.waitForNodes(2)
# Update the config to change the node type
new_config = SMALL_CLUSTER.copy()
new_config["worker_nodes"]["InstanceType"] = "updated"
self.write_config(new_config)
self.provider.ready_to_create.clear()
for _ in range(5):
autoscaler.update()
self.waitForNodes(0)
self.provider.ready_to_create.set()
self.waitForNodes(2)
def testIgnoresCorruptedConfig(self):
config_path = self.write_config(SMALL_CLUSTER)
self.provider = MockProvider()
runner = MockProcessRunner()
autoscaler = StandardAutoscaler(
config_path,
LoadMetrics(),
max_launch_batch=10,
max_concurrent_launches=10,
process_runner=runner,
max_failures=0,
update_interval_s=0)
autoscaler.update()
self.waitForNodes(2)
# Write a corrupted config
self.write_config("asdf")
for _ in range(10):
autoscaler.update()
time.sleep(0.1)
assert autoscaler.num_launches_pending.value == 0
assert len(self.provider.non_terminated_nodes({})) == 2
# New a good config again
new_config = SMALL_CLUSTER.copy()
new_config["min_workers"] = 10
new_config["max_workers"] = 10
self.write_config(new_config)
autoscaler.update()
self.waitForNodes(10)
def testMaxFailures(self):
config_path = self.write_config(SMALL_CLUSTER)
self.provider = MockProvider()
self.provider.throw = True
runner = MockProcessRunner()
autoscaler = StandardAutoscaler(
config_path,
LoadMetrics(),
max_failures=2,
process_runner=runner,
update_interval_s=0)
autoscaler.update()
autoscaler.update()
with pytest.raises(Exception):
autoscaler.update()
def testLaunchNewNodeOnOutOfBandTerminate(self):
config_path = self.write_config(SMALL_CLUSTER)
self.provider = MockProvider()
runner = MockProcessRunner()
autoscaler = StandardAutoscaler(
config_path,
LoadMetrics(),
max_failures=0,
process_runner=runner,
update_interval_s=0)
autoscaler.update()
autoscaler.update()
self.waitForNodes(2)
for node in self.provider.mock_nodes.values():
node.state = "terminated"
assert len(self.provider.non_terminated_nodes({})) == 0
autoscaler.update()
self.waitForNodes(2)
def testConfiguresNewNodes(self):
config_path = self.write_config(SMALL_CLUSTER)
self.provider = MockProvider()
runner = MockProcessRunner()
autoscaler = StandardAutoscaler(
config_path,
LoadMetrics(),
max_failures=0,
process_runner=runner,
update_interval_s=0)
autoscaler.update()
autoscaler.update()
self.waitForNodes(2)
self.provider.finish_starting_nodes()
autoscaler.update()
self.waitForNodes(
2, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE})
def testReportsConfigFailures(self):
config = copy.deepcopy(SMALL_CLUSTER)
config["provider"]["type"] = "external"
config = fillout_defaults(config)
config["provider"]["type"] = "mock"
config_path = self.write_config(config)
self.provider = MockProvider()
runner = MockProcessRunner(fail_cmds=["setup_cmd"])
autoscaler = StandardAutoscaler(
config_path,
LoadMetrics(),
max_failures=0,
process_runner=runner,
update_interval_s=0)
autoscaler.update()
autoscaler.update()
self.waitForNodes(2)
self.provider.finish_starting_nodes()
autoscaler.update()
self.waitForNodes(
2, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UPDATE_FAILED})
def testConfiguresOutdatedNodes(self):
config_path = self.write_config(SMALL_CLUSTER)
self.provider = MockProvider()
runner = MockProcessRunner()
autoscaler = StandardAutoscaler(
config_path,
LoadMetrics(),
max_failures=0,
process_runner=runner,
update_interval_s=0)
autoscaler.update()
autoscaler.update()
self.waitForNodes(2)
self.provider.finish_starting_nodes()
autoscaler.update()
self.waitForNodes(
2, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE})
runner.calls = []
new_config = SMALL_CLUSTER.copy()
new_config["worker_setup_commands"] = ["cmdX", "cmdY"]
self.write_config(new_config)
autoscaler.update()
autoscaler.update()
self.waitFor(lambda: len(runner.calls) > 0)
def testScaleUpBasedOnLoad(self):
config = SMALL_CLUSTER.copy()
config["min_workers"] = 1
config["max_workers"] = 10
config["target_utilization_fraction"] = 0.5
config_path = self.write_config(config)
self.provider = MockProvider()
lm = LoadMetrics()
runner = MockProcessRunner()
autoscaler = StandardAutoscaler(
config_path,
lm,
max_failures=0,
process_runner=runner,
update_interval_s=0)
assert len(self.provider.non_terminated_nodes({})) == 0
autoscaler.update()
self.waitForNodes(1)
autoscaler.update()
assert autoscaler.num_launches_pending.value == 0
assert len(self.provider.non_terminated_nodes({})) == 1
# Scales up as nodes are reported as used
local_ip = services.get_node_ip_address()
lm.update(local_ip, {"CPU": 2}, {"CPU": 0}, {}) # head
lm.update("172.0.0.0", {"CPU": 2}, {"CPU": 0}, {}) # worker 1
autoscaler.update()
self.waitForNodes(3)
lm.update("172.0.0.1", {"CPU": 2}, {"CPU": 0}, {})
autoscaler.update()
self.waitForNodes(5)
# Holds steady when load is removed
lm.update("172.0.0.0", {"CPU": 2}, {"CPU": 2}, {})
lm.update("172.0.0.1", {"CPU": 2}, {"CPU": 2}, {})
autoscaler.update()
assert autoscaler.num_launches_pending.value == 0
assert len(self.provider.non_terminated_nodes({})) == 5
# Scales down as nodes become unused
lm.last_used_time_by_ip["172.0.0.0"] = 0
lm.last_used_time_by_ip["172.0.0.1"] = 0
autoscaler.update()
assert autoscaler.num_launches_pending.value == 0
assert len(self.provider.non_terminated_nodes({})) == 3
lm.last_used_time_by_ip["172.0.0.2"] = 0
lm.last_used_time_by_ip["172.0.0.3"] = 0
autoscaler.update()
assert autoscaler.num_launches_pending.value == 0
assert len(self.provider.non_terminated_nodes({})) == 1
def testDontScaleBelowTarget(self):
config = SMALL_CLUSTER.copy()
config["min_workers"] = 0
config["max_workers"] = 2
config["target_utilization_fraction"] = 0.5
config_path = self.write_config(config)
self.provider = MockProvider()
lm = LoadMetrics()
runner = MockProcessRunner()
autoscaler = StandardAutoscaler(
config_path,
lm,
max_failures=0,
process_runner=runner,
update_interval_s=0)
assert len(self.provider.non_terminated_nodes({})) == 0
autoscaler.update()
assert autoscaler.num_launches_pending.value == 0
assert len(self.provider.non_terminated_nodes({})) == 0
# Scales up as nodes are reported as used
local_ip = services.get_node_ip_address()
lm.update(local_ip, {"CPU": 2}, {"CPU": 0}, {}) # head
# 1.0 nodes used => target nodes = 2 => target workers = 1
autoscaler.update()
self.waitForNodes(1)
# Make new node idle, and never used.
# Should hold steady as target is still 2.
lm.update("172.0.0.0", {"CPU": 0}, {"CPU": 0}, {})
lm.last_used_time_by_ip["172.0.0.0"] = 0
autoscaler.update()
assert len(self.provider.non_terminated_nodes({})) == 1
# Reduce load on head => target nodes = 1 => target workers = 0
lm.update(local_ip, {"CPU": 2}, {"CPU": 1}, {})
autoscaler.update()
assert len(self.provider.non_terminated_nodes({})) == 0
def testRecoverUnhealthyWorkers(self):
config_path = self.write_config(SMALL_CLUSTER)
self.provider = MockProvider()
runner = MockProcessRunner()
lm = LoadMetrics()
autoscaler = StandardAutoscaler(
config_path,
lm,
max_failures=0,
process_runner=runner,
update_interval_s=0)
autoscaler.update()
self.waitForNodes(2)
self.provider.finish_starting_nodes()
autoscaler.update()
self.waitForNodes(
2, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE})
# Mark a node as unhealthy
for _ in range(5):
if autoscaler.updaters:
time.sleep(0.05)
autoscaler.update()
assert not autoscaler.updaters
num_calls = len(runner.calls)
lm.last_heartbeat_time_by_ip["172.0.0.0"] = 0
autoscaler.update()
self.waitFor(lambda: len(runner.calls) > num_calls, num_retries=150)
def testExternalNodeScaler(self):
config = SMALL_CLUSTER.copy()
config["provider"] = {
"type": "external",
"module": "ray.autoscaler.node_provider.NodeProvider",
}
config_path = self.write_config(config)
autoscaler = StandardAutoscaler(
config_path, LoadMetrics(), max_failures=0, update_interval_s=0)
assert isinstance(autoscaler.provider, NodeProvider)
def testExternalNodeScalerWrongImport(self):
config = SMALL_CLUSTER.copy()
config["provider"] = {
"type": "external",
"module": "mymodule.provider_class",
}
invalid_provider = self.write_config(config)
with pytest.raises(ImportError):
StandardAutoscaler(
invalid_provider, LoadMetrics(), update_interval_s=0)
def testExternalNodeScalerWrongModuleFormat(self):
config = SMALL_CLUSTER.copy()
config["provider"] = {
"type": "external",
"module": "does-not-exist",
}
invalid_provider = self.write_config(config)
with pytest.raises(ValueError):
StandardAutoscaler(
invalid_provider, LoadMetrics(), update_interval_s=0)
    def testSetupCommandsWithNoNodeCaching(self):
        """Without stopped-node caching, a replacement node is brand new and
        must run the full init/setup command sequence again."""
        config = SMALL_CLUSTER.copy()
        config["min_workers"] = 1
        config["max_workers"] = 1
        config_path = self.write_config(config)
        # cache_stopped=False: terminated nodes are discarded, never reused.
        self.provider = MockProvider(cache_stopped=False)
        runner = MockProcessRunner()
        lm = LoadMetrics()
        autoscaler = StandardAutoscaler(
            config_path,
            lm,
            max_failures=0,
            process_runner=runner,
            update_interval_s=0)
        autoscaler.update()
        self.waitForNodes(1)
        self.provider.finish_starting_nodes()
        autoscaler.update()
        self.waitForNodes(
            1, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE})
        # First node (172.0.0.0) runs the full bring-up sequence.
        runner.assert_has_call("172.0.0.0", "init_cmd")
        runner.assert_has_call("172.0.0.0", "setup_cmd")
        runner.assert_has_call("172.0.0.0", "worker_setup_cmd")
        runner.assert_has_call("172.0.0.0", "start_ray_worker")
        # Check the node was not reused
        self.provider.terminate_node(0)
        autoscaler.update()
        self.waitForNodes(1)
        runner.clear_history()
        self.provider.finish_starting_nodes()
        autoscaler.update()
        self.waitForNodes(
            1, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE})
        # The replacement got a fresh IP (172.0.0.1) and re-ran everything.
        runner.assert_has_call("172.0.0.1", "init_cmd")
        runner.assert_has_call("172.0.0.1", "setup_cmd")
        runner.assert_has_call("172.0.0.1", "worker_setup_cmd")
        runner.assert_has_call("172.0.0.1", "start_ray_worker")
    def testSetupCommandsWithStoppedNodeCaching(self):
        """With stopped-node caching, a terminated node is restarted in place
        and only the Ray worker start command runs again (no re-setup)."""
        config = SMALL_CLUSTER.copy()
        config["min_workers"] = 1
        config["max_workers"] = 1
        config_path = self.write_config(config)
        # cache_stopped=True: terminated nodes are kept and may be reused.
        self.provider = MockProvider(cache_stopped=True)
        runner = MockProcessRunner()
        lm = LoadMetrics()
        autoscaler = StandardAutoscaler(
            config_path,
            lm,
            max_failures=0,
            process_runner=runner,
            update_interval_s=0)
        autoscaler.update()
        self.waitForNodes(1)
        self.provider.finish_starting_nodes()
        autoscaler.update()
        self.waitForNodes(
            1, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE})
        # First bring-up runs the full command sequence.
        runner.assert_has_call("172.0.0.0", "init_cmd")
        runner.assert_has_call("172.0.0.0", "setup_cmd")
        runner.assert_has_call("172.0.0.0", "worker_setup_cmd")
        runner.assert_has_call("172.0.0.0", "start_ray_worker")
        # Check the node was indeed reused
        self.provider.terminate_node(0)
        autoscaler.update()
        self.waitForNodes(1)
        runner.clear_history()
        self.provider.finish_starting_nodes()
        autoscaler.update()
        self.waitForNodes(
            1, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE})
        # Same IP came back; setup commands were skipped, only the worker
        # start command was re-issued.
        runner.assert_not_has_call("172.0.0.0", "init_cmd")
        runner.assert_not_has_call("172.0.0.0", "setup_cmd")
        runner.assert_not_has_call("172.0.0.0", "worker_setup_cmd")
        runner.assert_has_call("172.0.0.0", "start_ray_worker")
        runner.clear_history()
        autoscaler.update()
        # A steady-state update must not re-run setup either.
        runner.assert_not_has_call("172.0.0.0", "setup_cmd")
        # We did not start any other nodes
        runner.assert_not_has_call("172.0.0.1", " ")
    def testMultiNodeReuse(self):
        """Scaling up past previously-terminated nodes reuses the cached ones
        (no re-setup) and fully provisions only the genuinely new nodes."""
        config = SMALL_CLUSTER.copy()
        config["min_workers"] = 3
        config["max_workers"] = 3
        config_path = self.write_config(config)
        # cache_stopped=True: terminated nodes stay available for reuse.
        self.provider = MockProvider(cache_stopped=True)
        runner = MockProcessRunner()
        lm = LoadMetrics()
        autoscaler = StandardAutoscaler(
            config_path,
            lm,
            max_failures=0,
            process_runner=runner,
            update_interval_s=0)
        autoscaler.update()
        self.waitForNodes(3)
        self.provider.finish_starting_nodes()
        autoscaler.update()
        self.waitForNodes(
            3, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE})
        # Stop all three workers, then forget the command history so the
        # assertions below only see the scale-up traffic.
        self.provider.terminate_node(0)
        self.provider.terminate_node(1)
        self.provider.terminate_node(2)
        runner.clear_history()
        # Scale up to 10 nodes, check we reuse the first 3 and add 7 more.
        config["min_workers"] = 10
        config["max_workers"] = 10
        self.write_config(config)
        autoscaler.update()
        autoscaler.update()
        self.waitForNodes(10)
        self.provider.finish_starting_nodes()
        autoscaler.update()
        self.waitForNodes(
            10, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE})
        autoscaler.update()
        # Reused nodes (.0-.2) skip setup; all nodes start a Ray worker.
        for i in [0, 1, 2]:
            runner.assert_not_has_call("172.0.0.{}".format(i), "setup_cmd")
            runner.assert_has_call("172.0.0.{}".format(i), "start_ray_worker")
        # Fresh nodes (.3-.9) run the full setup.
        for i in [3, 4, 5, 6, 7, 8, 9]:
            runner.assert_has_call("172.0.0.{}".format(i), "setup_cmd")
            runner.assert_has_call("172.0.0.{}".format(i), "start_ray_worker")
if __name__ == "__main__":
    # Run this module's tests verbosely when executed as a script.
    raise SystemExit(pytest.main(["-v", __file__]))
| 35.432258 | 79 | 0.593747 |
f42bc4f14eaca63acf32f55b3a6569ad713a864a | 934 | py | Python | isi_sdk_8_1_0/test/test_storagepool_settings.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 24 | 2018-06-22T14:13:23.000Z | 2022-03-23T01:21:26.000Z | isi_sdk_8_1_0/test/test_storagepool_settings.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 46 | 2018-04-30T13:28:22.000Z | 2022-03-21T21:11:07.000Z | isi_sdk_8_1_0/test/test_storagepool_settings.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 29 | 2018-06-19T00:14:04.000Z | 2022-02-08T17:51:19.000Z | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 5
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_1_0
from isi_sdk_8_1_0.models.storagepool_settings import StoragepoolSettings # noqa: E501
from isi_sdk_8_1_0.rest import ApiException
class TestStoragepoolSettings(unittest.TestCase):
    """Unit-test stubs for the StoragepoolSettings model."""

    def setUp(self):
        """No fixtures are needed for these stub tests."""

    def tearDown(self):
        """Nothing to clean up."""

    def testStoragepoolSettings(self):
        """Placeholder smoke test for StoragepoolSettings.

        FIXME: construct the object with mandatory attributes using example
        values, e.g.::

            isi_sdk_8_1_0.models.storagepool_settings.StoragepoolSettings()
        """
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| 22.780488 | 95 | 0.715203 |
25bb443283b1f8f77d50cc47c69d6fa49d4d43fd | 5,322 | py | Python | test/integration/029_docs_generate_tests/test_docs_generate.py | lewish/dbt | fa7f5070c4288d79daa7b196a408eb6853ee6385 | [
"Apache-2.0"
] | null | null | null | test/integration/029_docs_generate_tests/test_docs_generate.py | lewish/dbt | fa7f5070c4288d79daa7b196a408eb6853ee6385 | [
"Apache-2.0"
] | null | null | null | test/integration/029_docs_generate_tests/test_docs_generate.py | lewish/dbt | fa7f5070c4288d79daa7b196a408eb6853ee6385 | [
"Apache-2.0"
] | null | null | null | import json
import os
from nose.plugins.attrib import attr
from test.integration.base import DBTIntegrationTest
class TestDocsGenerate(DBTIntegrationTest):
    """Integration test for ``dbt docs generate`` catalog output.

    Seeds a schema, runs the docs generation, and verifies that the emitted
    ``catalog.json`` describes the seeded tables and their columns.
    """

    def setUp(self):
        super(TestDocsGenerate, self).setUp()
        # Populate the test schema with the seed tables checked below.
        self.run_sql_file("test/integration/029_docs_generate_tests/seed.sql")

    @property
    def schema(self):
        return "simple_dependency_029"

    @property
    def models(self):
        return "test/integration/029_docs_generate_tests/models"

    @property
    def project_config(self):
        return {
            "repositories": [
                'https://github.com/fishtown-analytics/dbt-integration-project'
            ]
        }

    @staticmethod
    def _expected_table(schema, name, columns):
        """Build one expected catalog entry for a BASE TABLE.

        Args:
            schema: schema name the table lives in.
            name: table name.
            columns: ordered list of ``(column_name, column_type)`` pairs;
                catalog column indices are 1-based in declaration order.
        """
        return {
            'metadata': {
                'schema': schema,
                'name': name,
                'type': 'BASE TABLE',
                'comment': None,
            },
            'columns': [
                {'name': col_name, 'index': idx, 'type': col_type, 'comment': None}
                for idx, (col_name, col_type) in enumerate(columns, start=1)
            ],
        }

    @attr(type='postgres')
    def test_simple_generate(self):
        self.run_dbt(["deps"])
        self.run_dbt(["docs", "generate"])
        self.assertTrue(os.path.exists('./target/catalog.json'))

        with open('./target/catalog.json') as fp:
            data = json.load(fp)

        my_schema_name = self.unique_schema()
        self.assertIn(my_schema_name, data)
        my_schema = data[my_schema_name]

        # Columns shared by the seed table and its config variant.
        seed_columns = [
            ('id', 'integer'),
            ('first_name', 'character varying'),
            ('email', 'character varying'),
            ('ip_address', 'character varying'),
            ('updated_at', 'timestamp without time zone'),
        ]
        expected = {
            'seed': self._expected_table(my_schema_name, 'seed', seed_columns),
            'seed_config_expected_1': self._expected_table(
                my_schema_name, 'seed_config_expected_1',
                seed_columns + [
                    ('c1', 'text'),
                    ('c2', 'text'),
                    ('some_bool', 'text'),
                ]),
            'seed_summary': self._expected_table(
                my_schema_name, 'seed_summary',
                [
                    ('year', 'timestamp without time zone'),
                    ('count', 'bigint'),
                ]),
        }
        self.assertEqual(expected, my_schema)
| 32.060241 | 79 | 0.316986 |
cc3b0a33b25c1739cce23e2ee1d4d68117b05f00 | 1,234 | py | Python | warp/data/dependencies.py | j-helland/warp | 2a71346f0ec4d4e6fd45ed3b5e972b683724287c | [
"Unlicense"
] | null | null | null | warp/data/dependencies.py | j-helland/warp | 2a71346f0ec4d4e6fd45ed3b5e972b683724287c | [
"Unlicense"
] | null | null | null | warp/data/dependencies.py | j-helland/warp | 2a71346f0ec4d4e6fd45ed3b5e972b683724287c | [
"Unlicense"
] | null | null | null | # std
from copy import deepcopy
# extern
# warp
import warp
from warp.data.decorator import Decorator
# types
from typing import Dict
# Public API of this module.
__all__ = ['dependencies']
def dependencies(**kwargs: Dict[str, warp.Product]):
    """Link [`Product`](../../pipes/attributes/#Product) instances as
    dependencies of a [`Pipe`](../../pipes/pipes/#Pipe) subclass.

    This decorator should decorate the ``run`` function therein.

    Example:
        ```python
        class B(Pipe):
            @dependencies(product=A.product)
            def run(self) -> None:
                # At runtime, `A.product` is available to this pipe as a
                # class-level attribute under the kwarg name.
                self.product
        ```

    Arguments:
        kwargs: dependencies given as `str` keys mapping to
            [`Product`](../../pipes/attributes/#Product) instance values.
    """
    # Wrap raw values so that every declared dependency is a warp.Product.
    products = {
        name: value if isinstance(value, warp.Product)
        else warp.Product(value, external=True)
        for name, value in kwargs.items()
    }

    class Dependencies(Decorator):
        def get_attrs(self):
            # Deep-copy on every access so runtime data never leaks back
            # into the shared product definitions.
            return '__dependencies__', deepcopy(products)

    return Dependencies
| 28.697674 | 132 | 0.641005 |
353fb424255985eea9309daacb9affcd9ccc8b92 | 561 | py | Python | app/logic/benchmark/models/__init__.py | imvu/bluesteel | ab52133249a693b3cd2d8593c5d47408a3b0fce6 | [
"MIT"
] | 10 | 2017-01-13T06:28:04.000Z | 2020-11-18T13:00:26.000Z | app/logic/benchmark/models/__init__.py | imvu/bluesteel | ab52133249a693b3cd2d8593c5d47408a3b0fce6 | [
"MIT"
] | null | null | null | app/logic/benchmark/models/__init__.py | imvu/bluesteel | ab52133249a693b3cd2d8593c5d47408a3b0fce6 | [
"MIT"
] | 2 | 2018-03-29T14:10:53.000Z | 2019-11-20T08:21:57.000Z | """ Automatic file """
# Duplicate code
# pylint: disable=R0801
from app.logic.benchmark.models.BenchmarkDefinitionModel import BenchmarkDefinitionEntry
from app.logic.benchmark.models.BenchmarkDefinitionWorkerPassModel import BenchmarkDefinitionWorkerPassEntry
from app.logic.benchmark.models.BenchmarkExecutionModel import BenchmarkExecutionEntry
from app.logic.benchmark.models.BenchmarkFluctuationWaiverModel import BenchmarkFluctuationWaiverEntry
from app.logic.benchmark.models.BenchmarkFluctuationOverrideModel import BenchmarkFluctuationOverrideEntry
| 51 | 108 | 0.889483 |
9954afda5fd1aeb8109da8d05c956ec4e0a9ec8f | 597 | py | Python | oops_fhir/r4/value_set/identifier_type_codes.py | Mikuana/oops_fhir | 77963315d123756b7d21ae881f433778096a1d25 | [
"MIT"
] | null | null | null | oops_fhir/r4/value_set/identifier_type_codes.py | Mikuana/oops_fhir | 77963315d123756b7d21ae881f433778096a1d25 | [
"MIT"
] | null | null | null | oops_fhir/r4/value_set/identifier_type_codes.py | Mikuana/oops_fhir | 77963315d123756b7d21ae881f433778096a1d25 | [
"MIT"
] | null | null | null | from pathlib import Path
from fhir.resources.valueset import ValueSet as _ValueSet
from oops_fhir.utils import ValueSet
# Public API of this module.
__all__ = ["IdentifierTypeCodes"]
# Load the canonical FHIR ValueSet definition that ships alongside this
# module as a JSON file with the same base name.
_resource = _ValueSet.parse_file(Path(__file__).with_suffix(".json"))
class IdentifierTypeCodes(ValueSet):
    """
    IdentifierType

    A coded type for an identifier that can be used to determine which
    identifier to use for a specific purpose.

    Status: active - Version: 4.0.1

    http://hl7.org/fhir/ValueSet/identifier-type
    """

    # TODO: fix this template issue

    class Meta:
        resource = _resource
| 19.9 | 70 | 0.721943 |
7980276d769f09c612b7ced93fce87a17c909077 | 1,525 | py | Python | mailtorpedo/sender.py | abm-saeid/torpedo | 3cfb571bfbc7b991312e04354d5773d7dc3b19a2 | [
"MIT"
] | 1 | 2021-09-01T05:55:44.000Z | 2021-09-01T05:55:44.000Z | mailtorpedo/sender.py | abm-saeid/torpedo | 3cfb571bfbc7b991312e04354d5773d7dc3b19a2 | [
"MIT"
] | null | null | null | mailtorpedo/sender.py | abm-saeid/torpedo | 3cfb571bfbc7b991312e04354d5773d7dc3b19a2 | [
"MIT"
] | null | null | null | from os.path import exists
import json
import smtplib
from .binder import Binder
class Sender():
    """Sends templated emails over SMTP using credentials from a JSON file."""

    def __init__(self, creds, reader, template):
        """Load SMTP credentials and prepare the mail-merge binder.

        Args:
            creds: path to a JSON file with HOST, PORT, USER, PASSWORD and,
                optionally, SENDER_EMAIL keys.
            reader: recipient-data source, passed through to Binder.
            template: message template, passed through to Binder.

        Raises:
            FileNotFoundError: if the credentials file does not exist.
        """
        if not exists(creds):
            raise FileNotFoundError("Credential file not found.")
        with open(creds, 'r', encoding='utf-8') as credfile:
            self.credentials = json.loads(credfile.read())
        self.binder = Binder(reader, template)

    def get_server(self):
        """Open a new authenticated, TLS-secured SMTP connection."""
        server = smtplib.SMTP(
            host=self.credentials['HOST'],
            port=self.credentials['PORT'],
        )
        server.starttls()
        server.login(
            user=self.credentials['USER'],
            password=self.credentials['PASSWORD']
        )
        return server

    def send(self):
        """Send one message per recipient, reconnecting once if the server
        drops the connection mid-batch."""
        mail_list = list(self.binder.parse())
        # Prefer an explicit sender address; fall back to the login user.
        if 'SENDER_EMAIL' in self.credentials:
            email = self.credentials['SENDER_EMAIL']
        else:
            email = self.credentials['USER']
        server = self.get_server()
        for receiver in mail_list:
            try:
                server.sendmail(email, receiver[0], receiver[1].as_string())
            except smtplib.SMTPServerDisconnected:
                # Re-establish the connection and retry this message once.
                server = self.get_server()
                # NOTE(review): the original code only sets the From header
                # on this retry path -- confirm whether it should be set for
                # every message instead.
                receiver[1]['From'] = email
                server.sendmail(email, receiver[0], receiver[1].as_string())
            print("Mail sent to", receiver[0])
        server.quit()
3c62771578f7b519ec81bcdb24a02b3b40606d9f | 138 | py | Python | API/rate/config.py | amitt001/Analytics-App | 4e8568321d456afdd2e0689c160567fa1c1e60be | [
"MIT"
] | 1 | 2015-12-09T16:38:57.000Z | 2015-12-09T16:38:57.000Z | API/rate/config.py | amitt001/Analytics-App | 4e8568321d456afdd2e0689c160567fa1c1e60be | [
"MIT"
] | null | null | null | API/rate/config.py | amitt001/Analytics-App | 4e8568321d456afdd2e0689c160567fa1c1e60be | [
"MIT"
] | null | null | null | ##################
######PYMONGO#####
##################
REVIEW_DB = 'playDB'
REVIEW_COLLECTION = 'snapv2_0'
RATED_COLLECTION = 'test' | 19.714286 | 30 | 0.485507 |
708330e0d3c3c4474f03c53e366e439c3fe5433a | 5,649 | py | Python | kernel_matrix_benchmarks/algorithms/base.py | PierreMarchand20/kernel-matrix-benchmarks | d31eac1b226358abd2e018493f5671b5a303d9dd | [
"MIT"
] | null | null | null | kernel_matrix_benchmarks/algorithms/base.py | PierreMarchand20/kernel-matrix-benchmarks | d31eac1b226358abd2e018493f5671b5a303d9dd | [
"MIT"
] | null | null | null | kernel_matrix_benchmarks/algorithms/base.py | PierreMarchand20/kernel-matrix-benchmarks | d31eac1b226358abd2e018493f5671b5a303d9dd | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from multiprocessing.pool import ThreadPool
import psutil
import numpy as np
class BaseAlgorithm(object):
    """Common plumbing shared by every benchmarked algorithm."""

    def __init__(
        self, *, kernel, dimension, normalize_rows=False, precision=np.float64
    ):
        """Record the basic configuration of the algorithm.

        Args:
            kernel (str):
                The kernel function that is expected to be used on the
                dataset.
            dimension (int):
                Ambient dimension of the point clouds.
            normalize_rows (bool, optional):
                Whether the rows of the kernel matrix should be normalized
                so that they sum up to 1. Defaults to False.
            precision (numpy dtype, optional):
                Desired working precision. The output array is always cast
                as a float64 NumPy array. Defaults to np.float64.
        """
        self.kernel = kernel
        self.dimension = dimension
        self.normalize_rows = normalize_rows
        self.precision = precision
        self.name = "BaseAlgorithm()"

    def done(self):
        """Free memory and files after a computation, even on exceptions."""

    def get_memory_usage(self):
        """Return this instance's current memory usage in kilobytes, or
        None if that information is not available."""
        rss_bytes = psutil.Process().memory_info().rss
        return rss_bytes / 1024

    def set_query_arguments(self, **kwargs):
        """Set extra arguments after pre-computation, before the query."""

    def get_additional(self):
        """Return algorithm-specific extra result metadata."""
        return {}

    def __str__(self):
        return self.name
class BaseProduct(BaseAlgorithm):
    """Base class for kernel matrix products and attention layers."""

    task = "product"

    def prepare_data(
        self,
        *,
        source_points,
        target_points,
        same_points=False,
        density_estimation=False,
    ):
        """Load input data for the pre-processing step, outside of the timer.

        Not timed: may be used e.g. to move the input data from RAM to a
        GPU device.

        Args:
            source_points ((M,D) array): the reference point cloud.
            target_points ((N,D) array): query points.
            same_points (bool): assume the target cloud equals the source.
                Defaults to False.
            density_estimation (bool): assume the source signal is uniformly
                equal to 1. Defaults to False.
        """

    def fit(self):
        """Fit the algorithm to a source distribution - this is timed."""

    def prepare_query(self, *, source_signal):
        """Reformat or recast the source signal, outside of the timer.

        To ensure a fair benchmark, this may e.g. load the signal on a GPU
        or change the numerical precision.

        Args:
            source_signal ((M,E) array or None): the reference signal.
                If density estimation is enabled, the algorithm may assume
                E=1 and a signal uniformly equal to 1.
        """

    def query(self):
        """Perform the computation for all target points - this is timed.

        Stores the output in ``self.res``; see :meth:`get_result`.
        """
        self.res = None

    def get_result(self):
        """Return the result of :meth:`query` as a float64 NumPy array.

        Not timed: may e.g. unload the result from a GPU device and cast
        it as a NumPy array.

        Returns:
            (N,E) array: output of the computation at the N points x_i.
        """
        return np.ascontiguousarray(self.res, dtype=np.float64)
class BaseSolver(BaseAlgorithm):
    """Base class for kernel matrix solvers."""

    task = "solver"

    def prepare_data(self, *, source_points):
        """Load data for the pre-processing step, outside of the timer.

        Not timed: may be used e.g. to move data from RAM to a GPU device.

        Args:
            source_points ((M,D) array): the reference point cloud.
        """

    def fit(self):
        """Fit the algorithm to a source distribution - this is timed."""

    def prepare_query(self, *, target_signal):
        """Reformat or recast the target signal, outside of the timer.

        To ensure a fair benchmark, this may e.g. load the signal on a GPU
        or change the numerical precision.

        Args:
            target_signal ((N,E) array): output of the kernel matrix product.
        """

    def query(self):
        """Solve the kernel linear system and store the solution in
        ``self.res`` - this is timed.

        See :meth:`get_result` for retrieving the output.
        """
        raise NotImplementedError()

    def get_result(self):
        """Return the result of :meth:`query` as a float64 NumPy array.

        Not timed: may e.g. unload the result from a GPU device.

        Returns:
            (M,E) array: output of the computation at the M points y_j.
        """
        return np.ascontiguousarray(self.res, dtype=np.float64)
| 33.426036 | 110 | 0.61179 |
85d880fe1f9b1e80915ffaa86dcbded62681b73a | 297 | py | Python | Chapter09_code/ch09_05_config/__openerp__.py | PacktPublishing/Odoo-Development-Cookbook | 5553110c0bc352c4541f11904e236cad3c443b8b | [
"MIT"
] | 55 | 2016-05-23T16:05:50.000Z | 2021-07-19T00:16:46.000Z | Chapter09_code/ch09_05_config/__openerp__.py | kogkog098/Odoo-Development-Cookbook | 166c9b98efbc9108b30d719213689afb1f1c294d | [
"MIT"
] | 1 | 2016-12-09T02:14:21.000Z | 2018-07-02T09:02:20.000Z | Chapter09_code/ch09_05_config/__openerp__.py | kogkog098/Odoo-Development-Cookbook | 166c9b98efbc9108b30d719213689afb1f1c294d | [
"MIT"
] | 52 | 2016-06-01T20:03:59.000Z | 2020-10-31T23:58:25.000Z | # -*- coding: utf-8 -*-
{
'name': 'Cookbook Ch09 code',
'depends': ['base_setup'],
'category': 'Library',
'data': [
'security/ir.model.access.csv',
'security/library_security.xml',
'views/library_book.xml',
'views/res_config_settings.xml',
],
}
| 22.846154 | 40 | 0.552189 |
3001a4228981ed5edc9b96c32f54fd48c043ec8e | 6,047 | py | Python | posthog/helpers/dashboard_templates.py | csmatar/posthog | 4587cfe18625f302726c531f06a32c18e9749e9d | [
"MIT"
] | 58 | 2020-08-26T16:26:18.000Z | 2022-03-30T05:32:23.000Z | posthog/helpers/dashboard_templates.py | csmatar/posthog | 4587cfe18625f302726c531f06a32c18e9749e9d | [
"MIT"
] | 15 | 2021-11-09T10:49:34.000Z | 2021-11-09T16:11:01.000Z | posthog/helpers/dashboard_templates.py | csmatar/posthog | 4587cfe18625f302726c531f06a32c18e9749e9d | [
"MIT"
] | 13 | 2020-09-08T13:27:07.000Z | 2022-03-19T17:27:10.000Z | import random
from typing import Callable, Dict, List
from django.utils.timezone import now
from posthog.constants import (
BREAKDOWN,
BREAKDOWN_TYPE,
DATE_FROM,
DISPLAY,
ENTITY_ID,
ENTITY_TYPE,
INSIGHT,
INSIGHT_TRENDS,
INTERVAL,
PROPERTIES,
SHOWN_AS,
TREND_FILTER_TYPE_EVENTS,
TRENDS_CUMULATIVE,
TRENDS_PIE,
TRENDS_STICKINESS,
)
from posthog.models.dashboard import Dashboard
from posthog.models.insight import Insight
# Colors an insight card may be tinted with on a dashboard.
DASHBOARD_COLORS: List[str] = ["white", "blue", "green", "purple", "black"]
def _create_insight(dashboard: Dashboard, **kwargs) -> Insight:
    """Create one Insight on ``dashboard``, stamped as freshly refreshed.

    Fills in the ``team``/``dashboard``/``last_refresh`` boilerplate shared
    by every default insight; all other fields come from ``kwargs``.
    """
    return Insight.objects.create(
        team=dashboard.team, dashboard=dashboard, last_refresh=now(), **kwargs
    )


def _create_default_app_items(dashboard: Dashboard) -> None:
    """Populate ``dashboard`` with the default set of app-analytics insights."""
    _create_insight(
        dashboard,
        name="Daily Active Users (DAUs)",
        filters={
            TREND_FILTER_TYPE_EVENTS: [{"id": "$pageview", "math": "dau", "type": TREND_FILTER_TYPE_EVENTS}],
            INTERVAL: "day",
        },
        description="Shows the number of unique users that use your app everyday.",
    )
    _create_insight(
        dashboard,
        name="Weekly revenue (from Order Completed)",
        filters={
            TREND_FILTER_TYPE_EVENTS: [
                {"id": "Order Completed", "math": "sum", "type": TREND_FILTER_TYPE_EVENTS, "math_property": "revenue"}
            ],
            INTERVAL: "week",
            DATE_FROM: "-60d",
        },
        color=random.choice(DASHBOARD_COLORS),
        description="Shows how much revenue your app is capturing from orders every week. "
        'Sales should be registered with an "Order Completed" event.',
    )
    _create_insight(
        dashboard,
        name="Cumulative DAUs",
        filters={
            TREND_FILTER_TYPE_EVENTS: [{"id": "$pageview", "math": "dau", "type": TREND_FILTER_TYPE_EVENTS}],
            INTERVAL: "day",
            DATE_FROM: "-30d",
            DISPLAY: TRENDS_CUMULATIVE,
        },
        color=random.choice(DASHBOARD_COLORS),
        description="Shows the total cumulative number of unique users that have been using your app.",
    )
    _create_insight(
        dashboard,
        name="Repeat users over time",
        filters={
            TREND_FILTER_TYPE_EVENTS: [{"id": "$pageview", "math": "dau", "type": TREND_FILTER_TYPE_EVENTS}],
            ENTITY_ID: "$pageview",
            ENTITY_TYPE: TREND_FILTER_TYPE_EVENTS,
            INTERVAL: "day",
            SHOWN_AS: TRENDS_STICKINESS,
            INSIGHT: INSIGHT_TRENDS,
        },
        color=random.choice(DASHBOARD_COLORS),
        description="Shows you how many users visited your app for a specific number of days "
        '(e.g. a user that visited your app twice in the time period will be shown under "2 days").',
    )
    _create_insight(
        dashboard,
        name="Sample - Purchase conversion funnel",
        filters={
            TREND_FILTER_TYPE_EVENTS: [
                {"id": "$pageview", "type": TREND_FILTER_TYPE_EVENTS, "order": 0},
                {
                    "id": "$autocapture",
                    "name": "Clicked purchase button",
                    "type": TREND_FILTER_TYPE_EVENTS,
                    PROPERTIES: [
                        {"key": "$event_type", "type": "event", "value": "click"},
                        {"key": "text", "type": "element", "value": "Purchase"},
                    ],
                    "order": 1,
                },
                {
                    "id": "$autocapture",
                    "name": "Submitted checkout form",
                    "type": TREND_FILTER_TYPE_EVENTS,
                    PROPERTIES: [
                        {"key": "$event_type", "type": "event", "value": "submit"},
                        {"key": "$pathname", "type": "event", "value": "/purchase"},
                    ],
                    "order": 2,
                },
                {"id": "Order Completed", "name": "Order Completed", "type": TREND_FILTER_TYPE_EVENTS, "order": 3},
            ],
            INSIGHT: "FUNNELS",
        },
        color=random.choice(DASHBOARD_COLORS),
        is_sample=True,
        description="This is a sample of how a user funnel could look like. It represents the number of users that performed "
        "a specific action at each step.",
    )
    _create_insight(
        dashboard,
        name="Users by browser (last 2 weeks)",
        filters={
            TREND_FILTER_TYPE_EVENTS: [{"id": "$pageview", "math": "dau", "type": TREND_FILTER_TYPE_EVENTS}],
            DATE_FROM: "-14d",
            INTERVAL: "day",
            BREAKDOWN_TYPE: "person",
            BREAKDOWN: "$browser",
            DISPLAY: TRENDS_PIE,
        },
        description="Shows a breakdown of browsers used to visit your app per unique users in the last 14 days.",
    )
    _create_insight(
        dashboard,
        name="Users by traffic source",
        filters={
            TREND_FILTER_TYPE_EVENTS: [{"id": "$pageview", "math": "dau", "type": TREND_FILTER_TYPE_EVENTS}],
            INTERVAL: "day",
            BREAKDOWN_TYPE: "event",
            BREAKDOWN: "$initial_referring_domain",
        },
        description="Shows a breakdown of where unique users came from when visiting your app.",
    )
# Registry mapping a template key to the builder that populates a dashboard.
DASHBOARD_TEMPLATES: Dict[str, Callable] = {
    "DEFAULT_APP": _create_default_app_items,
}
def create_dashboard_from_template(template_key: str, dashboard: Dashboard) -> None:
    """Fill ``dashboard`` using the template registered under ``template_key``.

    Raises:
        AttributeError: if ``template_key`` is not a registered template.
    """
    try:
        template = DASHBOARD_TEMPLATES[template_key]
    except KeyError:
        raise AttributeError(f"Invalid template key `{template_key}` provided.") from None
    template(dashboard)
| 34.953757 | 126 | 0.574004 |
21017d7d6694150e80921cb8ea5abbefcf1db5fa | 6,055 | py | Python | nokia7seg.py | mcauser/MicroPython-ESP8266-Nokia-5110-Quad-7-segment | 369dc3c675e0dd770762beee6ea0dbfcdcf58127 | [
"MIT"
] | 3 | 2018-02-08T19:07:09.000Z | 2018-12-07T00:04:32.000Z | nokia7seg.py | mcauser/MicroPython-ESP8266-Nokia-5110-Quad-7-segment | 369dc3c675e0dd770762beee6ea0dbfcdcf58127 | [
"MIT"
] | null | null | null | nokia7seg.py | mcauser/MicroPython-ESP8266-Nokia-5110-Quad-7-segment | 369dc3c675e0dd770762beee6ea0dbfcdcf58127 | [
"MIT"
] | null | null | null | """
MicroPython Nokia 5110 7-segment PCD8544 84x48 LCD driver
https://github.com/mcauser/MicroPython-ESP8266-Nokia-5110-Quad-7-segment
MIT License
Copyright (c) 2016 Mike Causer
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# Emulating a TM1637 quad 7-segment display module on a Nokia 5110 display
import framebuf
from time import sleep_ms
# 0-9, a-z, blank, dash, star
_SEGMENTS = bytearray(b'\x3F\x06\x5B\x4F\x66\x6D\x7D\x07\x7F\x6F\x77\x7C\x39\x5E\x79\x71\x3D\x76\x06\x1E\x76\x38\x55\x54\x3F\x73\x67\x50\x6D\x78\x3E\x1C\x2A\x76\x6E\x5B\x00\x40\x63')
class Nokia7Seg:
def __init__(self,lcd):
self.lcd = lcd
self.buf = bytearray((lcd.height // 8) * lcd.width)
self.fbuf = framebuf.FrameBuffer(self.buf, lcd.width, lcd.height, framebuf.MVLSB)
self.pos = 0
self.clear()
def _write_byte(self, b):
x = self.pos * 21
if self.pos >= 2:
x += 2
y = 12
# a segment
self.fbuf.hline(x+1, y+0, 14, (b & 0x01))
self.fbuf.hline(x+2, y+1, 12, (b & 0x01))
self.fbuf.hline(x+3, y+2, 10, (b & 0x01))
# b segment
self.fbuf.vline(x+13, y+3, 8, (b & 0x02))
self.fbuf.vline(x+14, y+2, 10, (b & 0x02))
self.fbuf.vline(x+15, y+1, 10, (b & 0x02))
# c segment
self.fbuf.vline(x+13, y+14, 8, (b & 0x04))
self.fbuf.vline(x+14, y+13, 10, (b & 0x04))
self.fbuf.vline(x+15, y+14, 10, (b & 0x04))
# d segment
self.fbuf.hline(x+3, y+22, 10, (b & 0x08))
self.fbuf.hline(x+2, y+23, 12, (b & 0x08))
self.fbuf.hline(x+1, y+24, 14, (b & 0x08))
# e segment
self.fbuf.vline(x+0, y+14, 10, (b & 0x10))
self.fbuf.vline(x+1, y+13, 10, (b & 0x10))
self.fbuf.vline(x+2, y+14, 8, (b & 0x10))
# f segment
self.fbuf.vline(x+0, y+1, 10, (b & 0x20))
self.fbuf.vline(x+1, y+2, 10, (b & 0x20))
self.fbuf.vline(x+2, y+3, 8, (b & 0x20))
# g segment
self.fbuf.hline(x+3, y+11, 10, (b & 0x40))
self.fbuf.hline(x+2, y+12, 12, (b & 0x40))
self.fbuf.hline(x+3, y+13, 10, (b & 0x40))
# dot segment
self.fbuf.fill_rect(x+17, y+23, 2, 2, (b & 0x80))
self.pos += 1
self._draw()
def _draw(self):
self.lcd.data(self.buf)
def clear(self, color=0):
"""Fill the display with off pixels"""
self.fbuf.fill(color)
self._draw()
def colon(self, on=True):
"""Display a colon between the 2nd and 3rd segments"""
# colon top square
self.fbuf.fill_rect(40, 19, 2, 2, (1 if on else 0))
# colon bottom square
self.fbuf.fill_rect(40, 28, 2, 2, (1 if on else 0))
self._draw()
def write(self, segments, pos=0):
"""Display up to 4 segments moving right from a given position.
The MSB in the 2nd segment controls the colon between the 2nd
and 3rd segments."""
if not 0 <= pos <= 3:
raise ValueError("Position out of range")
self.pos = pos
for seg in segments:
self._write_byte(seg)
def encode_digit(self, digit):
"""Convert a character 0-9, a-f to a segment."""
return _SEGMENTS[digit & 0x0f]
def encode_string(self, string):
"""Convert an up to 4 character length string containing 0-9, a-z,
space, dash, star to an array of segments, matching the length of the
source string."""
segments = bytearray(len(string))
for i in range(len(string)):
segments[i] = self.encode_char(string[i])
return segments
def encode_char(self, char):
"""Convert a character 0-9, a-z, space, dash or star to a segment."""
o = ord(char)
if o == 32:
return _SEGMENTS[36] # space
if o == 42:
return _SEGMENTS[38] # star/degrees
if o == 45:
return _SEGMENTS[37] # dash
if o >= 65 and o <= 90:
return _SEGMENTS[o-55] # uppercase A-Z
if o >= 97 and o <= 122:
return _SEGMENTS[o-87] # lowercase a-z
if o >= 48 and o <= 57:
return _SEGMENTS[o-48] # 0-9
raise ValueError("Character out of range: {:d} '{:s}'".format(o, chr(o)))
def hex(self, val):
"""Display a hex value 0x0000 through 0xffff, right aligned."""
string = '{:04x}'.format(val & 0xffff)
self.write(self.encode_string(string))
def number(self, num):
"""Display a numeric value -999 through 9999, right aligned."""
# limit to range -999 to 9999
num = max(-999, min(num, 9999))
string = '{0: >4d}'.format(num)
self.write(self.encode_string(string))
def numbers(self, num1, num2, colon=True):
"""Display two numeric values -9 through 99, with leading zeros
and separated by a colon."""
num1 = max(-9, min(num1, 99))
num2 = max(-9, min(num2, 99))
segments = self.encode_string('{0:0>2d}{1:0>2d}'.format(num1, num2))
self.write(segments)
self.colon(colon)
    def temperature(self, num):
        """Show a temperature with a trailing degrees-C suffix.

        Values below -10 display 'lo', above 99 display 'hi'; otherwise the
        number is shown right-aligned in the first two digits. The degree
        symbol and 'C' occupy the last two digits.
        """
        if num < -9:
            self.show('lo') # low
        elif num > 99:
            self.show('hi') # high
        else:
            string = '{0: >2d}'.format(num)
            self.write(self.encode_string(string))
        # NOTE(review): suffix written at position 2 regardless of branch,
        # so 'lo'/'hi' also get the degrees-C suffix — confirm indentation
        # matches the upstream driver.
        self.write([_SEGMENTS[38], _SEGMENTS[12]], 2) # degrees C
def show(self, string, colon=False):
segments = self.encode_string(string)
self.write(segments[:4])
self.colon(colon)
def scroll(self, string, delay=250):
segments = string if isinstance(string, list) else self.encode_string(string)
data = [0] * 8
data[4:0] = list(segments)
for i in range(len(segments) + 5):
self.write(data[0+i:4+i])
sleep_ms(delay)
| 33.269231 | 182 | 0.671346 |
120ea7653e523c8247e9c9be45c1102aea30813f | 8,635 | py | Python | tests/test_algorithm.py | adu461386118/PyRate | 0428dba9e2b3d4b6807f8c62d55c161c0dd4d75a | [
"Apache-2.0"
] | 1 | 2020-09-12T00:01:33.000Z | 2020-09-12T00:01:33.000Z | tests/test_algorithm.py | adu461386118/PyRate | 0428dba9e2b3d4b6807f8c62d55c161c0dd4d75a | [
"Apache-2.0"
] | null | null | null | tests/test_algorithm.py | adu461386118/PyRate | 0428dba9e2b3d4b6807f8c62d55c161c0dd4d75a | [
"Apache-2.0"
] | null | null | null | # This Python module is part of the PyRate software package.
#
# Copyright 2020 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This Python module contains tests for the algorithm.py PyRate module.
"""
from datetime import date
from math import pi, cos, sin, radians
from numpy import array, reshape, squeeze
from os.path import join
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_allclose
from pyrate.core.algorithm import (least_squares_covariance,
is_square,
unit_vector,
ifg_date_lookup,
get_all_epochs,
get_epochs,
first_second_ids,
factorise_integer,
)
from pyrate.core.config import parse_namelist
from pyrate.core.shared import Ifg, convert_radians_to_mm
from tests.common import small5_mock_ifgs, SML_TEST_TIF, UnitTestAdaptation
class TestLeastSquaresTests(UnitTestAdaptation):
    """
    Unit tests for the PyRate least_squares_covariance() implementation.
    """

    @staticmethod
    def test_least_squares_covariance():
        # Known 3-observation / 2-parameter system with unit weights.
        obs = array([[13, 7.2, 5.7]]).T
        design = array([[1, 0.4, 0.3], [1, 1, 1]]).T
        weights = array([[1, 1, 1]]).T
        result = least_squares_covariance(design, obs, weights)
        expected = [10.1628, 2.8744]
        assert_array_almost_equal(result.T.squeeze(), expected, decimal=4)

    def test_least_squares_covariance_overdetermined(self):
        # The system must be overdetermined: more observations than params.
        # Exercise both the transposed and the plain row-vector styles.
        cases = [
            (array([[1]]).T, array([[10]]).T, array([[1]]).T),
            (array([[1]]), array([[10]]), array([[1]])),
        ]
        for design, obs, weights in cases:
            self.assertRaises(ValueError, least_squares_covariance,
                              design, obs, weights)
class TestAlgorithmTests(UnitTestAdaptation):
    """
    Misc unittests for functions in the algorithm module.
    """

    def test_factorise(self):
        # factorise_integer returns an integer factor pair, coercing floats.
        self.assertEqual(factorise_integer(1), (1, 1))
        self.assertEqual(factorise_integer(2), (2, 1))
        self.assertEqual(factorise_integer(4), (2, 2))
        self.assertEqual(factorise_integer(9), (3, 3))
        self.assertEqual(factorise_integer(76), (4, 19))
        self.assertEqual(factorise_integer(76.5), (4, 19))
        a, b = factorise_integer(12)
        self.assertEqual(type(a), int)
        self.assertEqual(type(b), int)

    def test_is_square(self):
        self.assertTrue(is_square(np.empty((2, 2))))

    def test_is_not_square(self):
        for shape in [(3, 2), (2, 3)]:
            self.assertFalse(is_square(np.empty(shape)))

    @staticmethod
    def test_phase_conversion():
        # ROIPAC interferograms in units of radians, verify conversion to mm
        xs, ys = 5, 7
        data = (np.arange(xs * ys) - 1.7) * 0.1  # fake a range of values
        data = np.where(data == 0, np.nan, data)
        wavelen = 0.0562356424
        # phase-to-displacement: mm = radians * wavelength * 1000 / (4*pi)
        exp = (data * wavelen * 1000) / (4 * pi)
        act = convert_radians_to_mm(data, wavelen)
        assert_allclose(exp, act)

    def test_unit_vector(self):
        # last values here simulate a descending pass
        incidence = [radians(x) for x in (34.3, 39.3, 29.3, 34.3)]
        azimuth = [radians(x) for x in (77.8, 77.9, 80.0, 282.2)]

        # build the expected east/north/vertical components by hand
        vert, ns, ew = [], [], []
        for i, a in zip(incidence, azimuth):
            vert.append(cos(i))
            ns.append(sin(i) * cos(a))
            ew.append(sin(i) * sin(a))

        sh = 4
        unitv = [array(ew), array(ns), array(vert)]
        unitv = [a.reshape(sh) for a in unitv]

        # NB: assumes radian inputs
        act = unit_vector(reshape(incidence, sh), reshape(azimuth, sh))
        for a, e in zip(act, unitv):
            assert_array_almost_equal(squeeze(a), e)

        # check unit vec components have correct signs
        E, N, V = act
        # test E/W component of ascending is +ve
        # BUGFIX: was `(E[:-2]).all() > 0`, which compares the boolean
        # result of all() against 0 and actually tests "all nonzero",
        # not positivity. Test the elementwise sign instead.
        self.assertTrue((E[:-2] > 0).all())
        self.assertTrue(E[-1] < 0)  # test E/W component of descending is -ve
        self.assertTrue((N > 0).all())  # ensure all north values are positive

        # check unit vec components have correct magnitudes
        self.assertTrue((abs(V) > abs(E)).all())
        self.assertTrue((abs(V) > abs(N)).all())
        self.assertTrue((abs(E) > abs(N)).all())
class TestDateLookup(UnitTestAdaptation):
    """
    Tests for the algorithm.ifg_date_lookup() function.
    """

    @classmethod
    def setup_class(cls):
        cls.ifgs = small5_mock_ifgs()

    def test_ifg_date_lookup(self):
        # forward lookup: pair already ordered (first, second)
        pair = (date(2006, 8, 28), date(2006, 12, 11))
        self.assertEqual(self.ifgs[0], ifg_date_lookup(self.ifgs, pair))

        # a reversed pair should be reordered by age before matching
        pair = (date(2006, 12, 11), date(2006, 11, 6))
        self.assertEqual(self.ifgs[1], ifg_date_lookup(self.ifgs, pair))

    def test_ifg_date_lookup_failure(self):
        # a pair matching no interferogram raises ValueError
        missing = (date(2006, 12, 11), date(2007, 3, 26))
        self.assertRaises(ValueError, ifg_date_lookup, self.ifgs, missing)

    def test_date_lookup_bad_inputs(self):
        # malformed date pairs must be rejected with ValueError
        bad_pairs = [(None, None), (1, 10), (34.56, 345.93),
                     (date(2007, 3, 26), ""), (date(2007, 3, 26), None)]
        for pair in bad_pairs:
            self.assertRaises(ValueError, ifg_date_lookup, self.ifgs, pair)
# TODO: InitialModelTests
#class InitialModelTests(unittest.TestCase):
# def test_initial_model(self):
# 1. fake an RSC file with coords
# 2. fake a ones(shape) # could also make a ramp etc
# data is single band of DISPLACEMENT
#raise NotImplementedError
class TestEpochs(UnitTestAdaptation):
    """
    Unittests for the EpochList class.
    """

    def test_get_epochs(self):
        # Compare the EpochList built from the 17-ifg small test dataset
        # against hand-computed dates, repeat counts and fractional-year
        # spans (spans measured from the first epoch).
        def str2date(s):
            segs = s[:4], s[4:6], s[6:]  # year, month, day
            return date(*[int(sg) for sg in segs])

        raw_date = ['20060619', '20060828', '20061002', '20061106', '20061211',
                    '20070115', '20070219', '20070326', '20070430', '20070604',
                    '20070709', '20070813', '20070917']

        exp_dates = [str2date(d) for d in raw_date]
        exp_repeat = [1, 1, 3, 3, 4, 3, 3, 3, 3, 3, 3, 2, 2]
        exp_spans = [0, 0.1916, 0.2875, 0.3833, 0.4791, 0.5749, 0.6708, 0.7666,
                     0.8624, 0.9582, 1.0541, 1.1499, 1.2457]

        # open the test interferograms listed in the 'ifms_17' name file
        ifms = join(SML_TEST_TIF, "ifms_17")
        ifgs = [Ifg(join(SML_TEST_TIF, p)) for p in parse_namelist(ifms)]
        for i in ifgs:
            i.open()

        epochs = get_epochs(ifgs)[0]

        self.assertTrue((exp_dates == epochs.dates).all())
        self.assertTrue((exp_repeat == epochs.repeat).all())
        assert_array_almost_equal(exp_spans, epochs.spans, decimal=4)

    def test_get_all_epochs(self):
        # test function to extract all dates from sequence of ifgs
        ifgs = small5_mock_ifgs()
        for i in ifgs:
            i.nodata_value = 0
        dates = [date(2006, 8, 28), date(2006, 11, 6), date(2006, 12, 11),
                 date(2007, 1, 15), date(2007, 3, 26), date(2007, 9, 17)]

        self.assertEqual(dates, sorted(set(get_all_epochs(ifgs))))

    def test_get_epoch_count(self):
        # the 5 mock ifgs span 6 unique acquisition dates
        self.assertEqual(6, len(set(get_all_epochs(small5_mock_ifgs()))))

    def test_first_second_ids(self):
        # date -> id mapping is ordered by date, ignoring input order
        # and duplicates
        d0 = date(2006, 6, 19)
        d1 = date(2006, 8, 28)
        d2 = date(2006, 10, 2)
        d3 = date(2006, 11, 6)
        exp = {d0: 0, d1: 1, d2: 2, d3: 3}

        # test unordered and with duplicates
        self.assertEqual(exp, first_second_ids([d3, d0, d2, d1]))
        self.assertEqual(exp, first_second_ids([d3, d0, d2, d1, d3, d0]))
| 36.901709 | 79 | 0.600232 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.