hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f72062f74e3d658e22fe2cb10addd4d8ad13e2b0 | 399 | py | Python | thefuck/rules/brew_uninstall.py | pybenchmark/thefuck | 993a661c6048063e84645015cc832602b6ec32df | [
"MIT"
] | 1 | 2021-12-13T18:41:46.000Z | 2021-12-13T18:41:46.000Z | thefuck/rules/brew_uninstall.py | pybenchmark/thefuck | 993a661c6048063e84645015cc832602b6ec32df | [
"MIT"
] | 4 | 2020-12-23T15:44:08.000Z | 2020-12-23T16:48:59.000Z | thefuck/rules/brew_uninstall.py | pybenchmark/thefuck | 993a661c6048063e84645015cc832602b6ec32df | [
"MIT"
] | 1 | 2020-12-23T14:46:54.000Z | 2020-12-23T14:46:54.000Z | from thefuck.utils import for_app
@for_app('brew', at_least=2)
def match(command):
return (command.script_parts[1] in ['uninstall', 'rm', 'remove']
and "brew uninstall --force" in command.stdout)
def get_new_command(command):
command_parts = command.script_parts[:]
command_parts[1] = 'uninstall'
command_parts.insert(2, '--force')
return ' '.join(command_parts)
| 26.6 | 68 | 0.684211 | from thefuck.utils import for_app
@for_app('brew', at_least=2)
def match(command):
return (command.script_parts[1] in ['uninstall', 'rm', 'remove']
and "brew uninstall --force" in command.stdout)
def get_new_command(command):
command_parts = command.script_parts[:]
command_parts[1] = 'uninstall'
command_parts.insert(2, '--force')
return ' '.join(command_parts)
| true | true |
f720630f134225e35a47baee79036aa6afb6bbf5 | 2,354 | py | Python | aliyun-python-sdk-sddp/aliyunsdksddp/request/v20190103/DescribeDataAssetsRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 1,001 | 2015-07-24T01:32:41.000Z | 2022-03-25T01:28:18.000Z | aliyun-python-sdk-sddp/aliyunsdksddp/request/v20190103/DescribeDataAssetsRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 363 | 2015-10-20T03:15:00.000Z | 2022-03-08T12:26:19.000Z | aliyun-python-sdk-sddp/aliyunsdksddp/request/v20190103/DescribeDataAssetsRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 682 | 2015-09-22T07:19:02.000Z | 2022-03-22T09:51:46.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdksddp.endpoint import endpoint_data
class DescribeDataAssetsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Sddp', '2019-01-03', 'DescribeDataAssets')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_RiskLevels(self):
return self.get_query_params().get('RiskLevels')
def set_RiskLevels(self,RiskLevels):
self.add_query_param('RiskLevels',RiskLevels)
def get_RangeId(self):
return self.get_query_params().get('RangeId')
def set_RangeId(self,RangeId):
self.add_query_param('RangeId',RangeId)
def get_PageSize(self):
return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize)
def get_Lang(self):
return self.get_query_params().get('Lang')
def set_Lang(self,Lang):
self.add_query_param('Lang',Lang)
def get_CurrentPage(self):
return self.get_query_params().get('CurrentPage')
def set_CurrentPage(self,CurrentPage):
self.add_query_param('CurrentPage',CurrentPage)
def get_Name(self):
return self.get_query_params().get('Name')
def set_Name(self,Name):
self.add_query_param('Name',Name)
def get_RuleId(self):
return self.get_query_params().get('RuleId')
def set_RuleId(self,RuleId):
self.add_query_param('RuleId',RuleId) | 31.810811 | 74 | 0.752336 |
from aliyunsdkcore.request import RpcRequest
from aliyunsdksddp.endpoint import endpoint_data
class DescribeDataAssetsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Sddp', '2019-01-03', 'DescribeDataAssets')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_RiskLevels(self):
return self.get_query_params().get('RiskLevels')
def set_RiskLevels(self,RiskLevels):
self.add_query_param('RiskLevels',RiskLevels)
def get_RangeId(self):
return self.get_query_params().get('RangeId')
def set_RangeId(self,RangeId):
self.add_query_param('RangeId',RangeId)
def get_PageSize(self):
return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize)
def get_Lang(self):
return self.get_query_params().get('Lang')
def set_Lang(self,Lang):
self.add_query_param('Lang',Lang)
def get_CurrentPage(self):
return self.get_query_params().get('CurrentPage')
def set_CurrentPage(self,CurrentPage):
self.add_query_param('CurrentPage',CurrentPage)
def get_Name(self):
return self.get_query_params().get('Name')
def set_Name(self,Name):
self.add_query_param('Name',Name)
def get_RuleId(self):
return self.get_query_params().get('RuleId')
def set_RuleId(self,RuleId):
self.add_query_param('RuleId',RuleId) | true | true |
f720645e9f330d987e8a854268bf2c20aad7c1fa | 13,423 | py | Python | JChipSetup.py | mbuckaway/CrossMgr | 4c64e429eb3215fda1b685c5e684c56f5d0c02cf | [
"MIT"
] | 1 | 2020-02-05T11:22:03.000Z | 2020-02-05T11:22:03.000Z | JChipSetup.py | mbuckaway/CrossMgr | 4c64e429eb3215fda1b685c5e684c56f5d0c02cf | [
"MIT"
] | null | null | null | JChipSetup.py | mbuckaway/CrossMgr | 4c64e429eb3215fda1b685c5e684c56f5d0c02cf | [
"MIT"
] | null | null | null | import wx
import wx.lib.intctrl
import wx.lib.rcsizer as rcs
import socket
import sys
import re
import six
import datetime
import Model
import Utils
import JChip
import ChipReader
from JChip import EVT_CHIP_READER
import RaceResult
import Ultra
import HelpSearch
from ReadSignOnSheet import GetTagNums
HOST, PORT = JChip.DEFAULT_HOST, JChip.DEFAULT_PORT
def CheckExcelLink():
race = Model.race
if not race:
return (False, 'No active race.')
try:
externalFields = race.excelLink.getFields()
except (ValueError, AttributeError):
return (False, 'Unconfigured.')
if 'Tag' not in externalFields:
return (False, '"Tag" column not specified.')
return (True, 'Excel Link OK')
#------------------------------------------------------------------------------------------------
reIP = re.compile( '^[0-9.]+$' )
def GetAllIps():
addrInfo = socket.getaddrinfo( socket.gethostname(), None )
ips = set()
for a in addrInfo:
try:
ip = a[4][0]
except:
continue
if reIP.search(ip):
ips.add( ip )
return sorted( ips )
class JChipSetupDialog( wx.Dialog ):
def __init__( self, parent, id = wx.ID_ANY ):
wx.Dialog.__init__( self, parent, id, _("Chip Reader Setup"),
style=wx.DEFAULT_DIALOG_STYLE|wx.TAB_TRAVERSAL )
self.timer = None
self.receivedCount = 0
self.refTime = None
self.enableJChipCheckBox = wx.CheckBox( self, label = _('Accept RFID Reader Data During Race') )
if Model.race:
self.enableJChipCheckBox.SetValue( getattr(Model.race, 'enableJChipIntegration', False) )
else:
self.enableJChipCheckBox.Enable( False )
self.testJChip = wx.ToggleButton( self, label = _('Start RFID Test') )
self.testJChip.SetFont( wx.Font( (0,24), wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL ) )
self.Bind(wx.EVT_TOGGLEBUTTON, self.testJChipToggle, self.testJChip)
self.testList = wx.TextCtrl( self, style=wx.TE_READONLY|wx.TE_MULTILINE, size=(-1,200) )
self.testList.Bind( wx.EVT_RIGHT_DOWN, self.skip )
self.okBtn = wx.Button( self, wx.ID_OK )
self.Bind( wx.EVT_BUTTON, self.onOK, self.okBtn )
self.cancelBtn = wx.Button( self, wx.ID_CANCEL )
self.Bind( wx.EVT_BUTTON, self.onCancel, self.cancelBtn )
self.helpBtn = wx.Button( self, wx.ID_HELP )
self.Bind( wx.EVT_BUTTON, lambda evt: HelpSearch.showHelp('Menu-ChipReader.html#chip-reader-setup'), self.helpBtn )
self.Bind(EVT_CHIP_READER, self.handleChipReaderEvent)
bs = wx.BoxSizer( wx.VERTICAL )
todoList = u'\n'.join( '%d) %s' % (i + 1, s) for i, s in enumerate( [
_('Make sure the RFID receiver is plugged into the network.'),
_('If you are using Impinj/Alien, make sure the CrossMgrImpinj or CrossMgrAlien bridge programs are running.'),
_('You must have the Sign-On Excel sheet ready and linked before your race.'),
_('You must configure a "Tag" field in your Sign-On Excel Sheet.'),
_('Run this test before each race.'),
]) )
intro = (u'\n'.join( [
_('CrossMgr supports the JChip, RaceResult, Ultra, Impinj and Alien RFID readers.'),
_('For more details, consult the documentation for your reader.'),
] ) + u'\n' + _('Checklist:') + u'\n\n{}\n').format( todoList )
border = 4
bs.Add( wx.StaticText(self, label = intro), 0, wx.EXPAND|wx.ALL, border )
bs.Add( self.enableJChipCheckBox, 0, wx.EXPAND|wx.ALL|wx.ALIGN_LEFT, border )
#-------------------------------------------------------------------
bs.AddSpacer( border )
bs.Add( wx.StaticText( self, label = _('Reader Configuration:') ), 0, wx.EXPAND|wx.ALL, border )
#-------------------------------------------------------------------
rowColSizer = rcs.RowColSizer()
bs.Add( rowColSizer, 0, wx.EXPAND|wx.ALL, border )
row = 0
rowColSizer.Add( wx.StaticText( self, label=u'{}:'.format(_('Reader Type')) ), row=row, col=0, border=border,
flag=wx.TOP|wx.LEFT|wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL )
self.chipReaderType = wx.Choice( self, choices=[_('JChip/Impinj/Alien'), _('RaceResult'), _('Ultra')] )
self.chipReaderType.SetSelection( 0 )
self.chipReaderType.Bind( wx.EVT_CHOICE, self.changechipReaderType )
rowColSizer.Add( self.chipReaderType,
row=row, col=1, border=border, flag=wx.EXPAND|wx.TOP|wx.RIGHT|wx.ALIGN_LEFT )
row += 1
sep = u' -' + _('or') + u'- '
ips = sep.join( GetAllIps() )
self.ipaddr = wx.TextCtrl( self, value = ips, style = wx.TE_READONLY, size=(240,-1) )
self.autoDetect = wx.Button( self, label=_('AutoDetect') )
self.autoDetect.Show( False )
self.autoDetect.Bind( wx.EVT_BUTTON, self.doAutoDetect )
iphs = wx.BoxSizer( wx.HORIZONTAL )
iphs.Add( self.ipaddr, 1, flag=wx.EXPAND )
iphs.Add( self.autoDetect, 0, flag=wx.LEFT, border=4 )
rowColSizer.Add( wx.StaticText( self, label=_('Remote IP Address:') ),
row=row, col=0, flag=wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL )
rowColSizer.Add( iphs, row=row, col=1, border=border, flag=wx.EXPAND|wx.RIGHT|wx.ALIGN_LEFT )
row += 1
self.port = wx.lib.intctrl.IntCtrl( self, -1, min=1, max=65535, value=PORT,
limited=True, style = wx.TE_READONLY )
rowColSizer.Add( wx.StaticText(self, label = _('Remote Port:')), row=row, col=0,
flag=wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL )
rowColSizer.Add( self.port, row=row, col=1, border=border, flag=wx.EXPAND|wx.RIGHT|wx.ALIGN_LEFT )
bs.Add( wx.StaticText( self, label = _('If using JChip, see "7 Setting of Connections" in JChip "Control Panel Soft Manual" for more details.') ),
border=border, flag = wx.GROW|wx.ALL )
#-------------------------------------------------------------------
bs.Add( self.testJChip, 0, wx.ALIGN_CENTER|wx.ALL, border )
bs.Add( wx.StaticText(self, label = _('Messages:')), 0, wx.EXPAND|wx.ALL, border=border )
bs.Add( self.testList, 1, wx.EXPAND|wx.ALL, border )
buttonBox = wx.BoxSizer( wx.HORIZONTAL )
buttonBox.AddStretchSpacer()
buttonBox.Add( self.okBtn, flag = wx.RIGHT, border=border )
self.okBtn.SetDefault()
buttonBox.Add( self.cancelBtn )
buttonBox.Add( self.helpBtn )
bs.Add( buttonBox, 0, wx.EXPAND | wx.ALL, border )
self.stopTest()
self.SetSizerAndFit(bs)
bs.Fit( self )
self.update()
self.CentreOnParent(wx.BOTH)
wx.CallAfter( self.SetFocus )
def skip(self, evt):
return
def commit( self ):
race = Model.race
if not race:
return
race.chipReaderType = max( 0, self.chipReaderType.GetSelection() )
race.chipReaderIpAddr = self.ipaddr.GetValue()
if race.chipReaderType == 1:
Utils.writeConfig( 'RaceResultHost', race.chipReaderIpAddr )
elif race.chipReaderType == 2:
Utils.writeConfig( 'UltraHost', race.chipReaderIpAddr )
race.chipReaderPort = self.port.GetValue()
race.enableJChipIntegration = bool(self.enableJChipCheckBox.GetValue())
ChipReader.chipReaderCur.reset( race.chipReaderType )
def update( self ):
race = Model.race
if not race:
return
self.enableJChipCheckBox.SetValue( race.enableJChipIntegration )
self.chipReaderType.SetSelection( max(0, race.chipReaderType) )
self.ipaddr.SetValue( race.chipReaderIpAddr )
self.port.SetValue( race.chipReaderPort )
self.changechipReaderType()
def changechipReaderType( self, event=None ):
selection = self.chipReaderType.GetSelection()
if selection == 0: # JChip/CrossMgrImpinj/CrossMgrAlien
self.port.SetValue( JChip.DEFAULT_PORT )
self.port.SetEditable( False )
self.ipaddr.SetValue( Utils.GetDefaultHost() )
self.ipaddr.SetEditable( False )
self.autoDetect.Show( False )
elif selection == 1: # RaceResult
self.port.SetValue( RaceResult.DEFAULT_PORT )
self.port.SetEditable( True )
self.ipaddr.SetEditable( True )
rfidReaderHost = Utils.readConfig( 'RfidReaderHost', None )
if rfidReaderHost:
try:
self.ipaddr.SetValue( rfidReaderHost )
except Exception as e:
self.ipaddr.SetValue( Utils.GetDefaultHost() )
self.autoDetect.Show( True )
elif selection == 2: # Ultra
self.port.SetValue( Ultra.DEFAULT_PORT )
self.port.SetEditable( True )
self.ipaddr.SetEditable( True )
rfidReaderHost = Utils.readConfig( 'RfidReaderHost', None )
if rfidReaderHost:
try:
self.ipaddr.SetValue( rfidReaderHost )
except Exception as e:
self.ipaddr.SetValue( Utils.GetDefaultHost() )
self.autoDetect.Show( True )
self.Layout()
self.Refresh()
def doAutoDetect( self, event ):
selection = self.chipReaderType.GetSelection()
autoDetect = [RaceResult.AutoDetect, Ultra.AutoDetect][selection-1]
def getHost():
wait = wx.BusyCursor()
try:
return None, autoDetect(self.port.GetValue())
except Exception as e:
return e, None
error, readerHost = getHost()
if error:
Utils.MessageOK(
self,
u'{}:\n\n{}'.format(_("AutoDetect Error"), error),
_("AutoDetect Error"),
wx.ICON_ERROR
)
return
if not readerHost:
Utils.MessageOK(
self, u'{}:\n\n{}'.format(_("AutoDetect Failure"), _('Reader not found.')),
_("AutoDetect Failure"),
wx.ICON_ERROR
)
return
self.ipaddr.SetValue( readerHost )
def handleChipReaderEvent( self, event ):
if not event.tagTimes:
return
tagNums = {}
race = Model.race
if race:
if not race.enableUSBCamera:
return
tagNums = GetTagNums()
tag, dt = event.tagTimes[-1]
num = tagNums.get(tag, None)
def testJChipToggle( self, event ):
self.commit()
if not Model.race:
self.stopTest()
Utils.MessageOK( self, _('No active race. Cannot perform RFID test. "New" or "Open" a race first.'), _('Cannot Perform RFID Test') )
return
if Model.race.isRunning():
self.stopTest()
Utils.MessageOK( self, _('Cannot perform RFID test while race is running.'), _('Cannot Perform RFID Test') )
return
if self.testJChip.GetValue():
correct, reason = CheckExcelLink()
explain = _('CrossMgr will not be able to associate chip Tags with Bib numbers.') + u'\n' + \
_('You may proceed with the test, but you need to fix the Excel sheet.') + u'\n\n' + \
_('See documentation for details.')
if not correct:
if not Utils.MessageOKCancel( self, (_('Problems with Excel sheet.') + u'\n\n ' + _('Reason:') + u' {}\n\n{}').format(reason, explain),
title = _('Excel Link Problem'), iconMask = wx.ICON_WARNING ):
self.testJChip.SetValue( False )
return
tagNums = GetTagNums( True )
if correct and not tagNums:
if not Utils.MessageOKCancel( self, (_('All Tag entries in the Excel sheet are blank.') + u'\n\n{}').format(explain),
title = _('Excel Link Problem'), iconMask = wx.ICON_WARNING ):
self.testJChip.SetValue( False )
return
ChipReader.chipReaderCur.readerEventWindow = self
self.testList.Clear()
self.testJChip.SetLabel( 'Stop RFID Test' )
self.testJChip.SetBackgroundColour( wx.Colour(255,128,128) )
self.testJChip.SetValue( True )
ChipReader.chipReaderCur.StartListener()
self.appendMsg( 'listening for RFID connection...' )
# Start a timer to monitor the receiver.
self.receivedCount = 0
self.timer = wx.CallLater( 1000, self.onTimerCallback, 'started' )
else:
self.stopTest()
def appendMsg( self, s ):
self.testList.AppendText( s + '\n' )
def onTimerCallback( self, stat ):
data = ChipReader.chipReaderCur.GetData()
lastTag = None
for d in data:
if d[0] == 'data':
self.receivedCount += 1
ts = d[2].isoformat(' ')
if len(ts) == 8:
ts += '.00'
else:
ts = ts[:-2]
try:
num = '{}'.format(Model.race.tagNums[d[1]])
except (AttributeError, ValueError, KeyError):
num = 'not found'
lastTag = d[1]
self.appendMsg( '{}: tag={}, time={}, Bib={}'.format(self.receivedCount, d[1], ts, num) )
elif d[0] == 'connected':
self.appendMsg( '*******************************************' )
self.appendMsg( '{}: {}'.format(d[0], ', '.join('{}'.format(s) for s in d[1:]) ) )
elif d[0] == 'disconnected':
self.appendMsg( d[0] )
self.appendMsg( '' )
self.appendMsg( _('listening for RFID connection...') )
elif d[0] == 'name':
self.appendMsg( u'{}: {}'.format(_('receiver name'), d[1]) )
else:
self.appendMsg( '{}: {}'.format(d[0], ', '.join('<<{}>>'.format(s) for s in d[1:]) ) )
if data:
self.testList.SetInsertionPointEnd()
self.timer.Restart( 1000, 'restarted' )
if lastTag and Utils.mainWin and getattr(Utils.mainWin, 'findDialog', None):
if Utils.mainWin.findDialog.IsShown():
Utils.mainWin.findDialog.refresh( lastTag )
def stopTest( self ):
ChipReader.chipReaderCur.StopListener()
if self.timer:
self.timer.Stop()
self.timer = None
self.testList.Clear()
self.appendMsg( _('No test running.') )
ChipReader.chipReaderCur.readerEventWindow = None
self.testJChip.SetLabel( _('Start RFID Test') )
self.testJChip.SetBackgroundColour( wx.NullColour )
self.testJChip.SetValue( False )
def onOK( self, event ):
self.stopTest()
self.commit()
wx.CallAfter( Utils.refresh )
self.EndModal( wx.ID_OK )
def onCancel( self, event ):
self.stopTest()
self.EndModal( wx.ID_CANCEL )
if __name__ == '__main__':
six.print_( GetAllIps() )
#sys.exit()
app = wx.App(False)
mainWin = wx.Frame(None,title="CrossMan", size=(600,400))
Model.setRace( Model.Race() )
Model.race._populate()
Model.race.finishRaceNow()
Model.race.enableUSBCamera = True
mainWin.Show()
dlg = JChipSetupDialog( mainWin )
dlg.ShowModal()
dlg.Destroy()
| 33.896465 | 149 | 0.662147 | import wx
import wx.lib.intctrl
import wx.lib.rcsizer as rcs
import socket
import sys
import re
import six
import datetime
import Model
import Utils
import JChip
import ChipReader
from JChip import EVT_CHIP_READER
import RaceResult
import Ultra
import HelpSearch
from ReadSignOnSheet import GetTagNums
HOST, PORT = JChip.DEFAULT_HOST, JChip.DEFAULT_PORT
def CheckExcelLink():
race = Model.race
if not race:
return (False, 'No active race.')
try:
externalFields = race.excelLink.getFields()
except (ValueError, AttributeError):
return (False, 'Unconfigured.')
if 'Tag' not in externalFields:
return (False, '"Tag" column not specified.')
return (True, 'Excel Link OK')
reIP = re.compile( '^[0-9.]+$' )
def GetAllIps():
addrInfo = socket.getaddrinfo( socket.gethostname(), None )
ips = set()
for a in addrInfo:
try:
ip = a[4][0]
except:
continue
if reIP.search(ip):
ips.add( ip )
return sorted( ips )
class JChipSetupDialog( wx.Dialog ):
def __init__( self, parent, id = wx.ID_ANY ):
wx.Dialog.__init__( self, parent, id, _("Chip Reader Setup"),
style=wx.DEFAULT_DIALOG_STYLE|wx.TAB_TRAVERSAL )
self.timer = None
self.receivedCount = 0
self.refTime = None
self.enableJChipCheckBox = wx.CheckBox( self, label = _('Accept RFID Reader Data During Race') )
if Model.race:
self.enableJChipCheckBox.SetValue( getattr(Model.race, 'enableJChipIntegration', False) )
else:
self.enableJChipCheckBox.Enable( False )
self.testJChip = wx.ToggleButton( self, label = _('Start RFID Test') )
self.testJChip.SetFont( wx.Font( (0,24), wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL ) )
self.Bind(wx.EVT_TOGGLEBUTTON, self.testJChipToggle, self.testJChip)
self.testList = wx.TextCtrl( self, style=wx.TE_READONLY|wx.TE_MULTILINE, size=(-1,200) )
self.testList.Bind( wx.EVT_RIGHT_DOWN, self.skip )
self.okBtn = wx.Button( self, wx.ID_OK )
self.Bind( wx.EVT_BUTTON, self.onOK, self.okBtn )
self.cancelBtn = wx.Button( self, wx.ID_CANCEL )
self.Bind( wx.EVT_BUTTON, self.onCancel, self.cancelBtn )
self.helpBtn = wx.Button( self, wx.ID_HELP )
self.Bind( wx.EVT_BUTTON, lambda evt: HelpSearch.showHelp('Menu-ChipReader.html#chip-reader-setup'), self.helpBtn )
self.Bind(EVT_CHIP_READER, self.handleChipReaderEvent)
bs = wx.BoxSizer( wx.VERTICAL )
todoList = u'\n'.join( '%d) %s' % (i + 1, s) for i, s in enumerate( [
_('Make sure the RFID receiver is plugged into the network.'),
_('If you are using Impinj/Alien, make sure the CrossMgrImpinj or CrossMgrAlien bridge programs are running.'),
_('You must have the Sign-On Excel sheet ready and linked before your race.'),
_('You must configure a "Tag" field in your Sign-On Excel Sheet.'),
_('Run this test before each race.'),
]) )
intro = (u'\n'.join( [
_('CrossMgr supports the JChip, RaceResult, Ultra, Impinj and Alien RFID readers.'),
_('For more details, consult the documentation for your reader.'),
] ) + u'\n' + _('Checklist:') + u'\n\n{}\n').format( todoList )
border = 4
bs.Add( wx.StaticText(self, label = intro), 0, wx.EXPAND|wx.ALL, border )
bs.Add( self.enableJChipCheckBox, 0, wx.EXPAND|wx.ALL|wx.ALIGN_LEFT, border )
bs.AddSpacer( border )
bs.Add( wx.StaticText( self, label = _('Reader Configuration:') ), 0, wx.EXPAND|wx.ALL, border )
rowColSizer = rcs.RowColSizer()
bs.Add( rowColSizer, 0, wx.EXPAND|wx.ALL, border )
row = 0
rowColSizer.Add( wx.StaticText( self, label=u'{}:'.format(_('Reader Type')) ), row=row, col=0, border=border,
flag=wx.TOP|wx.LEFT|wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL )
self.chipReaderType = wx.Choice( self, choices=[_('JChip/Impinj/Alien'), _('RaceResult'), _('Ultra')] )
self.chipReaderType.SetSelection( 0 )
self.chipReaderType.Bind( wx.EVT_CHOICE, self.changechipReaderType )
rowColSizer.Add( self.chipReaderType,
row=row, col=1, border=border, flag=wx.EXPAND|wx.TOP|wx.RIGHT|wx.ALIGN_LEFT )
row += 1
sep = u' -' + _('or') + u'- '
ips = sep.join( GetAllIps() )
self.ipaddr = wx.TextCtrl( self, value = ips, style = wx.TE_READONLY, size=(240,-1) )
self.autoDetect = wx.Button( self, label=_('AutoDetect') )
self.autoDetect.Show( False )
self.autoDetect.Bind( wx.EVT_BUTTON, self.doAutoDetect )
iphs = wx.BoxSizer( wx.HORIZONTAL )
iphs.Add( self.ipaddr, 1, flag=wx.EXPAND )
iphs.Add( self.autoDetect, 0, flag=wx.LEFT, border=4 )
rowColSizer.Add( wx.StaticText( self, label=_('Remote IP Address:') ),
row=row, col=0, flag=wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL )
rowColSizer.Add( iphs, row=row, col=1, border=border, flag=wx.EXPAND|wx.RIGHT|wx.ALIGN_LEFT )
row += 1
self.port = wx.lib.intctrl.IntCtrl( self, -1, min=1, max=65535, value=PORT,
limited=True, style = wx.TE_READONLY )
rowColSizer.Add( wx.StaticText(self, label = _('Remote Port:')), row=row, col=0,
flag=wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL )
rowColSizer.Add( self.port, row=row, col=1, border=border, flag=wx.EXPAND|wx.RIGHT|wx.ALIGN_LEFT )
bs.Add( wx.StaticText( self, label = _('If using JChip, see "7 Setting of Connections" in JChip "Control Panel Soft Manual" for more details.') ),
border=border, flag = wx.GROW|wx.ALL )
bs.Add( self.testJChip, 0, wx.ALIGN_CENTER|wx.ALL, border )
bs.Add( wx.StaticText(self, label = _('Messages:')), 0, wx.EXPAND|wx.ALL, border=border )
bs.Add( self.testList, 1, wx.EXPAND|wx.ALL, border )
buttonBox = wx.BoxSizer( wx.HORIZONTAL )
buttonBox.AddStretchSpacer()
buttonBox.Add( self.okBtn, flag = wx.RIGHT, border=border )
self.okBtn.SetDefault()
buttonBox.Add( self.cancelBtn )
buttonBox.Add( self.helpBtn )
bs.Add( buttonBox, 0, wx.EXPAND | wx.ALL, border )
self.stopTest()
self.SetSizerAndFit(bs)
bs.Fit( self )
self.update()
self.CentreOnParent(wx.BOTH)
wx.CallAfter( self.SetFocus )
def skip(self, evt):
return
def commit( self ):
race = Model.race
if not race:
return
race.chipReaderType = max( 0, self.chipReaderType.GetSelection() )
race.chipReaderIpAddr = self.ipaddr.GetValue()
if race.chipReaderType == 1:
Utils.writeConfig( 'RaceResultHost', race.chipReaderIpAddr )
elif race.chipReaderType == 2:
Utils.writeConfig( 'UltraHost', race.chipReaderIpAddr )
race.chipReaderPort = self.port.GetValue()
race.enableJChipIntegration = bool(self.enableJChipCheckBox.GetValue())
ChipReader.chipReaderCur.reset( race.chipReaderType )
def update( self ):
race = Model.race
if not race:
return
self.enableJChipCheckBox.SetValue( race.enableJChipIntegration )
self.chipReaderType.SetSelection( max(0, race.chipReaderType) )
self.ipaddr.SetValue( race.chipReaderIpAddr )
self.port.SetValue( race.chipReaderPort )
self.changechipReaderType()
def changechipReaderType( self, event=None ):
selection = self.chipReaderType.GetSelection()
if selection == 0:
self.port.SetValue( JChip.DEFAULT_PORT )
self.port.SetEditable( False )
self.ipaddr.SetValue( Utils.GetDefaultHost() )
self.ipaddr.SetEditable( False )
self.autoDetect.Show( False )
elif selection == 1:
self.port.SetValue( RaceResult.DEFAULT_PORT )
self.port.SetEditable( True )
self.ipaddr.SetEditable( True )
rfidReaderHost = Utils.readConfig( 'RfidReaderHost', None )
if rfidReaderHost:
try:
self.ipaddr.SetValue( rfidReaderHost )
except Exception as e:
self.ipaddr.SetValue( Utils.GetDefaultHost() )
self.autoDetect.Show( True )
elif selection == 2:
self.port.SetValue( Ultra.DEFAULT_PORT )
self.port.SetEditable( True )
self.ipaddr.SetEditable( True )
rfidReaderHost = Utils.readConfig( 'RfidReaderHost', None )
if rfidReaderHost:
try:
self.ipaddr.SetValue( rfidReaderHost )
except Exception as e:
self.ipaddr.SetValue( Utils.GetDefaultHost() )
self.autoDetect.Show( True )
self.Layout()
self.Refresh()
def doAutoDetect( self, event ):
selection = self.chipReaderType.GetSelection()
autoDetect = [RaceResult.AutoDetect, Ultra.AutoDetect][selection-1]
def getHost():
wait = wx.BusyCursor()
try:
return None, autoDetect(self.port.GetValue())
except Exception as e:
return e, None
error, readerHost = getHost()
if error:
Utils.MessageOK(
self,
u'{}:\n\n{}'.format(_("AutoDetect Error"), error),
_("AutoDetect Error"),
wx.ICON_ERROR
)
return
if not readerHost:
Utils.MessageOK(
self, u'{}:\n\n{}'.format(_("AutoDetect Failure"), _('Reader not found.')),
_("AutoDetect Failure"),
wx.ICON_ERROR
)
return
self.ipaddr.SetValue( readerHost )
def handleChipReaderEvent( self, event ):
if not event.tagTimes:
return
tagNums = {}
race = Model.race
if race:
if not race.enableUSBCamera:
return
tagNums = GetTagNums()
tag, dt = event.tagTimes[-1]
num = tagNums.get(tag, None)
def testJChipToggle( self, event ):
self.commit()
if not Model.race:
self.stopTest()
Utils.MessageOK( self, _('No active race. Cannot perform RFID test. "New" or "Open" a race first.'), _('Cannot Perform RFID Test') )
return
if Model.race.isRunning():
self.stopTest()
Utils.MessageOK( self, _('Cannot perform RFID test while race is running.'), _('Cannot Perform RFID Test') )
return
if self.testJChip.GetValue():
correct, reason = CheckExcelLink()
explain = _('CrossMgr will not be able to associate chip Tags with Bib numbers.') + u'\n' + \
_('You may proceed with the test, but you need to fix the Excel sheet.') + u'\n\n' + \
_('See documentation for details.')
if not correct:
if not Utils.MessageOKCancel( self, (_('Problems with Excel sheet.') + u'\n\n ' + _('Reason:') + u' {}\n\n{}').format(reason, explain),
title = _('Excel Link Problem'), iconMask = wx.ICON_WARNING ):
self.testJChip.SetValue( False )
return
tagNums = GetTagNums( True )
if correct and not tagNums:
if not Utils.MessageOKCancel( self, (_('All Tag entries in the Excel sheet are blank.') + u'\n\n{}').format(explain),
title = _('Excel Link Problem'), iconMask = wx.ICON_WARNING ):
self.testJChip.SetValue( False )
return
ChipReader.chipReaderCur.readerEventWindow = self
self.testList.Clear()
self.testJChip.SetLabel( 'Stop RFID Test' )
self.testJChip.SetBackgroundColour( wx.Colour(255,128,128) )
self.testJChip.SetValue( True )
ChipReader.chipReaderCur.StartListener()
self.appendMsg( 'listening for RFID connection...' )
self.receivedCount = 0
self.timer = wx.CallLater( 1000, self.onTimerCallback, 'started' )
else:
self.stopTest()
def appendMsg( self, s ):
self.testList.AppendText( s + '\n' )
def onTimerCallback( self, stat ):
data = ChipReader.chipReaderCur.GetData()
lastTag = None
for d in data:
if d[0] == 'data':
self.receivedCount += 1
ts = d[2].isoformat(' ')
if len(ts) == 8:
ts += '.00'
else:
ts = ts[:-2]
try:
num = '{}'.format(Model.race.tagNums[d[1]])
except (AttributeError, ValueError, KeyError):
num = 'not found'
lastTag = d[1]
self.appendMsg( '{}: tag={}, time={}, Bib={}'.format(self.receivedCount, d[1], ts, num) )
elif d[0] == 'connected':
self.appendMsg( '*******************************************' )
self.appendMsg( '{}: {}'.format(d[0], ', '.join('{}'.format(s) for s in d[1:]) ) )
elif d[0] == 'disconnected':
self.appendMsg( d[0] )
self.appendMsg( '' )
self.appendMsg( _('listening for RFID connection...') )
elif d[0] == 'name':
self.appendMsg( u'{}: {}'.format(_('receiver name'), d[1]) )
else:
self.appendMsg( '{}: {}'.format(d[0], ', '.join('<<{}>>'.format(s) for s in d[1:]) ) )
if data:
self.testList.SetInsertionPointEnd()
self.timer.Restart( 1000, 'restarted' )
if lastTag and Utils.mainWin and getattr(Utils.mainWin, 'findDialog', None):
if Utils.mainWin.findDialog.IsShown():
Utils.mainWin.findDialog.refresh( lastTag )
def stopTest( self ):
ChipReader.chipReaderCur.StopListener()
if self.timer:
self.timer.Stop()
self.timer = None
self.testList.Clear()
self.appendMsg( _('No test running.') )
ChipReader.chipReaderCur.readerEventWindow = None
self.testJChip.SetLabel( _('Start RFID Test') )
self.testJChip.SetBackgroundColour( wx.NullColour )
self.testJChip.SetValue( False )
def onOK( self, event ):
self.stopTest()
self.commit()
wx.CallAfter( Utils.refresh )
self.EndModal( wx.ID_OK )
	def onCancel( self, event ):
		"""Handle Cancel: stop any running test and close the dialog with
		wx.ID_CANCEL without committing changes.
		"""
		self.stopTest()
		self.EndModal( wx.ID_CANCEL )
if __name__ == '__main__':
	# Ad-hoc test harness: open the setup dialog against a freshly
	# populated, finished demo race.
	six.print_( GetAllIps() )
	app = wx.App(False)	# keep a reference so the wx event machinery stays alive
	frame = wx.Frame(None,title="CrossMan", size=(600,400))
	Model.setRace( Model.Race() )
	Model.race._populate()
	Model.race.finishRaceNow()
	Model.race.enableUSBCamera = True
	frame.Show()
	dialog = JChipSetupDialog( frame )
	dialog.ShowModal()
	dialog.Destroy()
| true | true |
f72064b70b67fb3499b7878938b50a98033a491b | 845 | py | Python | examples/upload_video.py | nullwriter/ig-actor | a089107657ccdf11ba213160c4cc5d3690cecd76 | [
"MIT"
] | null | null | null | examples/upload_video.py | nullwriter/ig-actor | a089107657ccdf11ba213160c4cc5d3690cecd76 | [
"MIT"
] | null | null | null | examples/upload_video.py | nullwriter/ig-actor | a089107657ccdf11ba213160c4cc5d3690cecd76 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Use text editor to edit the script and type in valid Instagram username/password
from bot.lib.InstagramAPI import InstagramAPI

# urllib.urlretrieve() exists only on Python 2; on Python 3 it lives in
# urllib.request.  Import it so the script runs on either interpreter.
try:
    from urllib import urlretrieve          # Python 2
except ImportError:
    from urllib.request import urlretrieve  # Python 3

# A valid Instagram video and its thumbnail; both are downloaded next to
# the script under their original basenames.
video_url = 'https://instagram.fmad3-2.fna.fbcdn.net/t50.2886-16/17157217_1660580944235536_866261046376005632_n.mp4'
video_local_path = video_url.split("/")[-1]
thumbnail_url = "https://instagram.fmad3-2.fna.fbcdn.net/t51.2885-15/e15/17075853_1759410394387536_3927726791665385472_n.jpg"
thumbnail_local_path = thumbnail_url.split("/")[-1]
urlretrieve(video_url, video_local_path)
urlretrieve(thumbnail_url, thumbnail_local_path)

# Use a valid Instagram username/password here.
user, pwd = 'user', 'password'
api = InstagramAPI(user, pwd)  # do not shadow the imported class name
api.login()
api.uploadVideo(video_local_path, thumbnail_local_path, caption="Tortuguero")
| 36.73913 | 141 | 0.802367 |
import urllib
from bot.lib.InstagramAPI import InstagramAPI
video_url = 'https://instagram.fmad3-2.fna.fbcdn.net/t50.2886-16/17157217_1660580944235536_866261046376005632_n.mp4'
video_local_path = video_url.split("/")[-1]
thumbnail_url = "https://instagram.fmad3-2.fna.fbcdn.net/t51.2885-15/e15/17075853_1759410394387536_3927726791665385472_n.jpg"
thumbnail_local_path = thumbnail_url.split("/")[-1]
urllib.urlretrieve(video_url,video_local_path)
urllib.urlretrieve(thumbnail_url,thumbnail_local_path)
user,pwd = 'user', 'password'
InstagramAPI = InstagramAPI(user,pwd)
InstagramAPI.login()
InstagramAPI.uploadVideo(video_local_path,thumbnail_local_path,caption="Tortuguero")
| true | true |
f72065bfefeeb5caf657c46e57376538c3455609 | 4,517 | py | Python | lib/3rdparty/common/pyqode/core/modes/autocomplete.py | tommo/gii | 03624a57cf74a07e38bfdc7f53c50bd926b7b5a7 | [
"MIT"
] | 7 | 2016-02-13T18:47:23.000Z | 2020-07-03T13:47:49.000Z | lib/3rdparty/common/pyqode/core/modes/autocomplete.py | tommo/gii | 03624a57cf74a07e38bfdc7f53c50bd926b7b5a7 | [
"MIT"
] | 1 | 2018-06-13T04:55:27.000Z | 2021-11-05T05:52:51.000Z | lib/3rdparty/common/pyqode/core/modes/autocomplete.py | tommo/gii | 03624a57cf74a07e38bfdc7f53c50bd926b7b5a7 | [
"MIT"
] | 4 | 2016-02-15T13:32:46.000Z | 2019-12-12T17:22:31.000Z | # -*- coding: utf-8 -*-
""" Contains the AutoCompleteMode """
import logging
from pyqode.qt import QtCore, QtGui
from pyqode.core.api import TextHelper
from pyqode.core.api.mode import Mode
class AutoCompleteMode(Mode):
    """ Automatically complete quotes and parentheses

    Generic auto complete mode that automatically completes the following
    symbols:

        - " -> "
        - ' -> '
        - ( -> )
        - [ -> ]
        - { -> }
    """
    #: Auto complete mapping, maps input key with completion text.
    MAPPING = {'"': '"', "'": "'", "(": ")", "{": "}", "[": "]"}
    #: The format to use for each symbol in mapping when there is a selection
    SELECTED_QUOTES_FORMATS = {key: '%s%s%s' for key in MAPPING.keys()}
    #: The format to use for each symbol in mapping when there is no selection
    QUOTES_FORMATS = {key: '%s' for key in MAPPING.keys()}

    def __init__(self):
        super(AutoCompleteMode, self).__init__()
        self.logger = logging.getLogger(__name__)
        # Set by _on_key_pressed when it fully handled the key itself, so
        # that _on_post_key_pressed does not insert a second completion.
        self._ignore_post = False

    def on_state_changed(self, state):
        """Connect/disconnect the editor key signals when the mode is
        enabled/disabled.
        """
        if state:
            self.editor.post_key_pressed.connect(self._on_post_key_pressed)
            self.editor.key_pressed.connect(self._on_key_pressed)
        else:
            self.editor.post_key_pressed.disconnect(self._on_post_key_pressed)
            self.editor.key_pressed.disconnect(self._on_key_pressed)

    def _on_post_key_pressed(self, event):
        """After a key has been processed, insert the matching closing symbol
        unless the cursor is inside a comment/string or the character to the
        right makes completion undesirable.
        """
        if not event.isAccepted() and not self._ignore_post:
            txt = event.text()
            trav = self.editor.textCursor()
            # Look two characters back to test whether the typed symbol
            # landed inside a comment or string literal.
            trav.movePosition(trav.Left, trav.MoveAnchor, 2)
            literal = TextHelper(self.editor).is_comment_or_string(trav)
            if not literal:
                next_char = TextHelper(self.editor).get_right_character()
                if txt in self.MAPPING:
                    to_insert = self.MAPPING[txt]
                    # Only complete at end of line, before whitespace, or
                    # before another bracket/quote character.
                    if (not next_char or next_char in self.MAPPING.keys() or
                            next_char in self.MAPPING.values() or
                            next_char.isspace()):
                        TextHelper(self.editor).insert_text(
                            self.QUOTES_FORMATS[txt] % to_insert)
        self._ignore_post = False

    def _on_key_pressed(self, event):
        """Handle quoting of selections, paired backspace deletion, and
        skipping over an already-present closing symbol.
        """
        txt = event.text()
        cursor = self.editor.textCursor()
        if cursor.hasSelection():
            # quoting of selected text
            if event.text() in self.MAPPING.keys():
                first = event.text()
                last = self.MAPPING[event.text()]
                cursor.insertText(
                    self.SELECTED_QUOTES_FORMATS[event.text()] % (
                        first, cursor.selectedText(), last))
                self.editor.setTextCursor(cursor)
                event.accept()
            else:
                self._ignore_post = True
            return
        next_char = TextHelper(self.editor).get_right_character()
        self.logger.debug('next char: %s', next_char)
        ignore = False
        if event.key() == QtCore.Qt.Key_Backspace:
            # get the character that will get deleted
            tc = self.editor.textCursor()
            pos = tc.position()
            tc.movePosition(tc.Left)
            tc.movePosition(tc.Right, tc.KeepAnchor)
            del_char = tc.selectedText()
            # Deleting an opening symbol also removes its auto-inserted pair.
            if del_char in self.MAPPING and \
                    self.MAPPING[del_char] == next_char:
                tc.beginEditBlock()
                tc.movePosition(tc.Right, tc.KeepAnchor)
                tc.insertText('')
                tc.setPosition(pos - 2)
                tc.endEditBlock()
                self.editor.setTextCursor(tc)
                ignore = True
        elif txt and next_char == txt and next_char in self.MAPPING:
            ignore = True
        elif event.text() in (')', ']', '}'):
            # if typing the same symbol twice, the symbol should not be written
            # and the cursor moved just after the char
            # e.g. if you type ) just before ), the cursor will just move after
            # the existing )
            if next_char == event.text():
                ignore = True
        if ignore:
            event.accept()
            TextHelper(self.editor).clear_selection()
            TextHelper(self.editor).move_right()
| 41.440367 | 79 | 0.570511 |
import logging
from pyqode.qt import QtCore, QtGui
from pyqode.core.api import TextHelper
from pyqode.core.api.mode import Mode
class AutoCompleteMode(Mode):
MAPPING = {'"': '"', "'": "'", "(": ")", "{": "}", "[": "]"}
SELECTED_QUOTES_FORMATS = {key: '%s%s%s' for key in MAPPING.keys()}
QUOTES_FORMATS = {key: '%s' for key in MAPPING.keys()}
def __init__(self):
super(AutoCompleteMode, self).__init__()
self.logger = logging.getLogger(__name__)
self._ignore_post = False
def on_state_changed(self, state):
if state:
self.editor.post_key_pressed.connect(self._on_post_key_pressed)
self.editor.key_pressed.connect(self._on_key_pressed)
else:
self.editor.post_key_pressed.disconnect(self._on_post_key_pressed)
self.editor.key_pressed.disconnect(self._on_key_pressed)
def _on_post_key_pressed(self, event):
if not event.isAccepted() and not self._ignore_post:
txt = event.text()
trav = self.editor.textCursor()
assert isinstance(trav, QtGui.QTextCursor)
trav.movePosition(trav.Left, trav.MoveAnchor, 2)
literal = TextHelper(self.editor).is_comment_or_string(trav)
if not literal:
next_char = TextHelper(self.editor).get_right_character()
if txt in self.MAPPING:
to_insert = self.MAPPING[txt]
if (not next_char or next_char in self.MAPPING.keys() or
next_char in self.MAPPING.values() or
next_char.isspace()):
TextHelper(self.editor).insert_text(
self.QUOTES_FORMATS[txt] % to_insert)
self._ignore_post = False
def _on_key_pressed(self, event):
txt = event.text()
cursor = self.editor.textCursor()
from pyqode.qt import QtGui
assert isinstance(cursor, QtGui.QTextCursor)
if cursor.hasSelection():
if event.text() in self.MAPPING.keys():
first = event.text()
last = self.MAPPING[event.text()]
cursor.insertText(
self.SELECTED_QUOTES_FORMATS[event.text()] % (
first, cursor.selectedText(), last))
self.editor.setTextCursor(cursor)
event.accept()
else:
self._ignore_post = True
return
next_char = TextHelper(self.editor).get_right_character()
self.logger.debug('next char: %s', next_char)
ignore = False
if event.key() == QtCore.Qt.Key_Backspace:
tc = self.editor.textCursor()
pos = tc.position()
tc.movePosition(tc.Left)
tc.movePosition(tc.Right, tc.KeepAnchor)
del_char = tc.selectedText()
if del_char in self.MAPPING and \
self.MAPPING[del_char] == next_char:
tc.beginEditBlock()
tc.movePosition(tc.Right, tc.KeepAnchor)
tc.insertText('')
tc.setPosition(pos - 2)
tc.endEditBlock()
self.editor.setTextCursor(tc)
ignore = True
elif txt and next_char == txt and next_char in self.MAPPING:
ignore = True
elif event.text() == ')' or event.text() == ']' or event.text() == '}':
if next_char == event.text():
ignore = True
if ignore:
event.accept()
TextHelper(self.editor).clear_selection()
TextHelper(self.editor).move_right()
| true | true |
f720669f2683fff61a73382464913841475adbc5 | 18,762 | py | Python | mslib/mswms/dataaccess.py | iamansoni/MSS | 69bc8fc61ab277697ca691119f911382a63860c0 | [
"Apache-2.0"
] | null | null | null | mslib/mswms/dataaccess.py | iamansoni/MSS | 69bc8fc61ab277697ca691119f911382a63860c0 | [
"Apache-2.0"
] | null | null | null | mslib/mswms/dataaccess.py | iamansoni/MSS | 69bc8fc61ab277697ca691119f911382a63860c0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
mslib.mswms.dataaccess
~~~~~~~~~~~~~~~~~~~~~~
This module provides functions to access data
This file is part of mss.
:copyright: Copyright 2008-2014 Deutsches Zentrum fuer Luft- und Raumfahrt e.V.
:copyright: Copyright 2011-2014 Marc Rautenhaus (mr)
:copyright: Copyright 2016-2020 by the mss team, see AUTHORS.
:license: APACHE-2.0, see LICENSE for details.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from abc import ABCMeta, abstractmethod
import itertools
import os
import logging
import netCDF4
import numpy as np
import pint
from mslib import netCDF4tools
from mslib.utils import UR
class NWPDataAccess(metaclass=ABCMeta):
    """Abstract superclass providing a framework to let the user query
    in which data file a given variable at a given time can be found.

    The class provides the method get_filename(). It derives filenames from
    CF variable names, initialisation and valid times.

    The method get_datapath() provides the root path where the data
    can be found.

    In subclasses, the protected method _determine_filename() must be
    implemented.
    """

    def __init__(self, rootpath, uses_init_time=True, uses_valid_time=True):
        """Constructor takes the path of the data directory and determines whether
        this class employs different init_times or valid_times.
        """
        self._root_path = rootpath
        # Model name is empty by default; subclasses may set it.
        self._modelname = ""
        self._use_init_time = uses_init_time
        self._use_valid_time = uses_valid_time

    @abstractmethod
    def setup(self):
        """Checks for existing files etc. and sets up the class. Called by
        server whenever a client requests a current capability document.
        """
        pass

    def have_data(self, variable, vartype, init_time, valid_time):
        """Checks whether a file with data for the specified variable,
        type and times is known. This does not trigger a search for
        updated data files on disk.

        Returns True if a matching file is known, False otherwise.
        """
        # EAFP: _determine_filename raises ValueError when no file is known.
        try:
            self._determine_filename(
                variable, vartype, init_time, valid_time, reload=False)
        except ValueError:
            return False
        else:
            return True

    def get_filename(self, variable, vartype, init_time, valid_time,
                     fullpath=False):
        """Get the filename of the file in which a given variable at
        a given time can be found.

        In case no file is available, the disk is searched for updated
        data before failing.

        Arguments:
        variable -- string with CF name of variable
        vartype -- string specifying the type of the variable (model specific).
                   For example, can be ml (model level), pl (pressure level),
                   or sfc (surface) for, e.g., ECMWF data.
        init_time -- datetime object with initialisation time of forecast run
        valid_time -- datetime object with valid time of forecast
        fullpath -- if True, the complete path to the file will be returned.
                    Default is False, only the filename will be returned.
        """
        filename = self._determine_filename(variable, vartype,
                                            init_time, valid_time)
        if fullpath:
            return os.path.join(self._root_path, filename)
        else:
            return filename

    @abstractmethod
    def _determine_filename(self, variable, vartype, init_time, valid_time):
        """Must be overwritten in subclass. Determines the filename
        (without path) of the variable <variable> at the forecast
        timestep specified by init_time and valid_time.
        """
        pass

    def get_datapath(self):
        """Return the path to the data directory.
        """
        return self._root_path

    def uses_inittime_dimension(self):
        """Return whether this data set supports multiple init times.
        """
        return self._use_init_time

    def uses_validtime_dimension(self):
        """Return whether this data set supports multiple valid times.
        """
        return self._use_valid_time

    @abstractmethod
    def get_all_datafiles(self):
        """Return a list of all available data files.
        """
        pass

    @abstractmethod
    def get_init_times(self):
        """Return a list of available forecast init times (base times).
        """
        pass

    @abstractmethod
    def get_valid_times(self):
        """Return a list of available forecast times.
        """
        pass

    @abstractmethod
    def get_elevations(self, vert_type):
        """Return a list of available elevations for a vertical level type.
        """
        pass

    @abstractmethod
    def get_elevation_units(self, vert_type):
        """Returns units of supplied vertical type.
        """
        pass

    # Class-level default; subclasses replace it with an instance attribute
    # in __init__.  Never mutated in place here.
    _mfDatasetArgsDict = {}

    def mfDatasetArgs(self):
        """Returns additional keyword for the MFDatasetCommonDims instance that
        handles the input data of this dataset. See the MFDatasetCommonDims
        documentation for further details.
        Mainly provided as a workaround for numerical inaccuracies introduced
        to the NetCDF files by netcdf-java 4.3.
        (mr, 16Oct2012)
        """
        return self._mfDatasetArgsDict
class DefaultDataAccess(NWPDataAccess):
    """
    Subclass to NWPDataAccess for accessing properly constructed NetCDF files

    Constructor needs information on domain ID.
    """

    # Workaround for the numerical issue concerning the lon dimension in
    # NetCDF files produced by netcdf-java 4.3 (see mfDatasetArgs).

    def __init__(self, rootpath, domain_id, skip_dim_check=[], **kwargs):
        """Constructor takes the path of the data directory and determines whether
        this class employs different init_times or valid_times.

        domain_id -- substring that a file name must contain to belong to
                     this data set
        skip_dim_check -- passed through to MFDatasetCommonDims via
                          mfDatasetArgs()
        """
        NWPDataAccess.__init__(self, rootpath, **kwargs)
        self._domain_id = domain_id
        self._available_files = None
        self._filetree = None
        # NOTE(review): the mutable default [] above is safe here because it is
        # only stored, never mutated; None plus a fallback would be cleaner.
        self._mfDatasetArgsDict = {"skip_dim_check": skip_dim_check}

    def _determine_filename(self, variable, vartype, init_time, valid_time, reload=True):
        """Determines the name of the data file that contains
        the variable <variable> with type <vartype> of the forecast specified
        by <init_time> and <valid_time>.

        Raises ValueError when no matching file is known, after optionally
        re-scanning the data directory once (reload=True).
        """
        assert self._filetree is not None, "filetree is None. Forgot to call setup()?"
        try:
            return self._filetree[vartype][init_time][variable][valid_time]
        except KeyError:
            # Not found: re-scan the disk once, then retry before failing.
            if reload:
                self.setup()
            try:
                return self._filetree[vartype][init_time][variable][valid_time]
            except KeyError as ex:
                logging.error("Could not identify filename. %s %s %s %s %s %s",
                              variable, vartype, init_time, valid_time, type(ex), ex)
                raise ValueError("variable type {} not available for variable {}"
                                 .format(vartype, variable))

    def _parse_file(self, filename):
        """Open one NetCDF file and extract the metadata needed for the file
        tree: vertical level type, elevations, init time, valid times and the
        CF standard_names of usable variables.

        Raises IOError for files that are inconsistent with the data set
        (callers skip such files).
        """
        elevations = {"levels": [], "units": None}
        with netCDF4.Dataset(os.path.join(self._root_path, filename)) as dataset:
            time_name, time_var = netCDF4tools.identify_CF_time(dataset)
            # By convention the time units reference date is the init time.
            init_time = netCDF4tools.num2date(0, time_var.units)
            if not self.uses_inittime_dimension():
                init_time = None
            valid_times = netCDF4tools.num2date(time_var[:], time_var.units)
            if not self.uses_validtime_dimension():
                if len(valid_times) > 0:
                    raise IOError("Skipping file '{}: no support for valid time, but multiple "
                                  "time steps present".format(filename))
                valid_times = [None]
            lat_name, lat_var, lon_name, lon_var = netCDF4tools.identify_CF_lonlat(dataset)
            vert_name, vert_var, _, _, vert_type = netCDF4tools.identify_vertical_axis(dataset)

            # Coordinate variables must be 1-D over their own dimension.
            if len(time_var.dimensions) != 1 or time_var.dimensions[0] != time_name:
                raise IOError("Problem with time coordinate variable")
            if len(lat_var.dimensions) != 1 or lat_var.dimensions[0] != lat_name:
                raise IOError("Problem with latitude coordinate variable")
            if len(lon_var.dimensions) != 1 or lon_var.dimensions[0] != lon_name:
                raise IOError("Problem with longitude coordinate variable")

            if vert_type != "sfc":
                elevations = {"levels": vert_var[:], "units": vert_var.units}
                # Vertical levels must agree with those seen in earlier files
                # of the same vertical type.
                if vert_type in self._elevations:
                    if len(vert_var[:]) != len(self._elevations[vert_type]["levels"]):
                        raise IOError("Number of vertical levels does not fit to levels of "
                                      "previous file '{}'.".format(self._elevations[vert_type]["filename"]))
                    if not np.allclose(vert_var[:], self._elevations[vert_type]["levels"]):
                        raise IOError("vertical levels do not fit to levels of previous "
                                      "file '{}'.".format(self._elevations[vert_type]["filename"]))
                    if elevations["units"] != self._elevations[vert_type]["units"]:
                        raise IOError("vertical level units do not match previous file '{}'".format(
                            self._elevations[vert_type]["filename"]))

            standard_names = []
            for ncvarname, ncvar in dataset.variables.items():
                if hasattr(ncvar, "standard_name"):
                    # Dimensions must be ordered (time, ..., lat, lon).
                    if (len(ncvar.dimensions) >= 3 and (
                            ncvar.dimensions[0] != time_name or
                            ncvar.dimensions[-2] != lat_name or
                            ncvar.dimensions[-1] != lon_name)):
                        logging.error("Skipping variable '%s' in file '%s': Incorrect order of dimensions",
                                      ncvarname, filename)
                        continue
                    if not hasattr(ncvar, "units"):
                        logging.error("Skipping variable '%s' in file '%s': No units attribute",
                                      ncvarname, filename)
                        continue
                    # Units must be parseable by pint (except for 'time').
                    if ncvar.standard_name != "time":
                        try:
                            UR(ncvar.units)
                        except (ValueError, pint.UndefinedUnitError):
                            logging.error("Skipping variable '%s' in file '%s': unparseable units attribute '%s'",
                                          ncvarname, filename, ncvar.units)
                            continue
                    # 4-D variables on the vertical axis, or 3-D surface fields.
                    if len(ncvar.shape) == 4 and vert_name in ncvar.dimensions:
                        standard_names.append(ncvar.standard_name)
                    elif len(ncvar.shape) == 3 and vert_type == "sfc":
                        standard_names.append(ncvar.standard_name)
        return {
            "vert_type": vert_type,
            "elevations": elevations,
            "init_time": init_time,
            "valid_times": valid_times,
            "standard_names": standard_names
        }

    def _add_to_filetree(self, filename, content):
        """Insert one parsed file into the lookup tree
        _filetree[vert_type][init_time][standard_name][valid_time] = filename.
        """
        logging.info("File '%s' identified as '%s' type", filename, content["vert_type"])
        logging.info("Found init time '%s', %s valid_times and %s standard_names",
                     content["init_time"], len(content["valid_times"]), len(content["standard_names"]))
        if len(content["valid_times"]) == 0 or len(content["standard_names"]) == 0:
            logging.error(
                "Something is wrong with this file... valid_times='%s' standard_names='%s'",
                content["valid_times"], content["standard_names"])
        else:
            logging.debug("valid_times='%s' standard_names='%s'",
                          content["valid_times"], content["standard_names"])
        leaf = self._filetree.setdefault(content["vert_type"], {}).setdefault(content["init_time"], {})
        for standard_name in content["standard_names"]:
            var_leaf = leaf.setdefault(standard_name, {})
            for valid_time in content["valid_times"]:
                # First file wins when two files provide the same data point.
                if valid_time in var_leaf:
                    logging.warning(
                        "some data was found twice! vartype='%s' init_time='%s' standard_name='%s' "
                        "valid_time='%s' first_file='%s' second_file='%s'",
                        content["vert_type"], content["init_time"], standard_name,
                        valid_time, var_leaf[valid_time], filename)
                else:
                    var_leaf[valid_time] = filename

    def setup(self):
        """Scan the data directory and (re)build the file lookup tree."""
        # Get a list of the available data files.
        self._available_files = [
            _filename for _filename in sorted(os.listdir(self._root_path)) if self._domain_id in _filename]
        logging.info("Files identified for domain '%s': %s",
                     self._domain_id, self._available_files)

        self._filetree = {}
        self._elevations = {"sfc": {"filename": None, "levels": [], "units": None}}

        # Build the tree structure.
        for filename in self._available_files:
            logging.info("Opening candidate '%s'", filename)
            try:
                content = self._parse_file(filename)
            except IOError as ex:
                logging.error("Skipping file '%s' (%s: %s)", filename, type(ex), ex)
                continue
            if content["vert_type"] not in self._elevations:
                self._elevations[content["vert_type"]] = content["elevations"]
            self._add_to_filetree(filename, content)

    def get_init_times(self):
        """Returns a list of available forecast init times (base times).
        """
        init_times = set(itertools.chain.from_iterable(
            self._filetree[_x].keys() for _x in self._filetree))
        return sorted(init_times)

    def get_valid_times(self, variable, vartype, init_time):
        """Returns a list of available valid times for the specified
        variable at the specified init time.
        """
        try:
            return sorted(self._filetree[vartype][init_time][variable])
        except KeyError as ex:
            logging.error("Could not find times! %s %s", type(ex), ex)
            return []

    def get_elevations(self, vert_type):
        """Return a list of available elevations for a vertical level type.
        """
        logging.debug("%s", self._elevations)
        return self._elevations[vert_type]["levels"]

    def get_elevation_units(self, vert_type):
        """Return the units of the elevations for a vertical level type.
        """
        logging.debug("%s", self._elevations)
        return self._elevations[vert_type]["units"]

    def get_all_valid_times(self, variable, vartype):
        """Similar to get_valid_times(), but returns the combined valid times
        of all available init times.
        """
        all_valid_times = []
        if vartype not in self._filetree:
            return []
        for init_time in self._filetree[vartype]:
            if variable in self._filetree[vartype][init_time]:
                all_valid_times.extend(list(self._filetree[vartype][init_time][variable]))
        return sorted(set(all_valid_times))

    def get_all_datafiles(self):
        """Return a list of all available data files.
        """
        return self._available_files
class CachedDataAccess(DefaultDataAccess):
    """
    Subclass to NWPDataAccess for accessing properly constructed NetCDF files

    Constructor needs information on domain ID.

    Uses file name and modification date to reduce setup time by caching directory
    content in a dictionary.
    """

    def __init__(self, rootpath, domain_id, **kwargs):
        """Constructor takes the path of the data directory and determines whether
        this class employs different init_times or valid_times.
        """
        DefaultDataAccess.__init__(self, rootpath, domain_id, **kwargs)
        # Maps filename -> (mtime, parsed content) for files already examined.
        self._file_cache = {}

    def setup(self):
        """Scan the data directory and (re)build the file lookup tree, reusing
        cached parse results for files whose modification time is unchanged.
        """
        # Get a list of the available data files.
        self._available_files = [
            _filename for _filename in os.listdir(self._root_path) if self._domain_id in _filename]
        logging.info("Files identified for domain '%s': %s",
                     self._domain_id, self._available_files)

        # Drop cache entries for files that disappeared from disk.
        for filename in list(self._file_cache):
            if filename not in self._available_files:
                del self._file_cache[filename]

        self._filetree = {}
        # Include "units" so get_elevation_units("sfc") behaves like in
        # DefaultDataAccess.setup() instead of raising a KeyError.
        self._elevations = {"sfc": {"filename": None, "levels": [], "units": None}}

        # Build the tree structure.
        for filename in self._available_files:
            mtime = os.path.getmtime(os.path.join(self._root_path, filename))
            if filename in self._file_cache and mtime == self._file_cache[filename][0]:
                # Unchanged file: reuse the cached parse result.
                logging.info("Using cached candidate '%s'", filename)
                content = self._file_cache[filename][1]
                if content["vert_type"] != "sfc":
                    if content["vert_type"] not in self._elevations:
                        self._elevations[content["vert_type"]] = content["elevations"]
                    elif (len(self._elevations[content["vert_type"]]["levels"]) !=
                          len(content["elevations"]["levels"]) or
                          not np.allclose(
                              self._elevations[content["vert_type"]]["levels"],
                              content["elevations"]["levels"])):
                        # Length is checked first: np.allclose raises on
                        # mismatched shapes instead of returning False.
                        logging.error("Skipping file '%s' due to elevation mismatch", filename)
                        continue
            else:
                # New or modified file: parse it and refresh the cache entry.
                if filename in self._file_cache:
                    del self._file_cache[filename]
                logging.info("Opening candidate '%s'", filename)
                try:
                    content = self._parse_file(filename)
                except IOError as ex:
                    logging.error("Skipping file '%s' (%s: %s)", filename, type(ex), ex)
                    continue
                self._file_cache[filename] = (mtime, content)
            self._add_to_filetree(filename, content)
| 43.531323 | 114 | 0.600149 |
from abc import ABCMeta, abstractmethod
import itertools
import os
import logging
import netCDF4
import numpy as np
import pint
from mslib import netCDF4tools
from mslib.utils import UR
class NWPDataAccess(metaclass=ABCMeta):
def __init__(self, rootpath, uses_init_time=True, uses_valid_time=True):
self._root_path = rootpath
self._modelname = ""
self._use_init_time = uses_init_time
self._use_valid_time = uses_valid_time
@abstractmethod
def setup(self):
pass
def have_data(self, variable, vartype, init_time, valid_time):
try:
self._determine_filename(
variable, vartype, init_time, valid_time, reload=False)
except ValueError:
return False
else:
return True
def get_filename(self, variable, vartype, init_time, valid_time,
fullpath=False):
filename = self._determine_filename(variable, vartype,
init_time, valid_time)
if fullpath:
return os.path.join(self._root_path, filename)
else:
return filename
@abstractmethod
def _determine_filename(self, variable, vartype, init_time, valid_time):
pass
def get_datapath(self):
return self._root_path
def uses_inittime_dimension(self):
return self._use_init_time
def uses_validtime_dimension(self):
return self._use_valid_time
@abstractmethod
def get_all_datafiles(self):
pass
@abstractmethod
def get_init_times(self):
pass
@abstractmethod
def get_valid_times(self):
pass
@abstractmethod
def get_elevations(self, vert_type):
pass
@abstractmethod
def get_elevation_units(self, vert_type):
pass
_mfDatasetArgsDict = {}
def mfDatasetArgs(self):
return self._mfDatasetArgsDict
class DefaultDataAccess(NWPDataAccess):
def __init__(self, rootpath, domain_id, skip_dim_check=[], **kwargs):
NWPDataAccess.__init__(self, rootpath, **kwargs)
self._domain_id = domain_id
self._available_files = None
self._filetree = None
self._mfDatasetArgsDict = {"skip_dim_check": skip_dim_check}
def _determine_filename(self, variable, vartype, init_time, valid_time, reload=True):
assert self._filetree is not None, "filetree is None. Forgot to call setup()?"
try:
return self._filetree[vartype][init_time][variable][valid_time]
except KeyError:
if reload:
self.setup()
try:
return self._filetree[vartype][init_time][variable][valid_time]
except KeyError as ex:
logging.error("Could not identify filename. %s %s %s %s %s %s",
variable, vartype, init_time, valid_time, type(ex), ex)
raise ValueError("variable type {} not available for variable {}"
.format(vartype, variable))
def _parse_file(self, filename):
elevations = {"levels": [], "units": None}
with netCDF4.Dataset(os.path.join(self._root_path, filename)) as dataset:
time_name, time_var = netCDF4tools.identify_CF_time(dataset)
init_time = netCDF4tools.num2date(0, time_var.units)
if not self.uses_inittime_dimension():
init_time = None
valid_times = netCDF4tools.num2date(time_var[:], time_var.units)
if not self.uses_validtime_dimension():
if len(valid_times) > 0:
raise IOError("Skipping file '{}: no support for valid time, but multiple "
"time steps present".format(filename))
valid_times = [None]
lat_name, lat_var, lon_name, lon_var = netCDF4tools.identify_CF_lonlat(dataset)
vert_name, vert_var, _, _, vert_type = netCDF4tools.identify_vertical_axis(dataset)
if len(time_var.dimensions) != 1 or time_var.dimensions[0] != time_name:
raise IOError("Problem with time coordinate variable")
if len(lat_var.dimensions) != 1 or lat_var.dimensions[0] != lat_name:
raise IOError("Problem with latitude coordinate variable")
if len(lon_var.dimensions) != 1 or lon_var.dimensions[0] != lon_name:
raise IOError("Problem with longitude coordinate variable")
if vert_type != "sfc":
elevations = {"levels": vert_var[:], "units": vert_var.units}
if vert_type in self._elevations:
if len(vert_var[:]) != len(self._elevations[vert_type]["levels"]):
raise IOError("Number of vertical levels does not fit to levels of "
"previous file '{}'.".format(self._elevations[vert_type]["filename"]))
if not np.allclose(vert_var[:], self._elevations[vert_type]["levels"]):
raise IOError("vertical levels do not fit to levels of previous "
"file '{}'.".format(self._elevations[vert_type]["filename"]))
if elevations["units"] != self._elevations[vert_type]["units"]:
raise IOError("vertical level units do not match previous file '{}'".format(
self._elevations[vert_type]["filename"]))
standard_names = []
for ncvarname, ncvar in dataset.variables.items():
if hasattr(ncvar, "standard_name"):
if (len(ncvar.dimensions) >= 3 and (
ncvar.dimensions[0] != time_name or
ncvar.dimensions[-2] != lat_name or
ncvar.dimensions[-1] != lon_name)):
logging.error("Skipping variable '%s' in file '%s': Incorrect order of dimensions",
ncvarname, filename)
continue
if not hasattr(ncvar, "units"):
logging.error("Skipping variable '%s' in file '%s': No units attribute",
ncvarname, filename)
continue
if ncvar.standard_name != "time":
try:
UR(ncvar.units)
except (ValueError, pint.UndefinedUnitError):
logging.error("Skipping variable '%s' in file '%s': unparseable units attribute '%s'",
ncvarname, filename, ncvar.units)
continue
if len(ncvar.shape) == 4 and vert_name in ncvar.dimensions:
standard_names.append(ncvar.standard_name)
elif len(ncvar.shape) == 3 and vert_type == "sfc":
standard_names.append(ncvar.standard_name)
return {
"vert_type": vert_type,
"elevations": elevations,
"init_time": init_time,
"valid_times": valid_times,
"standard_names": standard_names
}
def _add_to_filetree(self, filename, content):
logging.info("File '%s' identified as '%s' type", filename, content["vert_type"])
logging.info("Found init time '%s', %s valid_times and %s standard_names",
content["init_time"], len(content["valid_times"]), len(content["standard_names"]))
if len(content["valid_times"]) == 0 or len(content["standard_names"]) == 0:
logging.error(
"Something is wrong with this file... valid_times='%s' standard_names='%s'",
content["valid_times"], content["standard_names"])
else:
logging.debug("valid_times='%s' standard_names='%s'",
content["valid_times"], content["standard_names"])
leaf = self._filetree.setdefault(content["vert_type"], {}).setdefault(content["init_time"], {})
for standard_name in content["standard_names"]:
var_leaf = leaf.setdefault(standard_name, {})
for valid_time in content["valid_times"]:
if valid_time in var_leaf:
logging.warning(
"some data was found twice! vartype='%s' init_time='%s' standard_name='%s' "
"valid_time='%s' first_file='%s' second_file='%s'",
content["vert_type"], content["init_time"], standard_name,
valid_time, var_leaf[valid_time], filename)
else:
var_leaf[valid_time] = filename
def setup(self):
# Get a list of the available data files.
self._available_files = [
_filename for _filename in sorted(os.listdir(self._root_path)) if self._domain_id in _filename]
logging.info("Files identified for domain '%s': %s",
self._domain_id, self._available_files)
self._filetree = {}
self._elevations = {"sfc": {"filename": None, "levels": [], "units": None}}
# Build the tree structure.
for filename in self._available_files:
logging.info("Opening candidate '%s'", filename)
try:
content = self._parse_file(filename)
except IOError as ex:
logging.error("Skipping file '%s' (%s: %s)", filename, type(ex), ex)
continue
if content["vert_type"] not in self._elevations:
self._elevations[content["vert_type"]] = content["elevations"]
self._add_to_filetree(filename, content)
def get_init_times(self):
init_times = set(itertools.chain.from_iterable(
self._filetree[_x].keys() for _x in self._filetree))
return sorted(init_times)
def get_valid_times(self, variable, vartype, init_time):
try:
return sorted(self._filetree[vartype][init_time][variable])
except KeyError as ex:
logging.error("Could not find times! %s %s", type(ex), ex)
return []
def get_elevations(self, vert_type):
logging.debug("%s", self._elevations)
return self._elevations[vert_type]["levels"]
def get_elevation_units(self, vert_type):
logging.debug("%s", self._elevations)
return self._elevations[vert_type]["units"]
def get_all_valid_times(self, variable, vartype):
all_valid_times = []
if vartype not in self._filetree:
return []
for init_time in self._filetree[vartype]:
if variable in self._filetree[vartype][init_time]:
all_valid_times.extend(list(self._filetree[vartype][init_time][variable]))
return sorted(set(all_valid_times))
def get_all_datafiles(self):
return self._available_files
class CachedDataAccess(DefaultDataAccess):
    """Data access that caches parsed per-file metadata between setup() calls.

    Re-parsing every file on every setup() is expensive; this subclass keeps a
    cache keyed by filename and invalidated by the file's modification time.
    """
    def __init__(self, rootpath, domain_id, **kwargs):
        DefaultDataAccess.__init__(self, rootpath, domain_id, **kwargs)
        # filename -> (mtime, parsed content); survives repeated setup() calls.
        self._file_cache = {}
    def setup(self):
        # Get a list of the available data files, sorted for deterministic
        # ordering (consistent with DefaultDataAccess.setup).
        self._available_files = [
            _filename for _filename in sorted(os.listdir(self._root_path))
            if self._domain_id in _filename]
        logging.info("Files identified for domain '%s': %s",
                     self._domain_id, self._available_files)
        # Drop cache entries for files that disappeared from disk.
        for filename in list(self._file_cache):
            if filename not in self._available_files:
                del self._file_cache[filename]
        self._filetree = {}
        # Include "units" so get_elevation_units("sfc") does not KeyError;
        # this matches the structure built by DefaultDataAccess.setup.
        self._elevations = {"sfc": {"filename": None, "levels": [], "units": None}}
        # Build the tree structure.
        for filename in self._available_files:
            mtime = os.path.getmtime(os.path.join(self._root_path, filename))
            if filename in self._file_cache and mtime == self._file_cache[filename][0]:
                # Cache hit: reuse the parsed content without touching the file.
                logging.info("Using cached candidate '%s'", filename)
                content = self._file_cache[filename][1]
                if content["vert_type"] != "sfc":
                    if content["vert_type"] not in self._elevations:
                        self._elevations[content["vert_type"]] = content["elevations"]
                    elif not np.allclose(
                            self._elevations[content["vert_type"]]["levels"],
                            content["elevations"]["levels"]):
                        logging.error("Skipping file '%s' due to elevation mismatch", filename)
                        continue
            else:
                # Cache miss or stale entry: re-parse the file from disk.
                if filename in self._file_cache:
                    del self._file_cache[filename]
                logging.info("Opening candidate '%s'", filename)
                try:
                    content = self._parse_file(filename)
                except IOError as ex:
                    logging.error("Skipping file '%s' (%s: %s)", filename, type(ex), ex)
                    continue
                self._file_cache[filename] = (mtime, content)
            self._add_to_filetree(filename, content)
| true | true |
f7206716b828e3c1c1ab325cae68cae705a72727 | 9,010 | py | Python | scalability/experiments/run_mainnet.py | Deland-Labs/ic | 047172b01e0afc0e61448669d4ec98b2425c6853 | [
"Apache-2.0"
] | 1 | 2021-12-01T03:48:42.000Z | 2021-12-01T03:48:42.000Z | scalability/experiments/run_mainnet.py | Deland-Labs/ic | 047172b01e0afc0e61448669d4ec98b2425c6853 | [
"Apache-2.0"
] | null | null | null | scalability/experiments/run_mainnet.py | Deland-Labs/ic | 047172b01e0afc0e61448669d4ec98b2425c6853 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import subprocess
import sys
import time
from typing import List
import gflags
FLAGS = gflags.FLAGS
gflags.DEFINE_bool("use_updates", False, "Issue update calls instead of query calls")
class Mainnet:
    """Wrapper to run benchmarks against subnetworks in mainnet concurrently.

    Each mainnet load target (subnet + counter canister) is paired with one
    subnetwork of a booked testnet that hosts the workload generators; each
    benchmark runs in its own tmux window.
    """
    def __init__(self):
        """Initialize bookkeeping and validate testnet capacity vs. targets."""
        if FLAGS.testnet == "mercury" and FLAGS.target_subnet_id is None:
            raise Exception("--target_subnet_id has to be set when running against mainnet")
        # Testnets you have booked and the number of subnetworks each (including NNS)
        self.testnets = {
            "large01": 5,
            # "large02": 5,
            "large03": 5,
            "large04": 5,
            "large05": 5,
            # "medium01": 2,
            # "medium03": 2,
            "medium04": 2,
            # "medium06": 2,
            # "medium07": 2,
            # "medium08": 2,
            # "medium09": 2,
        }
        # All subnets with the ID of the counter canister.
        # Uncomment if you want to run against that subnetwork.
        # sum(self.testnets.values()) has to be at least the number of subnets
        # uncommented here.
        self.load_targets = {
            # --- pjljw has a lot of traffic, so perhaps avoid
            # "pjljw-kztyl-46ud4-ofrj6-nzkhm-3n4nt-wi3jt-ypmav-ijqkt-gjf66-uae": "ifkln-viaaa-aaaah-qccva-cai",
            "ejbmu-grnam-gk6ol-6irwa-htwoj-7ihfl-goimw-hlnvh-abms4-47v2e-zqe": "nffi3-byaaa-aaaae-qaava-cai",
            # # 404 - [MessageId(...)]: Update returned non-202: 404
            "gmq5v-hbozq-uui6y-o55wc-ihop3-562wb-3qspg-nnijg-npqp5-he3cj-3ae": "phin2-eyaaa-aaaak-qaaca-cai",
            "opn46-zyspe-hhmyp-4zu6u-7sbrh-dok77-m7dch-im62f-vyimr-a3n2c-4ae": "psp4x-fqaaa-aaaak-qaabq-cai",
            # # normal
            "w4asl-4nmyj-qnr7c-6cqq4-tkwmt-o26di-iupkq-vx4kt-asbrx-jzuxh-4ae": "wrd4y-xiaaa-aaaac-qaaaq-cai",
            "lspz2-jx4pu-k3e7p-znm7j-q4yum-ork6e-6w4q6-pijwq-znehu-4jabe-kqe": "m4dvk-faaaa-aaaag-aaaba-cai",
            "k44fs-gm4pv-afozh-rs7zw-cg32n-u7xov-xqyx3-2pw5q-eucnu-cosd4-uqe": "cst46-ryaaa-aaaak-aaaha-cai",
            "lhg73-sax6z-2zank-6oer2-575lz-zgbxx-ptudx-5korm-fy7we-kh4hl-pqe": "anvl4-jaaaa-aaaag-qaaca-cai",
            "brlsh-zidhj-3yy3e-6vqbz-7xnih-xeq2l-as5oc-g32c4-i5pdn-2wwof-oae": "qnlji-3yaaa-aaaai-aa2aq-cai",
            "mpubz-g52jc-grhjo-5oze5-qcj74-sex34-omprz-ivnsm-qvvhr-rfzpv-vae": "2zwmb-wyaaa-aaaai-qa2vq-cai",
            "qdvhd-os4o2-zzrdw-xrcv4-gljou-eztdp-bj326-e6jgr-tkhuc-ql6v2-yqe": "ivomh-taaaa-aaaaj-aac2a-cai",
            "jtdsg-3h6gi-hs7o5-z2soi-43w3z-soyl3-ajnp3-ekni5-sw553-5kw67-nqe": "tiezx-5yaaa-aaaaj-qagya-cai",
            "io67a-2jmkw-zup3h-snbwi-g6a5n-rm5dn-b6png-lvdpl-nqnto-yih6l-gqe": "ftuvq-daaaa-aaaad-aaaqa-cai",
            "5kdm2-62fc6-fwnja-hutkz-ycsnm-4z33i-woh43-4cenu-ev7mi-gii6t-4ae": "iqjyz-jqaaa-aaaad-qayoa-cai",
            "4zbus-z2bmt-ilreg-xakz4-6tyre-hsqj4-slb4g-zjwqo-snjcc-iqphi-3qe": "2oxpg-ayaaa-aaaac-aaacq-cai",
            "qxesv-zoxpm-vc64m-zxguk-5sj74-35vrb-tbgwg-pcird-5gr26-62oxl-cae": "htg4w-ziaaa-aaaab-aabaa-cai",
            "shefu-t3kr5-t5q3w-mqmdq-jabyv-vyvtf-cyyey-3kmo4-toyln-emubw-4qe": "2vzmb-ayaaa-aaaae-aaf3q-cai",
            "csyj4-zmann-ys6ge-3kzi6-onexi-obayx-2fvak-zersm-euci4-6pslt-lae": "cozrd-caaaa-aaaaf-qaeua-cai",
            "eq6en-6jqla-fbu5s-daskr-h6hx2-376n5-iqabl-qgrng-gfqmv-n3yjr-mqe": "34i5c-taaaa-aaaaf-aaa2q-cai",
            "snjp4-xlbw4-mnbog-ddwy6-6ckfd-2w5a2-eipqo-7l436-pxqkh-l6fuv-vae": "3muos-6yaaa-aaaaa-qaaua-cai",
            "pae4o-o6dxf-xki7q-ezclx-znyd6-fnk6w-vkv5z-5lfwh-xym2i-otrrw-fqe": "r7fsz-diaaa-aaaab-qadxa-cai",
        }
        # Next subnetwork to use for workload generators, per testnet.
        self.next_subnet = {key: 0 for key in self.testnets.keys()}
        total_subnetworks = sum(self.testnets.values())
        total_targets = len(self.load_targets)
        missing = total_targets - total_subnetworks
        if total_targets > total_subnetworks:
            print(
                (
                    f"Insufficient testnets for load generation (have {total_subnetworks}, "
                    f"but {total_targets} load targets, {missing} more missing"
                )
            )
            exit(1)
        self.start_time = int(time.time())
    def get_window_name(self, subnet):
        """Get the tmux window name from a subnet ID (its first dash-separated group)."""
        return subnet.split("-")[0]
    def get_query_command(self, canister, subnet, wg_testnet, wg_subnet, subnet_prefix):
        """Return the benchmark command line for query (read) load."""
        return [
            "./max_capacity_system_baseline.py",
            "--testnet",
            "mercury",
            "--canister",
            canister,
            "--target_subnet_id",
            subnet,
            "--wg_testnet",
            wg_testnet,
            "--wg_subnet",
            str(wg_subnet),
            "--no_instrument=True",
            "--top_level_out_dir",
            "mainnet-{}".format(self.start_time),
            "--second_level_out_dir",
            subnet_prefix,
            "--num_workload_generators",
            str(4),
            "--query_initial_rps",
            str(500),
            "--max_query_load",
            str(500),
            "--skip_generate_report=True",
            "--target_query_load",
            str(440),
            "--query_rps_increment",
            str(40),
            "--target_all=True",
        ]
    def get_update_command(self, canister, subnet, wg_testnet, wg_subnet, subnet_prefix):
        """Return the benchmark command line for update (write) load."""
        # Fixed: "--target_update_load 600" used to appear twice in this list.
        return [
            "./max_capacity_system_baseline.py",
            "--testnet",
            "mercury",
            "--canister",
            canister,
            "--target_subnet_id",
            subnet,
            "--wg_testnet",
            wg_testnet,
            "--wg_subnet",
            str(wg_subnet),
            "--no_instrument=True",
            "--max_update_load",
            str(600),
            "--top_level_out_dir",
            "mainnet-{}".format(self.start_time),
            "--second_level_out_dir",
            subnet_prefix,
            "--num_workload_generators",
            str(4),
            "--target_update_load",
            str(600),
            "--update_rps_increment",
            str(4),
            "--update_initial_rps",
            str(600),
            "--skip_generate_report=True",
            "--use_updates=True",
            "--iter_duration={}".format(300),
        ]
    def get_commands(self, do_updates=True):
        """Get (window name, command) pairs for all load targets."""
        r = []
        for subnet, canister in self.load_targets.items():
            # Pick the first testnet that still has a free subnetwork.
            wg_testnet = None
            for testnet, num_subnets in self.testnets.items():
                if num_subnets > 0:
                    wg_testnet = testnet
                    break
            self.testnets[wg_testnet] -= 1
            wg_subnet = self.next_subnet[wg_testnet]
            self.next_subnet[wg_testnet] += 1
            subnet_prefix = self.get_window_name(subnet)
            r.append(
                (
                    subnet_prefix,
                    self.get_update_command(canister, subnet, wg_testnet, wg_subnet, subnet_prefix)
                    if do_updates
                    else self.get_query_command(canister, subnet, wg_testnet, wg_subnet, subnet_prefix),
                )
            )
        return r
    def run_in_session(self, name: str, command: List[str]):
        """Run the given command in a new tmux window named *name*."""
        assert len(name) > 0
        subprocess.run(
            [
                "tmux",
                "new-window",
                "-n",
                name,
                " ".join(command) + '; echo "Check failure rate + hit enter to terminate"; read',
            ],
            check=True,
        )
    def start(self, do_updates):
        """Start one benchmark per load target, each in its own tmux window."""
        print(f"Starting workload with do_updates={do_updates}")
        for name, command in self.get_commands(do_updates):
            self.run_in_session(name, command)
    def tmux_window_list(self) -> List[str]:
        """Get the current list of tmux window names."""
        r = []
        for line in subprocess.check_output(["tmux", "list-windows"], encoding="utf-8").split("\n"):
            e = line.split(" ")
            if len(e) > 1:
                r.append(e[1])
        return r
    def wait(self):
        """Wait for all benchmark tmux windows to terminate."""
        # Give the windows a moment to come up before polling for them.
        time.sleep(30)
        for name in self.load_targets.keys():
            print(f"Waiting for {name}")
            while self.get_window_name(name) in self.tmux_window_list():
                time.sleep(10)
# Parse flags, then launch one benchmark per mainnet load target (each in its
# own tmux window) and block until every benchmark window has closed.
FLAGS(sys.argv)
mainnet = Mainnet()
mainnet.start(FLAGS.use_updates)
# Need to sleep a bit in order to ensure that all windows are coming up
mainnet.wait()
print("All terminated, done")
| 38.836207 | 111 | 0.568036 |
import subprocess
import sys
import time
from typing import List
import gflags
FLAGS = gflags.FLAGS
gflags.DEFINE_bool("use_updates", False, "Issue update calls instead of query calls")
class Mainnet:
    """Wrapper to run benchmarks against subnetworks in mainnet concurrently.

    Each mainnet load target (subnet + counter canister) is paired with one
    subnetwork of a booked testnet that hosts the workload generators; each
    benchmark runs in its own tmux window.
    """
    def __init__(self):
        """Initialize bookkeeping and validate testnet capacity vs. targets."""
        if FLAGS.testnet == "mercury" and FLAGS.target_subnet_id is None:
            raise Exception("--target_subnet_id has to be set when running against mainnet")
        # Testnets you have booked and the number of subnetworks each (including NNS).
        self.testnets = {
            "large01": 5,
            "large03": 5,
            "large04": 5,
            "large05": 5,
            "medium04": 2,
        }
        # Mainnet subnets mapped to the ID of the counter canister to target.
        # sum(self.testnets.values()) has to be at least the number of entries here.
        self.load_targets = {
            "ejbmu-grnam-gk6ol-6irwa-htwoj-7ihfl-goimw-hlnvh-abms4-47v2e-zqe": "nffi3-byaaa-aaaae-qaava-cai",
            # Restored truncated subnet ID (key was corrupted to "g-nnijg-...").
            "gmq5v-hbozq-uui6y-o55wc-ihop3-562wb-3qspg-nnijg-npqp5-he3cj-3ae": "phin2-eyaaa-aaaak-qaaca-cai",
            "opn46-zyspe-hhmyp-4zu6u-7sbrh-dok77-m7dch-im62f-vyimr-a3n2c-4ae": "psp4x-fqaaa-aaaak-qaabq-cai",
            "w4asl-4nmyj-qnr7c-6cqq4-tkwmt-o26di-iupkq-vx4kt-asbrx-jzuxh-4ae": "wrd4y-xiaaa-aaaac-qaaaq-cai",
            "lspz2-jx4pu-k3e7p-znm7j-q4yum-ork6e-6w4q6-pijwq-znehu-4jabe-kqe": "m4dvk-faaaa-aaaag-aaaba-cai",
            "k44fs-gm4pv-afozh-rs7zw-cg32n-u7xov-xqyx3-2pw5q-eucnu-cosd4-uqe": "cst46-ryaaa-aaaak-aaaha-cai",
            "lhg73-sax6z-2zank-6oer2-575lz-zgbxx-ptudx-5korm-fy7we-kh4hl-pqe": "anvl4-jaaaa-aaaag-qaaca-cai",
            "brlsh-zidhj-3yy3e-6vqbz-7xnih-xeq2l-as5oc-g32c4-i5pdn-2wwof-oae": "qnlji-3yaaa-aaaai-aa2aq-cai",
            "mpubz-g52jc-grhjo-5oze5-qcj74-sex34-omprz-ivnsm-qvvhr-rfzpv-vae": "2zwmb-wyaaa-aaaai-qa2vq-cai",
            "qdvhd-os4o2-zzrdw-xrcv4-gljou-eztdp-bj326-e6jgr-tkhuc-ql6v2-yqe": "ivomh-taaaa-aaaaj-aac2a-cai",
            "jtdsg-3h6gi-hs7o5-z2soi-43w3z-soyl3-ajnp3-ekni5-sw553-5kw67-nqe": "tiezx-5yaaa-aaaaj-qagya-cai",
            "io67a-2jmkw-zup3h-snbwi-g6a5n-rm5dn-b6png-lvdpl-nqnto-yih6l-gqe": "ftuvq-daaaa-aaaad-aaaqa-cai",
            "5kdm2-62fc6-fwnja-hutkz-ycsnm-4z33i-woh43-4cenu-ev7mi-gii6t-4ae": "iqjyz-jqaaa-aaaad-qayoa-cai",
            "4zbus-z2bmt-ilreg-xakz4-6tyre-hsqj4-slb4g-zjwqo-snjcc-iqphi-3qe": "2oxpg-ayaaa-aaaac-aaacq-cai",
            "qxesv-zoxpm-vc64m-zxguk-5sj74-35vrb-tbgwg-pcird-5gr26-62oxl-cae": "htg4w-ziaaa-aaaab-aabaa-cai",
            "shefu-t3kr5-t5q3w-mqmdq-jabyv-vyvtf-cyyey-3kmo4-toyln-emubw-4qe": "2vzmb-ayaaa-aaaae-aaf3q-cai",
            "csyj4-zmann-ys6ge-3kzi6-onexi-obayx-2fvak-zersm-euci4-6pslt-lae": "cozrd-caaaa-aaaaf-qaeua-cai",
            "eq6en-6jqla-fbu5s-daskr-h6hx2-376n5-iqabl-qgrng-gfqmv-n3yjr-mqe": "34i5c-taaaa-aaaaf-aaa2q-cai",
            "snjp4-xlbw4-mnbog-ddwy6-6ckfd-2w5a2-eipqo-7l436-pxqkh-l6fuv-vae": "3muos-6yaaa-aaaaa-qaaua-cai",
            "pae4o-o6dxf-xki7q-ezclx-znyd6-fnk6w-vkv5z-5lfwh-xym2i-otrrw-fqe": "r7fsz-diaaa-aaaab-qadxa-cai",
        }
        # Next subnetwork to use for workload generators, per testnet.
        self.next_subnet = {key: 0 for key in self.testnets.keys()}
        total_subnetworks = sum(self.testnets.values())
        total_targets = len(self.load_targets)
        missing = total_targets - total_subnetworks
        if total_targets > total_subnetworks:
            print(
                (
                    f"Insufficient testnets for load generation (have {total_subnetworks}, "
                    f"but {total_targets} load targets, {missing} more missing"
                )
            )
            exit(1)
        self.start_time = int(time.time())
    def get_window_name(self, subnet):
        """Get the tmux window name from a subnet ID (its first dash-separated group)."""
        return subnet.split("-")[0]
    def get_query_command(self, canister, subnet, wg_testnet, wg_subnet, subnet_prefix):
        """Return the benchmark command line for query (read) load."""
        return [
            "./max_capacity_system_baseline.py",
            "--testnet",
            "mercury",
            "--canister",
            canister,
            "--target_subnet_id",
            subnet,
            "--wg_testnet",
            wg_testnet,
            "--wg_subnet",
            str(wg_subnet),
            "--no_instrument=True",
            "--top_level_out_dir",
            "mainnet-{}".format(self.start_time),
            "--second_level_out_dir",
            subnet_prefix,
            "--num_workload_generators",
            str(4),
            "--query_initial_rps",
            str(500),
            "--max_query_load",
            str(500),
            "--skip_generate_report=True",
            "--target_query_load",
            str(440),
            "--query_rps_increment",
            str(40),
            "--target_all=True",
        ]
    def get_update_command(self, canister, subnet, wg_testnet, wg_subnet, subnet_prefix):
        """Return the benchmark command line for update (write) load."""
        # Fixed: "--target_update_load 600" used to appear twice in this list.
        return [
            "./max_capacity_system_baseline.py",
            "--testnet",
            "mercury",
            "--canister",
            canister,
            "--target_subnet_id",
            subnet,
            "--wg_testnet",
            wg_testnet,
            "--wg_subnet",
            str(wg_subnet),
            "--no_instrument=True",
            "--max_update_load",
            str(600),
            "--top_level_out_dir",
            "mainnet-{}".format(self.start_time),
            "--second_level_out_dir",
            subnet_prefix,
            "--num_workload_generators",
            str(4),
            "--target_update_load",
            str(600),
            "--update_rps_increment",
            str(4),
            "--update_initial_rps",
            str(600),
            "--skip_generate_report=True",
            "--use_updates=True",
            "--iter_duration={}".format(300),
        ]
    def get_commands(self, do_updates=True):
        """Get (window name, command) pairs for all load targets."""
        r = []
        for subnet, canister in self.load_targets.items():
            # Pick the first testnet that still has a free subnetwork.
            wg_testnet = None
            for testnet, num_subnets in self.testnets.items():
                if num_subnets > 0:
                    wg_testnet = testnet
                    break
            self.testnets[wg_testnet] -= 1
            wg_subnet = self.next_subnet[wg_testnet]
            self.next_subnet[wg_testnet] += 1
            subnet_prefix = self.get_window_name(subnet)
            r.append(
                (
                    subnet_prefix,
                    self.get_update_command(canister, subnet, wg_testnet, wg_subnet, subnet_prefix)
                    if do_updates
                    else self.get_query_command(canister, subnet, wg_testnet, wg_subnet, subnet_prefix),
                )
            )
        return r
    def run_in_session(self, name: str, command: List[str]):
        """Run the given command in a new tmux window named *name*."""
        assert len(name) > 0
        subprocess.run(
            [
                "tmux",
                "new-window",
                "-n",
                name,
                " ".join(command) + '; echo "Check failure rate + hit enter to terminate"; read',
            ],
            check=True,
        )
    def start(self, do_updates):
        """Start one benchmark per load target, each in its own tmux window."""
        print(f"Starting workload with do_updates={do_updates}")
        for name, command in self.get_commands(do_updates):
            self.run_in_session(name, command)
    def tmux_window_list(self) -> List[str]:
        """Get the current list of tmux window names."""
        r = []
        for line in subprocess.check_output(["tmux", "list-windows"], encoding="utf-8").split("\n"):
            e = line.split(" ")
            if len(e) > 1:
                r.append(e[1])
        return r
    def wait(self):
        """Wait for all benchmark tmux windows to terminate."""
        # Give the windows a moment to come up before polling for them.
        time.sleep(30)
        for name in self.load_targets.keys():
            print(f"Waiting for {name}")
            while self.get_window_name(name) in self.tmux_window_list():
                time.sleep(10)
# Parse flags, then launch one benchmark per mainnet load target (each in its
# own tmux window) and block until every benchmark window has closed.
FLAGS(sys.argv)
mainnet = Mainnet()
mainnet.start(FLAGS.use_updates)
# mainnet.wait() sleeps first so the tmux windows have time to come up.
mainnet.wait()
print("All terminated, done")
| true | true |
f72067b03fcce0e8fbe2579787be36a9398c361c | 627 | py | Python | transform_file.py | BXuan694/universalAdversarialPerturbation | ebca90f76b5d45715c98a1ff0b6f11df753b51c6 | [
"BSD-2-Clause"
] | 38 | 2019-01-19T09:43:13.000Z | 2022-01-05T09:47:02.000Z | transform_file.py | BXuan694/universalAdversarialPerturbation | ebca90f76b5d45715c98a1ff0b6f11df753b51c6 | [
"BSD-2-Clause"
] | 3 | 2020-02-24T05:56:35.000Z | 2022-01-07T12:08:33.000Z | transform_file.py | BXuan694/universalAdversarialPerturbation | ebca90f76b5d45715c98a1ff0b6f11df753b51c6 | [
"BSD-2-Clause"
] | 10 | 2019-02-19T10:05:57.000Z | 2021-06-07T08:02:36.000Z | from torchvision import transforms
# Full ImageNet-style preprocessing pipeline: resize, 224x224 center crop,
# tensor conversion, and normalization with the standard ImageNet mean/std.
transform1 = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
# Geometric half of the pipeline only (PIL image in, PIL image out).
cut = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
])
# Conversion half of the pipeline only (PIL image in, normalized tensor out).
convert = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
# Recombined pipeline built from the two halves; equivalent to transform1.
transform = transforms.Compose(cut.transforms + convert.transforms)
| 24.115385 | 80 | 0.657097 | from torchvision import transforms
# Full ImageNet-style preprocessing pipeline: resize, 224x224 center crop,
# tensor conversion, and normalization with the standard ImageNet mean/std.
transform1 = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean = [0.485,0.456,0.406], std = [0.229,0.224,0.225]),
])
# Geometric half of the pipeline only (PIL image in, PIL image out).
cut = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
])
# Conversion half of the pipeline only (PIL image in, normalized tensor out).
convert = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
# Recombination of the two halves; equivalent to transform1.
transform = transforms.Compose([
    cut.transforms[0],
    cut.transforms[1],
    convert.transforms[0],
    convert.transforms[1]
])
| true | true |
f7206874eea5462451a2197d6ce51ff97b87f248 | 7,039 | py | Python | pypi_tools/main.py | xnuinside/pypi_tools_bot | 2ae408e510dcc30c39475af1f9cba8af866c54ee | [
"MIT"
] | 2 | 2021-03-04T07:37:38.000Z | 2021-04-01T16:57:10.000Z | pypi_tools/main.py | xnuinside/pypi_tools_bot | 2ae408e510dcc30c39475af1f9cba8af866c54ee | [
"MIT"
] | null | null | null | pypi_tools/main.py | xnuinside/pypi_tools_bot | 2ae408e510dcc30c39475af1f9cba8af866c54ee | [
"MIT"
] | null | null | null | import os
import logging
import sentry_sdk
from aiogram import Bot, Dispatcher, executor, types
from datetime import datetime, timedelta
from pypi_tools.logic import remove_track_for_package
import pypi_tools.data as d
from pypi_tools.helpers import validate_input
import pypi_tools.vizualizer as v
import pypi_tools.readme as r
import asyncio
import aioredis
logging.basicConfig(level=logging.INFO)
redis_host = f"redis://{os.environ.get('REDIS_HOST')}"
#sentry_sdk.init(os.environ["SENTRY_PATH"])
bot = Bot(token=os.environ["BOT_API_KEY"], parse_mode="html")
dp = Dispatcher(bot)
@dp.message_handler(commands=['start'])
async def send_welcome(message):
    """Handle /start: greet the user and send the bot help text."""
    greeting = (
        f"Hello, {message.chat.first_name} {message.chat.last_name}! \n"
        f"Welcome to <b>PyPi Tools Bot.</b>\n\n"
        "This Bot created special to obtain information from Official Python PyPi Server\n"
    )
    await message.answer(greeting + r.help_text + r.current_version)
@dp.message_handler(commands=['help'])
async def send_welcome(message):
    """Handle /help: reply with the static help text.

    NOTE(review): this redefines ``send_welcome`` from the /start handler
    above; aiogram registered both at decoration time, but consider renaming
    this one to ``send_help``.
    """
    await message.answer(r.help_text)
@dp.message_handler(lambda message: message.text and (
        '/stats' in message.text.lower() or 'stats:' in message.text.lower()))
@validate_input(command='stats',
                known_sub_commands={'@any_number': lambda num: num})
async def send_package_stats(message):
    """Reply with download statistics for a package.

    The optional numeric sub-command selects how many days to report
    (default 5); ``validate_input`` populates ``message.output`` and
    ``message.sub_command``.
    """
    reply = message.output
    days = message.sub_command or 5
    # A single token is a bare package name; anything else is presumably a
    # message prepared by validate_input and is echoed back unchanged.
    if len(reply.split()) == 1:
        package_name = reply
        today = datetime.now().date()
        stats = await d.cached_package_downloads_stats(package_name, days, today)
        reply = d.stats_text(stats, package_name, days)
    await message.answer(reply)
@dp.message_handler(lambda message: message.text and (
        '/plot' in message.text.lower() or 'plot:' in message.text.lower()))
@validate_input(command='plot',
                known_sub_commands={'@any_number': lambda num: num})
async def send_package_stats_with_graph(message):
    """Reply with download statistics plus a rendered PNG download graph.

    Like /stats, an optional numeric sub-command selects how many days to
    report (default 5). Generated graphs are cached on disk under ``temp/``
    keyed by package, day, and window size.
    """
    output = message.output
    sub_command = message.sub_command or 5
    if len(output.split()) != 1:
        # Not a bare package name: echo the message prepared by validate_input
        # instead of falling through with no graph file bound.
        await message.answer(output)
        return
    days = sub_command
    package_name = output
    current_date = datetime.now().date()
    data_ = await d.cached_package_downloads_stats(package_name, days, current_date)
    output = d.stats_text(data_, package_name, days)
    temp = 'temp'
    os.makedirs(temp, exist_ok=True)
    # Start date for the pandas date range used by the graph generator.
    start_date = current_date - timedelta(days=2)
    # Cache key: the same graph is reused until the day rolls over.
    file_name = os.path.join(
        temp, f'{package_name}:{current_date - timedelta(days=1)}:{days}.png')
    if not os.path.isfile(file_name):
        # NOTE(review): assumes data_ is keyed newest-first; reversed so the
        # plot runs oldest-to-newest -- confirm against
        # cached_package_downloads_stats.
        file_name = v.generate_graph(
            start_date, [item for _, item in data_.items()][::-1], file_name)
    file_ = types.InputFile(file_name)
    await message.answer(output)
    await message.answer_photo(file_)
@dp.message_handler(commands=['random'])
async def command(message):
    """Handle /random: reply with a randomly chosen package from PyPI."""
    await message.answer(await d.get_random_package())
@dp.message_handler(commands=['search', 'search:detailed'])
@validate_input(command='search',
                known_sub_commands={'detailed': lambda _package_name: d.request_package_info_from_pypi(
                    _package_name, detailed=True)},
                additional_error="Or use with sub-command to get detailed information:"
                                 "/search:detailed aiohttp")
async def search_command(message):
    """Look a package up on PyPI.

    The ``detailed`` sub-command swaps in a coroutine that fetches the
    extended package description instead of the summary.
    """
    reply = message.output
    if len(reply.split()) == 1:
        # Either the detailed lambda above or the default summary fetcher.
        fetch = message.sub_command or d.request_package_info_from_pypi
        reply = await fetch(reply)
    await message.answer(reply)
@dp.message_handler(commands=['releases', 'releases:full'])
@validate_input(command='releases',
                known_sub_commands={'full': 'full'},
                additional_error="Or use with sub-command to get full list of releases:"
                                 "/releases:full aiohttp")
async def releases_command(message):
    """Reply with the release history of a package.

    By default only the last 7 releases are shown; the ``full`` sub-command
    lists every release.
    """
    output = message.output
    sub_command = message.sub_command
    if len(output.split()) == 1:
        package_name = output
        releases = await d.get_release_list(package_name=package_name)
        if releases is None:
            # get_release_list returns None for unknown packages
            # (track_command already guards against this case).
            output = f'Package {package_name} does not exists'
        elif sub_command and sub_command == 'full':
            output = f"Full Releases list for Package {package_name}\n\n"
            for version, v_date in releases.items():
                output += f"<b>{version}</b>: {v_date}\n"
        else:
            output = f"Last 7 Releases for Package {package_name}\n\n"
            # Slice instead of the previous counter-and-break loop, which
            # actually showed 8 entries ("if num > 7" broke one item late).
            for version, v_date in list(releases.items())[:7]:
                output += f"<b>{version}</b>: {v_date}\n"
    await message.answer(output)
track_sub_commands = {'stop': lambda key: remove_track_for_package(key),
                      'nodev': 'nodev'}


@dp.message_handler(commands=['track', 'track:stop', 'track:nodev'])
@validate_input(command='track',
                known_sub_commands=track_sub_commands,
                additional_error="Or use with sub-command to stop track a package releases"
                                 "/track:stop aiohttp")
async def track_command(message):
    """Handler to react on /track command and its sub-commands.

    Stores "<chat_id>:<package>" -> latest version in Redis; ':stop' removes
    the key and ':nodev' suppresses pre-release/dev versions.
    """
    # A fresh pool per message; close it afterwards so connections don't leak.
    pool = await aioredis.create_redis_pool(redis_host)
    try:
        with await pool as redis:
            output = message.output
            sub_command = message.sub_command
            if len(output.split()) == 1:
                package_name = output
                chat_id = str(message.chat.id)
                key = chat_id + ":" + package_name
                if sub_command and sub_command != 'nodev':
                    # 'stop' maps to remove_track_for_package(key).
                    output = await sub_command(key)
                else:
                    nodev = False
                    if sub_command:
                        nodev = True
                    versions = await d.get_release_list(package_name, nodev)
                    if versions is None:
                        output = f'Package {package_name} does not exists'
                    else:
                        current_version = d.get_last_release_version(versions)
                        output = f"Current {package_name} version is {current_version} \n" \
                                 "You will be announced with new version release"
                        version = current_version[0]
                        if nodev:
                            version = version + ':nodev'
                        await redis.set(key, version)
    finally:
        # Previously the pool was never closed, leaking a connection pool
        # per incoming /track message.
        pool.close()
        await pool.wait_closed()
    await message.answer(output)
@dp.message_handler()
async def echo_all(message: types.Message):
    """Fallback handler: echo any unrecognized message back verbatim."""
    text = message.text
    await message.answer(text)
if __name__ == '__main__':
    # Run the bot with long polling; any fatal crash is reported to Sentry.
    # NOTE(review): the exception is captured but not re-raised, so the
    # process exits silently after a fatal error.
    try:
        executor.start_polling(dp, skip_updates=True)
    except Exception as e:
        sentry_sdk.capture_exception(e)
| 40.687861 | 107 | 0.624378 | import os
import logging
import sentry_sdk
from aiogram import Bot, Dispatcher, executor, types
from datetime import datetime, timedelta
from pypi_tools.logic import remove_track_for_package
import pypi_tools.data as d
from pypi_tools.helpers import validate_input
import pypi_tools.vizualizer as v
import pypi_tools.readme as r
import asyncio
import aioredis
logging.basicConfig(level=logging.INFO)
redis_host = f"redis://{os.environ.get('REDIS_HOST')}"
bot = Bot(token=os.environ["BOT_API_KEY"], parse_mode="html")
dp = Dispatcher(bot)
@dp.message_handler(commands=['start'])
async def send_welcome(message):
    """Handle /start: greet the user and send the bot help text."""
    text = f"Hello, {message.chat.first_name} {message.chat.last_name}! \n" \
           f"Welcome to <b>PyPi Tools Bot.</b>\n\n" \
           "This Bot created special to obtain information from Official Python PyPi Server\n" \
           + r.help_text + r.current_version
    await message.answer(text)
@dp.message_handler(commands=['help'])
async def send_welcome(message):
    """Handle /help: reply with the static help text.

    NOTE(review): this redefines ``send_welcome`` from the /start handler
    above; aiogram registered both at decoration time, but consider renaming
    this one to ``send_help``.
    """
    await message.answer(r.help_text)
@dp.message_handler(lambda message: message.text and (
        '/stats' in message.text.lower() or 'stats:' in message.text.lower()))
@validate_input(command='stats',
                known_sub_commands={'@any_number': lambda num: num})
async def send_package_stats(message):
    """Reply with download statistics for a package.

    The optional numeric sub-command selects how many days to report
    (default 5); ``validate_input`` populates ``message.output`` and
    ``message.sub_command``.
    """
    output = message.output
    sub_command = message.sub_command or 5
    # A single token is a bare package name; anything else is presumably a
    # message prepared by validate_input and is echoed back unchanged.
    if len(output.split()) == 1:
        days = sub_command
        package_name = output
        current_date = datetime.now().date()
        data_ = await d.cached_package_downloads_stats(package_name, days, current_date)
        output = d.stats_text(data_, package_name, days)
    await message.answer(output)
@dp.message_handler(lambda message: message.text and (
        '/plot' in message.text.lower() or 'plot:' in message.text.lower()))
@validate_input(command='plot',
                known_sub_commands={'@any_number': lambda num: num})
async def send_package_stats_with_graph(message):
    """Reply with download statistics plus a rendered PNG download graph.

    Like /stats, an optional numeric sub-command selects how many days to
    report (default 5). Generated graphs are cached on disk under ``temp/``
    keyed by package, day, and window size.
    """
    output = message.output
    sub_command = message.sub_command or 5
    if len(output.split()) != 1:
        # Not a bare package name: echo the message prepared by validate_input
        # instead of falling through with no graph file bound.
        await message.answer(output)
        return
    days = sub_command
    package_name = output
    current_date = datetime.now().date()
    data_ = await d.cached_package_downloads_stats(package_name, days, current_date)
    output = d.stats_text(data_, package_name, days)
    temp = 'temp'
    os.makedirs(temp, exist_ok=True)
    # Start date for the pandas date range used by the graph generator.
    start_date = current_date - timedelta(days=2)
    # Cache key: the same graph is reused until the day rolls over.
    file_name = os.path.join(
        temp, f'{package_name}:{current_date - timedelta(days=1)}:{days}.png')
    if not os.path.isfile(file_name):
        # NOTE(review): assumes data_ is keyed newest-first; reversed so the
        # plot runs oldest-to-newest -- confirm against
        # cached_package_downloads_stats.
        file_name = v.generate_graph(
            start_date, [item for _, item in data_.items()][::-1], file_name)
    file_ = types.InputFile(file_name)
    await message.answer(output)
    await message.answer_photo(file_)
@dp.message_handler(commands=['random'])
async def command(message):
    """Handle /random: reply with a randomly chosen package from PyPI."""
    output = await d.get_random_package()
    await message.answer(output)
@dp.message_handler(commands=['search', 'search:detailed'])
@validate_input(command='search',
                known_sub_commands={'detailed': lambda _package_name: d.request_package_info_from_pypi(
                    _package_name, detailed=True)},
                additional_error="Or use with sub-command to get detailed information:"
                                 "/search:detailed aiohttp")
async def search_command(message):
    """Look a package up on PyPI; the ``detailed`` sub-command fetches the
    extended description instead of the summary."""
    output = message.output
    sub_command = message.sub_command
    if len(output.split()) == 1:
        package_name = output
        if sub_command:
            # sub_command is the coroutine-returning lambda declared above.
            output = await sub_command(package_name)
        else:
            output = await d.request_package_info_from_pypi(package_name)
    await message.answer(output)
@dp.message_handler(commands=['releases', 'releases:full'])
@validate_input(command='releases',
                known_sub_commands={'full': 'full'},
                additional_error="Or use with sub-command to get full list of releases:"
                                 "/releases:full aiohttp")
async def releases_command(message):
    """Reply with the release history of a package.

    By default only the last 7 releases are shown; the ``full`` sub-command
    lists every release.
    """
    output = message.output
    sub_command = message.sub_command
    if len(output.split()) == 1:
        package_name = output
        releases = await d.get_release_list(package_name=package_name)
        if releases is None:
            # get_release_list returns None for unknown packages
            # (track_command already guards against this case).
            output = f'Package {package_name} does not exists'
        elif sub_command and sub_command == 'full':
            output = f"Full Releases list for Package {package_name}\n\n"
            for version, v_date in releases.items():
                output += f"<b>{version}</b>: {v_date}\n"
        else:
            output = f"Last 7 Releases for Package {package_name}\n\n"
            # Slice instead of the previous counter-and-break loop, which
            # actually showed 8 entries ("if num > 7" broke one item late).
            for version, v_date in list(releases.items())[:7]:
                output += f"<b>{version}</b>: {v_date}\n"
    await message.answer(output)
track_sub_commands = {'stop': lambda key: remove_track_for_package(key),
                      'nodev': 'nodev'}


@dp.message_handler(commands=['track', 'track:stop', 'track:nodev'])
@validate_input(command='track',
                known_sub_commands=track_sub_commands,
                additional_error="Or use with sub-command to stop track a package releases"
                                 "/track:stop aiohttp")
async def track_command(message):
    """Handler to react on /track command and its sub-commands.

    Stores "<chat_id>:<package>" -> latest version in Redis; ':stop' removes
    the key and ':nodev' suppresses pre-release/dev versions.
    """
    # A fresh pool per message; close it afterwards so connections don't leak.
    pool = await aioredis.create_redis_pool(redis_host)
    try:
        with await pool as redis:
            output = message.output
            sub_command = message.sub_command
            if len(output.split()) == 1:
                package_name = output
                chat_id = str(message.chat.id)
                key = chat_id + ":" + package_name
                if sub_command and sub_command != 'nodev':
                    # 'stop' maps to remove_track_for_package(key).
                    output = await sub_command(key)
                else:
                    nodev = False
                    if sub_command:
                        nodev = True
                    versions = await d.get_release_list(package_name, nodev)
                    if versions is None:
                        output = f'Package {package_name} does not exists'
                    else:
                        current_version = d.get_last_release_version(versions)
                        output = f"Current {package_name} version is {current_version} \n" \
                                 "You will be announced with new version release"
                        version = current_version[0]
                        if nodev:
                            version = version + ':nodev'
                        await redis.set(key, version)
    finally:
        # Previously the pool was never closed, leaking a connection pool
        # per incoming /track message.
        pool.close()
        await pool.wait_closed()
    await message.answer(output)
@dp.message_handler()
async def echo_all(message: types.Message):
    """Fallback handler: echo any unrecognized message back verbatim."""
    await message.answer(message.text)
if __name__ == '__main__':
    # Run the bot with long polling; any fatal crash is reported to Sentry.
    # NOTE(review): the exception is captured but not re-raised, so the
    # process exits silently after a fatal error.
    try:
        executor.start_polling(dp, skip_updates=True)
    except Exception as e:
        sentry_sdk.capture_exception(e)
| true | true |
f720698516b3281db66e46d699cb433d69aa86c5 | 473 | py | Python | 4_factory/simple_factory/veggie_pizza.py | hypersport/Head-First-Design-Patterns-Python | 0c8b831ae89ebbbef8b203b96508deb7e3063590 | [
"MIT"
] | null | null | null | 4_factory/simple_factory/veggie_pizza.py | hypersport/Head-First-Design-Patterns-Python | 0c8b831ae89ebbbef8b203b96508deb7e3063590 | [
"MIT"
] | null | null | null | 4_factory/simple_factory/veggie_pizza.py | hypersport/Head-First-Design-Patterns-Python | 0c8b831ae89ebbbef8b203b96508deb7e3063590 | [
"MIT"
] | null | null | null | from pizza import Pizza
class VeggiePizza(Pizza):
    """Concrete Pizza variant: thin-crust veggie pizza with marinara sauce."""

    def __init__(self):
        self.name = 'Veggie Pizza'
        self.dough = 'Crust'
        self.sauce = 'Marinara sauce'
        # NOTE(review): relies on the Pizza base class to provide ``toppings``
        # (no super().__init__() call is made here) -- confirm it is a
        # per-instance list, not a shared class attribute.
        for topping in ('Shredded mozzarella',
                        'Grated parmesan',
                        'Diced onion',
                        'Sliced mushrooms',
                        'Sliced red pepper',
                        'Sliced black olives'):
            self.toppings.append(topping)
| 31.533333 | 51 | 0.651163 | from pizza import Pizza
class VeggiePizza(Pizza):
    """Concrete Pizza variant: thin-crust veggie pizza with marinara sauce."""
    def __init__(self):
        self.name = 'Veggie Pizza'
        self.dough = 'Crust'
        self.sauce = 'Marinara sauce'
        # NOTE(review): relies on the Pizza base class to provide ``toppings``
        # (no super().__init__() call is made here) -- confirm it is a
        # per-instance list, not a shared class attribute.
        self.toppings.append('Shredded mozzarella')
        self.toppings.append('Grated parmesan')
        self.toppings.append('Diced onion')
        self.toppings.append('Sliced mushrooms')
        self.toppings.append('Sliced red pepper')
        self.toppings.append('Sliced black olives')
| true | true |
f7206acbc129c68b6043e8dd105bdfbbd6738ace | 240 | py | Python | python/homeflux/utils/timer.py | david2777/homeflux | 4f7c4b855bd69b0b132d480cac133582445cacf5 | [
"MIT"
] | null | null | null | python/homeflux/utils/timer.py | david2777/homeflux | 4f7c4b855bd69b0b132d480cac133582445cacf5 | [
"MIT"
] | null | null | null | python/homeflux/utils/timer.py | david2777/homeflux | 4f7c4b855bd69b0b132d480cac133582445cacf5 | [
"MIT"
] | null | null | null | import time
class Timer:
    """Simple wall-clock timer started at construction time."""

    def __init__(self):
        # perf_counter() is monotonic, so the difference is a valid duration.
        self.start = time.perf_counter()

    def end(self, precision: int = 3) -> str:
        """Return the elapsed seconds formatted with ``precision`` decimals."""
        elapsed = time.perf_counter() - self.start
        return f'{elapsed:.{precision}f}'
| 21.818182 | 77 | 0.6 | import time
class Timer:
    """Simple wall-clock timer started at construction time."""
    def __init__(self):
        # perf_counter() is monotonic and suitable for measuring durations.
        self.start = time.perf_counter()
    def end(self, precision: int = 3) -> str:
        """Return seconds elapsed since construction, formatted to ``precision`` decimals."""
        return '%.{}f'.format(precision) % (time.perf_counter() - self.start)
| true | true |
f7206b1325541721ae9f9158f32fc2ad213a5369 | 16,774 | py | Python | pw_tokenizer/py/pw_tokenizer/tokens.py | LuDuda/pigweed | dcd7230895a234156bc7b6e5061e6936627c5fbb | [
"Apache-2.0"
] | null | null | null | pw_tokenizer/py/pw_tokenizer/tokens.py | LuDuda/pigweed | dcd7230895a234156bc7b6e5061e6936627c5fbb | [
"Apache-2.0"
] | null | null | null | pw_tokenizer/py/pw_tokenizer/tokens.py | LuDuda/pigweed | dcd7230895a234156bc7b6e5061e6936627c5fbb | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Builds and manages databases of tokenized strings."""
import collections
import csv
from dataclasses import dataclass
from datetime import datetime
import io
import logging
from pathlib import Path
import re
import struct
from typing import (BinaryIO, Callable, Dict, Iterable, Iterator, List,
NamedTuple, Optional, Pattern, Tuple, Union, ValuesView)
DATE_FORMAT = '%Y-%m-%d'
DEFAULT_DOMAIN = ''
# The default hash length to use. This value only applies when hashing strings
# from a legacy-style ELF with plain strings. New tokenized string entries
# include the token alongside the string.
#
# This MUST match the default value of PW_TOKENIZER_CFG_C_HASH_LENGTH in
# pw_tokenizer/public/pw_tokenizer/config.h.
DEFAULT_C_HASH_LENGTH = 128
TOKENIZER_HASH_CONSTANT = 65599
_LOG = logging.getLogger('pw_tokenizer')
def _value(char: Union[int, str]) -> int:
return char if isinstance(char, int) else ord(char)
def pw_tokenizer_65599_fixed_length_hash(string: Union[str, bytes],
                                         hash_length: int) -> int:
    """Hashes the provided string.

    This hash function is only used when adding tokens from legacy-style
    tokenized strings in an ELF, which do not include the token.
    """
    # Seed with the total string length, then fold in at most hash_length
    # characters. The coefficient advances by one power of the hash constant
    # per position; all arithmetic wraps modulo 2**32 to match the C macro.
    result = len(string)
    multiplier = TOKENIZER_HASH_CONSTANT
    for character in string[:hash_length]:
        result = (result + multiplier * _value(character)) % 2**32
        multiplier = (multiplier * TOKENIZER_HASH_CONSTANT) % 2**32
    return result
def default_hash(string: Union[str, bytes]) -> int:
    """Hashes *string* with the default fixed hash length.

    DEFAULT_C_HASH_LENGTH must match PW_TOKENIZER_CFG_C_HASH_LENGTH in the C
    config header (see the constant's definition above).
    """
    return pw_tokenizer_65599_fixed_length_hash(string, DEFAULT_C_HASH_LENGTH)
class _EntryKey(NamedTuple):
    """Uniquely refers to an entry.

    Two entries are the same database record iff both the token and the
    original string match, so token collisions can coexist in the database.
    """
    token: int  # 32-bit token value.
    string: str  # The original (untokenized) string.
@dataclass(eq=True, order=False)
class TokenizedStringEntry:
    """A tokenized string with its metadata.

    order=False: comparisons are defined manually below because removal
    dates must sort in reverse (newest / still-present first).
    """
    token: int  # 32-bit token value.
    string: str  # The original string literal.
    domain: str = DEFAULT_DOMAIN
    date_removed: Optional[datetime] = None  # None means still present.

    def key(self) -> _EntryKey:
        """The key determines uniqueness for a tokenized string."""
        return _EntryKey(self.token, self.string)

    def update_date_removed(self,
                            new_date_removed: Optional[datetime]) -> None:
        """Sets self.date_removed if the other date is newer."""
        # No removal date (None) is treated as the newest date.
        if self.date_removed is None:
            return
        if new_date_removed is None or new_date_removed > self.date_removed:
            self.date_removed = new_date_removed

    def __lt__(self, other) -> bool:
        """Sorts the entry by token, date removed, then string."""
        if self.token != other.token:
            return self.token < other.token
        # Sort removal dates in reverse, so the most recently removed (or still
        # present) entry appears first. None (still present) is substituted
        # with datetime.max so it compares as the newest possible date.
        if self.date_removed != other.date_removed:
            return (other.date_removed or datetime.max) < (self.date_removed
                                                           or datetime.max)
        return self.string < other.string

    def __str__(self) -> str:
        return self.string
class Database:
    """Database of tokenized strings stored as TokenizedStringEntry objects.

    Entries are keyed by (token, string), so two strings that hash to the
    same token can coexist; collisions() reports them.
    """
    def __init__(self, entries: Iterable[TokenizedStringEntry] = ()):
        """Creates a token database."""
        # The database dict stores each unique (token, string) entry.
        self._database: Dict[_EntryKey, TokenizedStringEntry] = {
            entry.key(): entry
            for entry in entries
        }
        # This is a cache for fast token lookup that is built as needed.
        # Every mutating method resets it to None so it is rebuilt lazily.
        self._cache: Optional[Dict[int, List[TokenizedStringEntry]]] = None

    @classmethod
    def from_strings(
            cls,
            strings: Iterable[str],
            domain: str = DEFAULT_DOMAIN,
            tokenize: Callable[[str], int] = default_hash) -> 'Database':
        """Creates a Database from an iterable of strings."""
        return cls((TokenizedStringEntry(tokenize(string), string, domain)
                    for string in strings))

    @classmethod
    def merged(cls, *databases: 'Database') -> 'Database':
        """Creates a TokenDatabase from one or more other databases."""
        db = cls()
        db.merge(*databases)
        return db

    @property
    def token_to_entries(self) -> Dict[int, List[TokenizedStringEntry]]:
        """Returns a dict that maps tokens to a list of TokenizedStringEntry."""
        if self._cache is None:  # build cache token -> entry cache
            self._cache = collections.defaultdict(list)
            for entry in self._database.values():
                self._cache[entry.token].append(entry)
        return self._cache

    def entries(self) -> ValuesView[TokenizedStringEntry]:
        """Returns iterable over all TokenizedStringEntries in the database."""
        return self._database.values()

    def collisions(self) -> Iterator[Tuple[int, List[TokenizedStringEntry]]]:
        """Returns tuple of (token, entries_list)) for all colliding tokens."""
        for token, entries in self.token_to_entries.items():
            if len(entries) > 1:
                yield token, entries

    def mark_removals(
            self,
            all_entries: Iterable[TokenizedStringEntry],
            removal_date: Optional[datetime] = None
    ) -> List[TokenizedStringEntry]:
        """Marks entries missing from all_entries as having been removed.

        The entries are assumed to represent the complete set of entries for the
        database. Entries currently in the database not present in the provided
        entries are marked with a removal date but remain in the database.
        Entries in all_entries missing from the database are NOT added; call the
        add function to add these.

        Args:
          all_entries: the complete set of strings present in the database
          removal_date: the datetime for removed entries; today by default

        Returns:
          A list of entries marked as removed.
        """
        self._cache = None
        if removal_date is None:
            removal_date = datetime.now()
        all_keys = frozenset(entry.key() for entry in all_entries)
        removed = []
        for entry in self._database.values():
            # Only (re)mark entries whose current removal date is absent or
            # newer than removal_date, so the oldest removal date is kept.
            if (entry.key() not in all_keys
                    and (entry.date_removed is None
                         or removal_date < entry.date_removed)):
                # Add a removal date, or update it to the oldest date.
                entry.date_removed = removal_date
                removed.append(entry)
        return removed

    def add(self, entries: Iterable[TokenizedStringEntry]) -> None:
        """Adds new entries and updates date_removed for existing entries.

        Re-adding an entry clears its removal date (it is present again).
        """
        self._cache = None
        for new_entry in entries:
            # Update an existing entry or create a new one.
            try:
                entry = self._database[new_entry.key()]
                entry.domain = new_entry.domain
                entry.date_removed = None
            except KeyError:
                self._database[new_entry.key()] = TokenizedStringEntry(
                    new_entry.token, new_entry.string, new_entry.domain)

    def purge(
            self,
            date_removed_cutoff: Optional[datetime] = None
    ) -> List[TokenizedStringEntry]:
        """Removes and returns entries removed on/before date_removed_cutoff.

        With no cutoff, all removed entries are purged (datetime.max).
        """
        self._cache = None
        if date_removed_cutoff is None:
            date_removed_cutoff = datetime.max
        to_delete = [
            entry for _, entry in self._database.items()
            if entry.date_removed and entry.date_removed <= date_removed_cutoff
        ]
        for entry in to_delete:
            del self._database[entry.key()]
        return to_delete

    def merge(self, *databases: 'Database') -> None:
        """Merges two or more databases together, keeping the newest dates."""
        self._cache = None
        for other_db in databases:
            for entry in other_db.entries():
                key = entry.key()
                if key in self._database:
                    self._database[key].update_date_removed(entry.date_removed)
                else:
                    self._database[key] = entry

    def filter(
        self,
        include: Iterable[Union[str, Pattern[str]]] = (),
        exclude: Iterable[Union[str, Pattern[str]]] = (),
        replace: Iterable[Tuple[Union[str, Pattern[str]], str]] = ()
    ) -> None:
        """Filters the database using regular expressions (strings or compiled).

        Args:
          include: regexes; only entries matching at least one are kept
          exclude: regexes; entries matching any of these are removed
          replace: (regex, str) tuples; replaces matching terms in all entries
        """
        self._cache = None
        to_delete: List[_EntryKey] = []
        if include:
            include_re = [re.compile(pattern) for pattern in include]
            to_delete.extend(
                key for key, val in self._database.items()
                if not any(rgx.search(val.string) for rgx in include_re))
        if exclude:
            exclude_re = [re.compile(pattern) for pattern in exclude]
            to_delete.extend(key for key, val in self._database.items() if any(
                rgx.search(val.string) for rgx in exclude_re))
        for key in to_delete:
            del self._database[key]
        # Replacements run on the surviving entries only (after deletions).
        for search, replacement in replace:
            search = re.compile(search)
            for value in self._database.values():
                value.string = search.sub(replacement, value.string)

    def __len__(self) -> int:
        """Returns the number of entries in the database."""
        return len(self.entries())

    def __str__(self) -> str:
        """Outputs the database as CSV."""
        csv_output = io.BytesIO()
        write_csv(self, csv_output)
        return csv_output.getvalue().decode()
def parse_csv(fd) -> Iterable[TokenizedStringEntry]:
    """Parses TokenizedStringEntries from a CSV token database file.

    Malformed rows are logged and skipped rather than aborting the parse.
    """
    for line in csv.reader(fd):
        try:
            token_hex, removal_date, text = line
            entry_date = None
            if removal_date.strip():
                entry_date = datetime.strptime(removal_date, DATE_FORMAT)
            yield TokenizedStringEntry(int(token_hex, 16), text,
                                       DEFAULT_DOMAIN, entry_date)
        except (ValueError, UnicodeDecodeError) as err:
            _LOG.error('Failed to parse tokenized string entry %s: %s', line,
                       err)
def write_csv(database: Database, fd: BinaryIO) -> None:
    """Writes the database as CSV to the provided binary file.

    Each row is: 8-digit lowercase hex token, removal date (or blank), then
    the quoted string. Rows are sorted (token, newest-removal-first, string).
    """
    for entry in sorted(database.entries()):
        # Align the CSV output to 10-character columns for improved readability.
        # Use \n instead of RFC 4180's \r\n.
        fd.write('{:08x},{:10},"{}"\n'.format(
            entry.token,
            entry.date_removed.strftime(DATE_FORMAT) if entry.date_removed else
            '', entry.string.replace('"', '""')).encode())  # escape " as ""
class _BinaryFileFormat(NamedTuple):
    """Attributes of the binary token database file format."""
    magic: bytes = b'TOKENS\0\0'
    # Header: 8-byte magic string, uint32 entry count, 4 bytes of padding.
    header: struct.Struct = struct.Struct('<8sI4x')
    # Per-entry record: uint32 token, removal day and month (uint8 each),
    # removal year (uint16). 0xff/0xff/0xffff marks "not removed".
    entry: struct.Struct = struct.Struct('<IBBH')


# Single shared instance describing the on-disk binary format.
BINARY_FORMAT = _BinaryFileFormat()
class DatabaseFormatError(Exception):
    """Failed to parse a token database file.

    Raised by both the CSV pre-check and the binary parser on malformed input.
    """
def file_is_binary_database(fd: BinaryIO) -> bool:
    """True if the file starts with the binary token database magic string.

    The file position is restored to the start before returning.
    """
    try:
        fd.seek(0)
        file_prefix = fd.read(len(BINARY_FORMAT.magic))
        fd.seek(0)
    except IOError:
        return False
    return file_prefix == BINARY_FORMAT.magic
def _check_that_file_is_csv_database(path: Path) -> None:
    """Raises an error unless the path appears to be a CSV token database.

    A valid CSV database is either empty or begins with an 8-character
    hexadecimal token.
    """
    try:
        with path.open('rb') as fd:
            first_bytes = fd.read(8)  # Should be the first token, in hex.

        if not first_bytes:
            return  # File is empty, which is valid CSV.

        if len(first_bytes) != 8:
            raise DatabaseFormatError(
                f'Attempted to read {path} as a CSV token database, but the '
                f'file is too short ({len(first_bytes)} B)')

        # Make sure the first 8 chars are a valid hexadecimal number.
        int(first_bytes.decode(), 16)
    except (IOError, UnicodeDecodeError, ValueError) as err:
        raise DatabaseFormatError(
            f'Encountered error while reading {path} as a CSV token database'
        ) from err
def parse_binary(fd: BinaryIO) -> Iterable[TokenizedStringEntry]:
    """Parses TokenizedStringEntries from a binary token database file.

    The file is a header, entry_count fixed-size (token, date) records, then
    a NUL-separated string table with one string per record, in order.

    Raises:
      DatabaseFormatError: the magic number is wrong or the string table is
        truncated/corrupt.
    """
    magic, entry_count = BINARY_FORMAT.header.unpack(
        fd.read(BINARY_FORMAT.header.size))

    if magic != BINARY_FORMAT.magic:
        raise DatabaseFormatError(
            f'Binary token database magic number mismatch (found {magic!r}, '
            f'expected {BINARY_FORMAT.magic!r}) while reading from {fd}')

    entries = []

    for _ in range(entry_count):
        token, day, month, year = BINARY_FORMAT.entry.unpack(
            fd.read(BINARY_FORMAT.entry.size))

        try:
            date_removed: Optional[datetime] = datetime(year, month, day)
        except ValueError:
            # The "not removed" sentinel (0xff/0xff/0xffff) is not a valid
            # date, so it lands here and maps to None.
            date_removed = None

        entries.append((token, date_removed))

    # Read the entire string table and define a function for looking up strings.
    string_table = fd.read()

    def read_string(start):
        """Returns the NUL-terminated string at start and the next offset."""
        end = string_table.find(b'\0', start)
        if end == -1:
            # Previously a missing terminator silently produced a garbage
            # slice; fail loudly instead so corruption is detected.
            raise DatabaseFormatError(
                f'Binary token database read from {fd} is corrupt: string '
                'table is not NUL-terminated')
        return string_table[start:end].decode(), end + 1

    offset = 0
    for token, removed in entries:
        string, offset = read_string(offset)
        yield TokenizedStringEntry(token, string, DEFAULT_DOMAIN, removed)
def write_binary(database: Database, fd: BinaryIO) -> None:
    """Writes the database as packed binary to the provided binary file.

    Layout: header, then one fixed-size record per entry (sorted), then the
    NUL-separated string table. parse_binary reads this format back.
    """
    entries = sorted(database.entries())
    fd.write(BINARY_FORMAT.header.pack(BINARY_FORMAT.magic, len(entries)))
    string_table = bytearray()
    for entry in entries:
        if entry.date_removed:
            removed_day = entry.date_removed.day
            removed_month = entry.date_removed.month
            removed_year = entry.date_removed.year
        else:
            # If there is no removal date, use the special value 0xffffffff for
            # the day/month/year. That ensures that still-present tokens appear
            # as the newest tokens when sorted by removal date.
            removed_day = 0xff
            removed_month = 0xff
            removed_year = 0xffff
        string_table += entry.string.encode()
        string_table.append(0)  # NUL terminator separates the strings.
        fd.write(
            BINARY_FORMAT.entry.pack(entry.token, removed_day, removed_month,
                                     removed_year))
    fd.write(string_table)
class DatabaseFile(Database):
    """A token database that is associated with a particular file.

    This class adds the write_to_file() method that writes to file from which it
    was created in the correct format (CSV or binary).
    """
    def __init__(self, path: Union[Path, str]):
        self.path = Path(path)
        # Read the path as a packed binary file.
        with self.path.open('rb') as fd:
            if file_is_binary_database(fd):
                super().__init__(parse_binary(fd))
                self._export = write_binary  # Remember format for saving.
                return
        # Read the path as a CSV file.
        _check_that_file_is_csv_database(self.path)
        with self.path.open('r', newline='') as file:
            super().__init__(parse_csv(file))
            self._export = write_csv

    def write_to_file(self, path: Optional[Union[Path, str]] = None) -> None:
        """Exports in the original format to the original or provided path."""
        # Both exporters write bytes, so the file is always opened as 'wb'.
        with open(self.path if path is None else path, 'wb') as fd:
            self._export(self, fd)
| 35.84188 | 80 | 0.63205 |
import collections
import csv
from dataclasses import dataclass
from datetime import datetime
import io
import logging
from pathlib import Path
import re
import struct
from typing import (BinaryIO, Callable, Dict, Iterable, Iterator, List,
NamedTuple, Optional, Pattern, Tuple, Union, ValuesView)
DATE_FORMAT = '%Y-%m-%d'
DEFAULT_DOMAIN = ''
DEFAULT_C_HASH_LENGTH = 128
TOKENIZER_HASH_CONSTANT = 65599
_LOG = logging.getLogger('pw_tokenizer')
def _value(char: Union[int, str]) -> int:
return char if isinstance(char, int) else ord(char)
def pw_tokenizer_65599_fixed_length_hash(string: Union[str, bytes],
hash_length: int) -> int:
hash_value = len(string)
coefficient = TOKENIZER_HASH_CONSTANT
for char in string[:hash_length]:
hash_value = (hash_value + coefficient * _value(char)) % 2**32
coefficient = (coefficient * TOKENIZER_HASH_CONSTANT) % 2**32
return hash_value
def default_hash(string: Union[str, bytes]) -> int:
return pw_tokenizer_65599_fixed_length_hash(string, DEFAULT_C_HASH_LENGTH)
class _EntryKey(NamedTuple):
token: int
string: str
@dataclass(eq=True, order=False)
class TokenizedStringEntry:
token: int
string: str
domain: str = DEFAULT_DOMAIN
date_removed: Optional[datetime] = None
def key(self) -> _EntryKey:
return _EntryKey(self.token, self.string)
def update_date_removed(self,
new_date_removed: Optional[datetime]) -> None:
if self.date_removed is None:
return
if new_date_removed is None or new_date_removed > self.date_removed:
self.date_removed = new_date_removed
def __lt__(self, other) -> bool:
if self.token != other.token:
return self.token < other.token
if self.date_removed != other.date_removed:
return (other.date_removed or datetime.max) < (self.date_removed
or datetime.max)
return self.string < other.string
def __str__(self) -> str:
return self.string
class Database:
def __init__(self, entries: Iterable[TokenizedStringEntry] = ()):
self._database: Dict[_EntryKey, TokenizedStringEntry] = {
entry.key(): entry
for entry in entries
}
self._cache: Optional[Dict[int, List[TokenizedStringEntry]]] = None
@classmethod
def from_strings(
cls,
strings: Iterable[str],
domain: str = DEFAULT_DOMAIN,
tokenize: Callable[[str], int] = default_hash) -> 'Database':
return cls((TokenizedStringEntry(tokenize(string), string, domain)
for string in strings))
@classmethod
def merged(cls, *databases: 'Database') -> 'Database':
db = cls()
db.merge(*databases)
return db
@property
def token_to_entries(self) -> Dict[int, List[TokenizedStringEntry]]:
if self._cache is None:
self._cache = collections.defaultdict(list)
for entry in self._database.values():
self._cache[entry.token].append(entry)
return self._cache
def entries(self) -> ValuesView[TokenizedStringEntry]:
return self._database.values()
def collisions(self) -> Iterator[Tuple[int, List[TokenizedStringEntry]]]:
for token, entries in self.token_to_entries.items():
if len(entries) > 1:
yield token, entries
def mark_removals(
self,
all_entries: Iterable[TokenizedStringEntry],
removal_date: Optional[datetime] = None
) -> List[TokenizedStringEntry]:
self._cache = None
if removal_date is None:
removal_date = datetime.now()
all_keys = frozenset(entry.key() for entry in all_entries)
removed = []
for entry in self._database.values():
if (entry.key() not in all_keys
and (entry.date_removed is None
or removal_date < entry.date_removed)):
entry.date_removed = removal_date
removed.append(entry)
return removed
def add(self, entries: Iterable[TokenizedStringEntry]) -> None:
self._cache = None
for new_entry in entries:
try:
entry = self._database[new_entry.key()]
entry.domain = new_entry.domain
entry.date_removed = None
except KeyError:
self._database[new_entry.key()] = TokenizedStringEntry(
new_entry.token, new_entry.string, new_entry.domain)
def purge(
self,
date_removed_cutoff: Optional[datetime] = None
) -> List[TokenizedStringEntry]:
self._cache = None
if date_removed_cutoff is None:
date_removed_cutoff = datetime.max
to_delete = [
entry for _, entry in self._database.items()
if entry.date_removed and entry.date_removed <= date_removed_cutoff
]
for entry in to_delete:
del self._database[entry.key()]
return to_delete
def merge(self, *databases: 'Database') -> None:
self._cache = None
for other_db in databases:
for entry in other_db.entries():
key = entry.key()
if key in self._database:
self._database[key].update_date_removed(entry.date_removed)
else:
self._database[key] = entry
def filter(
self,
include: Iterable[Union[str, Pattern[str]]] = (),
exclude: Iterable[Union[str, Pattern[str]]] = (),
replace: Iterable[Tuple[Union[str, Pattern[str]], str]] = ()
) -> None:
self._cache = None
to_delete: List[_EntryKey] = []
if include:
include_re = [re.compile(pattern) for pattern in include]
to_delete.extend(
key for key, val in self._database.items()
if not any(rgx.search(val.string) for rgx in include_re))
if exclude:
exclude_re = [re.compile(pattern) for pattern in exclude]
to_delete.extend(key for key, val in self._database.items() if any(
rgx.search(val.string) for rgx in exclude_re))
for key in to_delete:
del self._database[key]
for search, replacement in replace:
search = re.compile(search)
for value in self._database.values():
value.string = search.sub(replacement, value.string)
def __len__(self) -> int:
return len(self.entries())
def __str__(self) -> str:
csv_output = io.BytesIO()
write_csv(self, csv_output)
return csv_output.getvalue().decode()
def parse_csv(fd) -> Iterable[TokenizedStringEntry]:
for line in csv.reader(fd):
try:
token_str, date_str, string_literal = line
token = int(token_str, 16)
date = (datetime.strptime(date_str, DATE_FORMAT)
if date_str.strip() else None)
yield TokenizedStringEntry(token, string_literal, DEFAULT_DOMAIN,
date)
except (ValueError, UnicodeDecodeError) as err:
_LOG.error('Failed to parse tokenized string entry %s: %s', line,
err)
def write_csv(database: Database, fd: BinaryIO) -> None:
for entry in sorted(database.entries()):
fd.write('{:08x},{:10},"{}"\n'.format(
entry.token,
entry.date_removed.strftime(DATE_FORMAT) if entry.date_removed else
'', entry.string.replace('"', '""')).encode()) # escape " as ""
class _BinaryFileFormat(NamedTuple):
magic: bytes = b'TOKENS\0\0'
header: struct.Struct = struct.Struct('<8sI4x')
entry: struct.Struct = struct.Struct('<IBBH')
BINARY_FORMAT = _BinaryFileFormat()
class DatabaseFormatError(Exception):
def file_is_binary_database(fd: BinaryIO) -> bool:
try:
fd.seek(0)
magic = fd.read(len(BINARY_FORMAT.magic))
fd.seek(0)
return BINARY_FORMAT.magic == magic
except IOError:
return False
def _check_that_file_is_csv_database(path: Path) -> None:
try:
with path.open('rb') as fd:
data = fd.read(8) # Read 8 bytes, which should be the first token.
if not data:
return # File is empty, which is valid CSV.
if len(data) != 8:
raise DatabaseFormatError(
f'Attempted to read {path} as a CSV token database, but the '
f'file is too short ({len(data)} B)')
# Make sure the first 8 chars are a valid hexadecimal number.
_ = int(data.decode(), 16)
except (IOError, UnicodeDecodeError, ValueError) as err:
raise DatabaseFormatError(
f'Encountered error while reading {path} as a CSV token database'
) from err
def parse_binary(fd: BinaryIO) -> Iterable[TokenizedStringEntry]:
magic, entry_count = BINARY_FORMAT.header.unpack(
fd.read(BINARY_FORMAT.header.size))
if magic != BINARY_FORMAT.magic:
raise DatabaseFormatError(
f'Binary token database magic number mismatch (found {magic!r}, '
f'expected {BINARY_FORMAT.magic!r}) while reading from {fd}')
entries = []
for _ in range(entry_count):
token, day, month, year = BINARY_FORMAT.entry.unpack(
fd.read(BINARY_FORMAT.entry.size))
try:
date_removed: Optional[datetime] = datetime(year, month, day)
except ValueError:
date_removed = None
entries.append((token, date_removed))
# Read the entire string table and define a function for looking up strings.
string_table = fd.read()
def read_string(start):
end = string_table.find(b'\0', start)
return string_table[start:string_table.find(b'\0', start)].decode(
), end + 1
offset = 0
for token, removed in entries:
string, offset = read_string(offset)
yield TokenizedStringEntry(token, string, DEFAULT_DOMAIN, removed)
def write_binary(database: Database, fd: BinaryIO) -> None:
entries = sorted(database.entries())
fd.write(BINARY_FORMAT.header.pack(BINARY_FORMAT.magic, len(entries)))
string_table = bytearray()
for entry in entries:
if entry.date_removed:
removed_day = entry.date_removed.day
removed_month = entry.date_removed.month
removed_year = entry.date_removed.year
else:
# If there is no removal date, use the special value 0xffffffff for
# the day/month/year. That ensures that still-present tokens appear
# as the newest tokens when sorted by removal date.
removed_day = 0xff
removed_month = 0xff
removed_year = 0xffff
string_table += entry.string.encode()
string_table.append(0)
fd.write(
BINARY_FORMAT.entry.pack(entry.token, removed_day, removed_month,
removed_year))
fd.write(string_table)
class DatabaseFile(Database):
def __init__(self, path: Union[Path, str]):
self.path = Path(path)
# Read the path as a packed binary file.
with self.path.open('rb') as fd:
if file_is_binary_database(fd):
super().__init__(parse_binary(fd))
self._export = write_binary
return
# Read the path as a CSV file.
_check_that_file_is_csv_database(self.path)
with self.path.open('r', newline='') as file:
super().__init__(parse_csv(file))
self._export = write_csv
def write_to_file(self, path: Optional[Union[Path, str]] = None) -> None:
with open(self.path if path is None else path, 'wb') as fd:
self._export(self, fd)
| true | true |
f7206b36e5a22338c09290307a5bbcd5356c269a | 366 | py | Python | chatterbot/ext/django_chatterbot/migrations/0016_statement_stemmed_text.py | dieterwarson/ChatterBot | 69c674218be274bca1f47c105b09995373e09f47 | [
"BSD-3-Clause"
] | null | null | null | chatterbot/ext/django_chatterbot/migrations/0016_statement_stemmed_text.py | dieterwarson/ChatterBot | 69c674218be274bca1f47c105b09995373e09f47 | [
"BSD-3-Clause"
] | null | null | null | chatterbot/ext/django_chatterbot/migrations/0016_statement_stemmed_text.py | dieterwarson/ChatterBot | 69c674218be274bca1f47c105b09995373e09f47 | [
"BSD-3-Clause"
] | null | null | null | from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the optional ``stemmed_text`` field to the Statement model."""

    dependencies = [
        ('django_chatterbot', '0015_statement_persona'),
    ]
    operations = [
        migrations.AddField(
            model_name='statement',
            name='stemmed_text',
            # blank=True makes the field optional; max_length=400 presumably
            # mirrors the Statement text field's limit — confirm in the model.
            field=models.CharField(blank=True, max_length=400),
        ),
    ]
| 21.529412 | 63 | 0.60929 | from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('django_chatterbot', '0015_statement_persona'),
]
operations = [
migrations.AddField(
model_name='statement',
name='stemmed_text',
field=models.CharField(blank=True, max_length=400),
),
]
| true | true |
f7206bcb4173f335d34eb68cdae8a22c3c3c3a67 | 1,928 | py | Python | docs_src/sql_databases/sql_app/alt_main.py | Aryabhata-Rootspring/fastapi | f6237ad05a8468ac19c591181adad38d75372c46 | [
"MIT"
] | 53,007 | 2018-12-08T10:05:29.000Z | 2022-03-31T23:30:02.000Z | docs_src/sql_databases/sql_app/alt_main.py | Aryabhata-Rootspring/fastapi | f6237ad05a8468ac19c591181adad38d75372c46 | [
"MIT"
] | 4,155 | 2019-01-05T05:07:49.000Z | 2022-03-31T21:25:38.000Z | docs_src/sql_databases/sql_app/alt_main.py | Aryabhata-Rootspring/fastapi | f6237ad05a8468ac19c591181adad38d75372c46 | [
"MIT"
] | 4,092 | 2018-12-09T16:21:00.000Z | 2022-03-31T07:59:45.000Z | from typing import List
from fastapi import Depends, FastAPI, HTTPException, Request, Response
from sqlalchemy.orm import Session
from . import crud, models, schemas
from .database import SessionLocal, engine
models.Base.metadata.create_all(bind=engine)
app = FastAPI()
@app.middleware("http")
async def db_session_middleware(request: Request, call_next):
    """Opens a SQLAlchemy session per request and always closes it.

    The session is stored on ``request.state.db`` so handlers can fetch it
    through the ``get_db`` dependency.
    """
    # Pre-build a fallback 500 response so the finally block still runs (and
    # the session is closed) even if call_next raises before responding.
    response = Response("Internal server error", status_code=500)
    try:
        request.state.db = SessionLocal()
        response = await call_next(request)
    finally:
        request.state.db.close()
    return response
# Dependency: returns the request-scoped session that
# db_session_middleware attached to request.state.
def get_db(request: Request):
    return request.state.db
@app.post("/users/", response_model=schemas.User)
def create_user(user: schemas.UserCreate, db: Session = Depends(get_db)):
    """Creates a user, rejecting an already-registered email with HTTP 400."""
    db_user = crud.get_user_by_email(db, email=user.email)
    if db_user:
        raise HTTPException(status_code=400, detail="Email already registered")
    return crud.create_user(db=db, user=user)
@app.get("/users/", response_model=List[schemas.User])
def read_users(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
    """Returns a page of users selected by the skip/limit window."""
    users = crud.get_users(db, skip=skip, limit=limit)
    return users
@app.get("/users/{user_id}", response_model=schemas.User)
def read_user(user_id: int, db: Session = Depends(get_db)):
    """Returns the user with the given id, or HTTP 404 if absent."""
    db_user = crud.get_user(db, user_id=user_id)
    if db_user is None:
        raise HTTPException(status_code=404, detail="User not found")
    return db_user
@app.post("/users/{user_id}/items/", response_model=schemas.Item)
def create_item_for_user(
    user_id: int, item: schemas.ItemCreate, db: Session = Depends(get_db)
):
    """Creates an item owned by user_id.

    NOTE(review): user_id is not validated here — presumably crud or a DB
    constraint rejects unknown users; confirm the intended behavior.
    """
    return crud.create_user_item(db=db, item=item, user_id=user_id)
@app.get("/items/", response_model=List[schemas.Item])
def read_items(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
    """Returns a page of items selected by the skip/limit window."""
    items = crud.get_items(db, skip=skip, limit=limit)
    return items
| 30.603175 | 79 | 0.721992 | from typing import List
from fastapi import Depends, FastAPI, HTTPException, Request, Response
from sqlalchemy.orm import Session
from . import crud, models, schemas
from .database import SessionLocal, engine
models.Base.metadata.create_all(bind=engine)
app = FastAPI()
@app.middleware("http")
async def db_session_middleware(request: Request, call_next):
response = Response("Internal server error", status_code=500)
try:
request.state.db = SessionLocal()
response = await call_next(request)
finally:
request.state.db.close()
return response
def get_db(request: Request):
return request.state.db
@app.post("/users/", response_model=schemas.User)
def create_user(user: schemas.UserCreate, db: Session = Depends(get_db)):
db_user = crud.get_user_by_email(db, email=user.email)
if db_user:
raise HTTPException(status_code=400, detail="Email already registered")
return crud.create_user(db=db, user=user)
@app.get("/users/", response_model=List[schemas.User])
def read_users(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
users = crud.get_users(db, skip=skip, limit=limit)
return users
@app.get("/users/{user_id}", response_model=schemas.User)
def read_user(user_id: int, db: Session = Depends(get_db)):
db_user = crud.get_user(db, user_id=user_id)
if db_user is None:
raise HTTPException(status_code=404, detail="User not found")
return db_user
@app.post("/users/{user_id}/items/", response_model=schemas.Item)
def create_item_for_user(
user_id: int, item: schemas.ItemCreate, db: Session = Depends(get_db)
):
return crud.create_user_item(db=db, item=item, user_id=user_id)
@app.get("/items/", response_model=List[schemas.Item])
def read_items(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
items = crud.get_items(db, skip=skip, limit=limit)
return items
| true | true |
f7206d083d469a643b9a783b3a819077f502c23e | 450 | py | Python | Lib/fontTools/ttLib/tables/T_S_I__2.py | anntzer/fonttools | 726cd67549956b985bbbe83e26fb0af9da59ddf7 | [
"MIT",
"BSD-3-Clause"
] | 2 | 2021-04-07T16:47:04.000Z | 2022-01-15T04:01:01.000Z | Lib/fontTools/ttLib/tables/T_S_I__2.py | anntzer/fonttools | 726cd67549956b985bbbe83e26fb0af9da59ddf7 | [
"MIT",
"BSD-3-Clause"
] | 74 | 2020-01-30T07:27:54.000Z | 2021-08-03T05:47:17.000Z | Lib/fontTools/ttLib/tables/T_S_I__2.py | anntzer/fonttools | 726cd67549956b985bbbe83e26fb0af9da59ddf7 | [
"MIT",
"BSD-3-Clause"
] | 1 | 2020-01-22T20:06:09.000Z | 2020-01-22T20:06:09.000Z | """ TSI{0,1,2,3,5} are private tables used by Microsoft Visual TrueType (VTT)
tool to store its hinting source data.
TSI2 is the index table containing the lengths and offsets for the glyph
programs that are contained in the TSI3 table. It uses the same format as
the TSI0 table.
"""
from fontTools.misc.py23 import *
from fontTools import ttLib
# TSI2 shares the TSI0 index format, so it reuses TSI0's table class.
superclass = ttLib.getTableClass("TSI0")
class table_T_S_I__2(superclass):
    """Index of lengths/offsets for the glyph programs stored in TSI3."""
    dependencies = ["TSI3"]  # TSI3 holds the data this table indexes.
| 28.125 | 77 | 0.768889 | from fontTools.misc.py23 import *
from fontTools import ttLib
superclass = ttLib.getTableClass("TSI0")
class table_T_S_I__2(superclass):
dependencies = ["TSI3"]
| true | true |
f7206d8f4f866ba92ed05ffaf316f027c8f23f04 | 405 | py | Python | psqlflow/printers/graph_printer.py | liuhenry/psqlflow | a4d39794d437a16fbf89582d3f7b8e7425bdfca5 | [
"MIT"
] | 3 | 2016-12-11T19:50:44.000Z | 2018-05-24T13:52:09.000Z | psqlflow/printers/graph_printer.py | liuhenry/psqlflow | a4d39794d437a16fbf89582d3f7b8e7425bdfca5 | [
"MIT"
] | null | null | null | psqlflow/printers/graph_printer.py | liuhenry/psqlflow | a4d39794d437a16fbf89582d3f7b8e7425bdfca5 | [
"MIT"
] | null | null | null | import pygraphviz as pgv
from .printer import Printer
class GraphPrinter(Printer):
"""
Exports flows to graphviz
"""
def __init__(self, *args, **kwargs):
super(GraphPrinter, self).__init__(*args, **kwargs)
def new_obj(self):
return pgv.AGraph(strict=False, directed=True, rankdir='LR')
@staticmethod
def add_edge(graph, a, b):
graph.add_edge(a, b)
| 21.315789 | 68 | 0.644444 | import pygraphviz as pgv
from .printer import Printer
class GraphPrinter(Printer):
def __init__(self, *args, **kwargs):
super(GraphPrinter, self).__init__(*args, **kwargs)
def new_obj(self):
return pgv.AGraph(strict=False, directed=True, rankdir='LR')
@staticmethod
def add_edge(graph, a, b):
graph.add_edge(a, b)
| true | true |
f7206e62311f2ae8bf498980b4772f3ea02b5efc | 6,337 | py | Python | apps/alerts/main.py | Cal-CS-61A-Staff/examtool-web | c31b8596fde75c54fe6436400bb6d2889d7b1283 | [
"MIT"
] | 1 | 2020-05-06T22:34:44.000Z | 2020-05-06T22:34:44.000Z | apps/alerts/main.py | Cal-CS-61A-Staff/examtool-web | c31b8596fde75c54fe6436400bb6d2889d7b1283 | [
"MIT"
] | 5 | 2020-07-13T09:29:01.000Z | 2020-10-18T06:33:30.000Z | apps/alerts/main.py | Cal-CS-61A-Staff/examtool-web | c31b8596fde75c54fe6436400bb6d2889d7b1283 | [
"MIT"
] | 5 | 2020-05-13T16:10:24.000Z | 2020-09-23T18:41:06.000Z | import time
from os import getenv
from flask import jsonify, abort
from google.cloud import firestore
from google.oauth2 import id_token
from google.auth.transport import requests as g_requests
from api import (
process_ok_exam_upload,
is_admin,
clear_collection,
get_announcements,
get_email_from_secret,
generate_audio,
)
# this can be public
CLIENT_ID = "713452892775-59gliacuhbfho8qvn4ctngtp3858fgf9.apps.googleusercontent.com"
DEV_EMAIL = getenv("DEV_EMAIL", "exam-test@berkeley.edu")
def update_cache():
    """(Re)loads the static frontend assets into module-level globals."""
    global main_html, main_js
    with open("static/index.html") as f:
        main_html = f.read()
    with open("static/main.js") as f:
        main_js = f.read()
update_cache()  # Populate the cache once at import time.
def get_email(request):
    """Resolve the requester's email address.

    In the dev environment a fixed test address is returned without any
    verification; otherwise the Google ID token supplied in the request's
    JSON body is verified and its ``email`` claim is used.
    """
    if getenv("ENV") == "dev":
        return DEV_EMAIL
    # Verify the Google-issued OAuth2 ID token before trusting any claim.
    claims = id_token.verify_oauth2_token(
        request.json["token"], g_requests.Request(), CLIENT_ID
    )
    if claims["iss"] not in ("accounts.google.com", "https://accounts.google.com"):
        raise ValueError("Wrong issuer.")
    return claims["email"]
def index(request):
    """Single HTTP entry point that dispatches on the request path.

    Serves the cached static frontend, student-facing polling endpoints,
    and — behind an admin check — staff endpoints for managing exam
    announcements. All dynamic responses are JSON; in non-dev environments
    any unhandled exception is swallowed into ``{"success": False}``.
    """
    try:
        if getenv("ENV") == "dev":
            # In dev, re-read the static assets on every request so local
            # edits show up without restarting the process.
            update_cache()
        db = firestore.Client()
        # --- unauthenticated endpoints -----------------------------------
        if request.path.endswith("main.js"):
            return main_js
        if request.path.endswith("list_exams"):
            return jsonify(
                db.collection("exam-alerts")
                .document("all")
                .get()
                .to_dict()["exam-list"]
            )
        if request.path == "/" or request.json is None:
            return main_html
        if request.path.endswith("upload_ok_exam"):
            # Bulk upload of exam/student data, authenticated by a shared
            # secret inside process_ok_exam_upload.
            process_ok_exam_upload(db, request.json["data"], request.json["secret"])
            return jsonify({"success": True})
        # Exam ids look like "<course>-<name>"; the course prefix is used
        # for the admin check below.
        exam = request.json["exam"]
        course = exam.split("-")[0]
        if request.path.endswith("fetch_data"):
            # Student polling endpoint: returns the exam window plus any
            # announcements (with audio the client has not yet received).
            received_audio = request.json.get("receivedAudio")
            email = get_email(request)
            student_data = (
                db.collection("exam-alerts")
                .document(exam)
                .collection("students")
                .document(email)
                .get()
                .to_dict()
            )
            announcements = list(
                db.collection("exam-alerts")
                .document(exam)
                .collection("announcements")
                .stream()
            )
            return jsonify(
                {
                    "success": True,
                    "exam_type": "ok-exam",
                    "questions": [],
                    "startTime": student_data["start_time"],
                    "endTime": student_data["end_time"],
                    # "questions": [
                    #     {
                    #         "questionName": question["student_question_name"],
                    #         "startTime": question["start_time"],
                    #         "endTime": question["end_time"],
                    #     }
                    #     for question in student_data["questions"]
                    # ],
                    "announcements": get_announcements(
                        student_data,
                        announcements,
                        received_audio,
                        # Lazy audio lookup: only fetched for announcements
                        # the client still needs audio for.
                        lambda x: (
                            db.collection("exam-alerts")
                            .document(exam)
                            .collection("announcement_audio")
                            .document(x)
                            .get()
                            .to_dict()
                            or {}
                        ).get("audio"),
                    ),
                }
            )
        # only staff endpoints from here onwards
        # Staff may authenticate either via a shared secret or a Google token.
        email = (
            get_email_from_secret(request.json["secret"])
            if "secret" in request.json
            else get_email(request)
        )
        if not is_admin(email, course):
            abort(401)
        if request.path.endswith("fetch_staff_data"):
            # No mutation; falls through to the state dump below.
            pass
        elif request.path.endswith("add_announcement"):
            announcement = request.json["announcement"]
            announcement["timestamp"] = time.time()
            ref = (
                db.collection("exam-alerts")
                .document(exam)
                .collection("announcements")
                .document()
            )
            ref.set(announcement)
            # Synthesize TTS audio keyed by the announcement's document id.
            spoken_message = announcement.get("spoken_message", announcement["message"])
            if spoken_message:
                audio = generate_audio(spoken_message)
                db.collection("exam-alerts").document(exam).collection(
                    "announcement_audio"
                ).document(ref.id).set({"audio": audio})
        elif request.path.endswith("clear_announcements"):
            # Wipe both the announcements and their generated audio.
            clear_collection(
                db,
                db.collection("exam-alerts").document(exam).collection("announcements"),
            )
            clear_collection(
                db,
                db.collection("exam-alerts")
                .document(exam)
                .collection("announcement_audio"),
            )
        elif request.path.endswith("delete_announcement"):
            # NOTE(review): the matching announcement_audio document is not
            # deleted here — confirm whether orphaned audio is intended.
            target = request.json["id"]
            db.collection("exam-alerts").document(exam).collection(
                "announcements"
            ).document(target).delete()
        else:
            abort(404)
        # all staff endpoints return an updated state
        exam_data = db.collection("exam-alerts").document(exam).get().to_dict()
        announcements = sorted(
            (
                {"id": announcement.id, **announcement.to_dict()}
                for announcement in db.collection("exam-alerts")
                .document(exam)
                .collection("announcements")
                .stream()
            ),
            key=lambda announcement: announcement["timestamp"],
            reverse=True,
        )
        return jsonify(
            {"success": True, "exam": exam_data, "announcements": announcements}
        )
    except Exception as e:
        if getenv("ENV") == "dev":
            # Surface the full traceback during local development.
            raise
        print(e)
        print(dict(request.json))
        return jsonify({"success": False})
| 31.844221 | 88 | 0.504024 | import time
from os import getenv
from flask import jsonify, abort
from google.cloud import firestore
from google.oauth2 import id_token
from google.auth.transport import requests as g_requests
from api import (
process_ok_exam_upload,
is_admin,
clear_collection,
get_announcements,
get_email_from_secret,
generate_audio,
)
CLIENT_ID = "713452892775-59gliacuhbfho8qvn4ctngtp3858fgf9.apps.googleusercontent.com"
DEV_EMAIL = getenv("DEV_EMAIL", "exam-test@berkeley.edu")
def update_cache():
global main_html, main_js
with open("static/index.html") as f:
main_html = f.read()
with open("static/main.js") as f:
main_js = f.read()
update_cache()
def get_email(request):
if getenv("ENV") == "dev":
return DEV_EMAIL
token = request.json["token"]
id_info = id_token.verify_oauth2_token(token, g_requests.Request(), CLIENT_ID)
if id_info["iss"] not in ["accounts.google.com", "https://accounts.google.com"]:
raise ValueError("Wrong issuer.")
return id_info["email"]
def index(request):
try:
if getenv("ENV") == "dev":
update_cache()
db = firestore.Client()
if request.path.endswith("main.js"):
return main_js
if request.path.endswith("list_exams"):
return jsonify(
db.collection("exam-alerts")
.document("all")
.get()
.to_dict()["exam-list"]
)
if request.path == "/" or request.json is None:
return main_html
if request.path.endswith("upload_ok_exam"):
process_ok_exam_upload(db, request.json["data"], request.json["secret"])
return jsonify({"success": True})
exam = request.json["exam"]
course = exam.split("-")[0]
if request.path.endswith("fetch_data"):
received_audio = request.json.get("receivedAudio")
email = get_email(request)
student_data = (
db.collection("exam-alerts")
.document(exam)
.collection("students")
.document(email)
.get()
.to_dict()
)
announcements = list(
db.collection("exam-alerts")
.document(exam)
.collection("announcements")
.stream()
)
return jsonify(
{
"success": True,
"exam_type": "ok-exam",
"questions": [],
"startTime": student_data["start_time"],
"endTime": student_data["end_time"],
"announcements": get_announcements(
student_data,
announcements,
received_audio,
lambda x: (
db.collection("exam-alerts")
.document(exam)
.collection("announcement_audio")
.document(x)
.get()
.to_dict()
or {}
).get("audio"),
),
}
)
email = (
get_email_from_secret(request.json["secret"])
if "secret" in request.json
else get_email(request)
)
if not is_admin(email, course):
abort(401)
if request.path.endswith("fetch_staff_data"):
pass
elif request.path.endswith("add_announcement"):
announcement = request.json["announcement"]
announcement["timestamp"] = time.time()
ref = (
db.collection("exam-alerts")
.document(exam)
.collection("announcements")
.document()
)
ref.set(announcement)
spoken_message = announcement.get("spoken_message", announcement["message"])
if spoken_message:
audio = generate_audio(spoken_message)
db.collection("exam-alerts").document(exam).collection(
"announcement_audio"
).document(ref.id).set({"audio": audio})
elif request.path.endswith("clear_announcements"):
clear_collection(
db,
db.collection("exam-alerts").document(exam).collection("announcements"),
)
clear_collection(
db,
db.collection("exam-alerts")
.document(exam)
.collection("announcement_audio"),
)
elif request.path.endswith("delete_announcement"):
target = request.json["id"]
db.collection("exam-alerts").document(exam).collection(
"announcements"
).document(target).delete()
else:
abort(404)
exam_data = db.collection("exam-alerts").document(exam).get().to_dict()
announcements = sorted(
(
{"id": announcement.id, **announcement.to_dict()}
for announcement in db.collection("exam-alerts")
.document(exam)
.collection("announcements")
.stream()
),
key=lambda announcement: announcement["timestamp"],
reverse=True,
)
return jsonify(
{"success": True, "exam": exam_data, "announcements": announcements}
)
except Exception as e:
if getenv("ENV") == "dev":
raise
print(e)
print(dict(request.json))
return jsonify({"success": False})
| true | true |
f7206ebe8fad56aa6688ff5f36b4dc7a3840b16f | 413 | py | Python | backend/app/models/user.py | Infam852/IoT-project | 673d8a96676e046331550b9c16c0610de5733f73 | [
"MIT"
] | null | null | null | backend/app/models/user.py | Infam852/IoT-project | 673d8a96676e046331550b9c16c0610de5733f73 | [
"MIT"
] | null | null | null | backend/app/models/user.py | Infam852/IoT-project | 673d8a96676e046331550b9c16c0610de5733f73 | [
"MIT"
] | 1 | 2021-12-18T19:33:01.000Z | 2021-12-18T19:33:01.000Z | from sqlalchemy import Column, Integer, ForeignKey, DateTime
from sqlalchemy.sql.sqltypes import Boolean, String
from app.db.database import Base
class UserModel(Base):
    """SQLAlchemy ORM model for an application user account."""
    __tablename__ = "users"
    # Surrogate primary key.
    id = Column(Integer, primary_key=True, index=True)
    # Login name; indexed for lookups. NOTE(review): no unique constraint —
    # confirm whether duplicate usernames are intended.
    username = Column(String, nullable=False, index=True)
    # Soft-disable flag for the account.
    disabled = Column(Boolean, default=False)
    # Password hash only; plaintext passwords are never stored.
    hashed_password = Column(String, nullable=False)
| 29.5 | 60 | 0.755448 | from sqlalchemy import Column, Integer, ForeignKey, DateTime
from sqlalchemy.sql.sqltypes import Boolean, String
from app.db.database import Base
class UserModel(Base):
__tablename__ = "users"
id = Column(Integer, primary_key=True, index=True)
username = Column(String, nullable=False, index=True)
disabled = Column(Boolean, default=False)
hashed_password = Column(String, nullable=False)
| true | true |
f7206ec0c3d11ce24235924a1f5bf4631efd543f | 1,715 | py | Python | tools/generate_taint_models/tests/get_exit_nodes_test.py | rvantonder/pyre-check-1 | 600ec9656ece5fff21598f4248c55089714bf590 | [
"MIT"
] | null | null | null | tools/generate_taint_models/tests/get_exit_nodes_test.py | rvantonder/pyre-check-1 | 600ec9656ece5fff21598f4248c55089714bf590 | [
"MIT"
] | null | null | null | tools/generate_taint_models/tests/get_exit_nodes_test.py | rvantonder/pyre-check-1 | 600ec9656ece5fff21598f4248c55089714bf590 | [
"MIT"
] | null | null | null | # Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from typing import Callable
from ..get_exit_nodes import ExitNodeGenerator
from ..model_generator import Configuration
from .test_functions import __name__ as qualifier, all_functions
class GetExitNodesTest(unittest.TestCase):
    def test_compute_models(self):
        sink = "TaintSink[ReturnedToUser]"

        def model(signature):
            # Render one expected taint-model line for *signature*.
            return f"def {qualifier}.{signature} -> {sink}: ..."

        expected = [
            model("TestClass.methodA(self, x)"),
            model("TestClass.methodB(self, *args)"),
            model("testA()"),
            model("testB(x)"),
            model("testC(x)"),
            model("testD(x, *args)"),
            model("testE(x, **kwargs)"),
        ]
        self.assertEqual(
            list(ExitNodeGenerator().compute_models(all_functions)), expected
        )

        # Whitelisting a view should drop exactly that model from the output.
        Configuration.whitelisted_views = [f"{qualifier}.TestClass.methodA"]
        self.assertEqual(
            list(ExitNodeGenerator().compute_models(all_functions)), expected[1:]
        )
| 41.829268 | 81 | 0.549271 |
import unittest
from typing import Callable
from ..get_exit_nodes import ExitNodeGenerator
from ..model_generator import Configuration
from .test_functions import __name__ as qualifier, all_functions
class GetExitNodesTest(unittest.TestCase):
def test_compute_models(self):
sink = "TaintSink[ReturnedToUser]"
self.assertEqual(
list(ExitNodeGenerator().compute_models(all_functions)),
[
f"def {qualifier}.TestClass.methodA(self, x) -> {sink}: ...",
f"def {qualifier}.TestClass.methodB(self, *args) -> {sink}: ...",
f"def {qualifier}.testA() -> {sink}: ...",
f"def {qualifier}.testB(x) -> {sink}: ...",
f"def {qualifier}.testC(x) -> {sink}: ...",
f"def {qualifier}.testD(x, *args) -> {sink}: ...",
f"def {qualifier}.testE(x, **kwargs) -> {sink}: ...",
],
)
Configuration.whitelisted_views = [f"{qualifier}.TestClass.methodA"]
self.assertEqual(
list(ExitNodeGenerator().compute_models(all_functions)),
[
f"def {qualifier}.TestClass.methodB(self, *args) -> {sink}: ...",
f"def {qualifier}.testA() -> {sink}: ...",
f"def {qualifier}.testB(x) -> {sink}: ...",
f"def {qualifier}.testC(x) -> {sink}: ...",
f"def {qualifier}.testD(x, *args) -> {sink}: ...",
f"def {qualifier}.testE(x, **kwargs) -> {sink}: ...",
],
)
| true | true |
f7206f20aeb3af06e52189f7a1589c4376b4e9c4 | 1,677 | py | Python | 09.py | Michanix/Algorithms-Intro-Course | c81fa38b05199a42eaeb48567447ee3f6b1e535e | [
"MIT"
] | null | null | null | 09.py | Michanix/Algorithms-Intro-Course | c81fa38b05199a42eaeb48567447ee3f6b1e535e | [
"MIT"
] | null | null | null | 09.py | Michanix/Algorithms-Intro-Course | c81fa38b05199a42eaeb48567447ee3f6b1e535e | [
"MIT"
] | null | null | null | from random import randrange
from time import time
def bubble_sort(arr):
    """Sort *arr* in place in ascending order and return it (bubble sort)."""
    n = len(arr)
    for done in range(n):
        # Bubble the smallest remaining element down toward index *done*.
        for k in range(n - 1, done, -1):
            if arr[k] < arr[k - 1]:
                arr[k - 1], arr[k] = arr[k], arr[k - 1]
    return arr
def opt_bubble_sort(arr):
    """Sort *arr* in place in ascending order and return it.

    Cocktail-shaker variant of bubble sort: alternates a forward pass
    (pushing the largest remaining element to the right) with a backward
    pass (pushing the smallest remaining element to the left), stopping as
    soon as a pass performs no swap.

    Bug fixed: the original backward pass used ``range(len(arr) - 1, 0)``,
    which is empty (start > stop with the default step of +1), and its swap
    condition was inverted — so the backward pass never ran and the
    function silently degenerated into a plain bubble sort.
    """
    while True:
        swap = False
        # Forward pass: bubble the maximum toward the end.
        for i in range(len(arr) - 1):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
                swap = True
        if not swap:
            break
        swap = False
        # Backward pass: bubble the minimum toward the front.
        for j in range(len(arr) - 2, -1, -1):
            if arr[j] > arr[j + 1]:
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
                swap = True
        if not swap:
            break
    return arr
# Time how long the sorting function takes on a randomly filled array.
def check_time_in_random_arr(f):
    """Return the wall-clock seconds *f* spends on 1100 random ints."""
    data = [randrange(100) for _ in range(1100)]
    began = time()
    f(data)
    return time() - began
# Time how long the sorting function takes on an already-sorted array
# (the best case for the early-exit optimisation).
def check_time(f):
    """Return the wall-clock seconds *f* spends on the sorted list 0..1099."""
    data = list(range(1100))
    began = time()
    f(data)
    return time() - began
# Benchmark both variants on a pre-sorted array (best case for the
# early-exit optimisation)...
bubble_sort_time = check_time(bubble_sort)
opt_bubble_sort_time = check_time(opt_bubble_sort)
# ...and on a randomly filled array (average case).
bubble_sort_time2 = check_time_in_random_arr(bubble_sort)
opt_bubble_sort_time2 = check_time_in_random_arr(opt_bubble_sort)
# Report (message text is Russian): timings on the sorted array first,
# then on the random array, for the plain and optimised bubble sorts.
print('''
Время работы в уже отсортированном массиве:\n
Обычный пузырёк: {}\n
Модифицированный {}\n
Время работы в случайном массиве: \n
Обычный пузырёк: {}\n
Модифицированный: {}'''.format(bubble_sort_time, opt_bubble_sort_time, bubble_sort_time2, opt_bubble_sort_time2))
| 26.203125 | 117 | 0.603459 | from random import randrange
from time import time
def bubble_sort(arr):
for i in range(len(arr)):
for j in range(len(arr)-1, i, -1):
if arr[j] < arr[j-1]:
arr[j], arr[j-1] = arr[j-1], arr[j]
return arr
def opt_bubble_sort(arr):
while True:
swap = False
for i in range(len(arr)-1):
if arr[i] > arr[i+1]:
arr[i], arr[i+1] = arr[i+1], arr[i]
swap = True
if not swap:
break
swap = False
for j in range(len(arr)-1, 0):
if arr[j] < arr[j+1]:
arr[j], arr[j+1] = arr[j+1], arr[j]
swap = True
return arr
def check_time_in_random_arr(f):
arr = [randrange(100) for i in range(1100)]
start = time()
f(arr)
end = time()
return end - start
def check_time(f):
arr = [i for i in range(1100)]
start = time()
f(arr)
end = time()
return end - start
bubble_sort_time = check_time(bubble_sort)
opt_bubble_sort_time = check_time(opt_bubble_sort)
bubble_sort_time2 = check_time_in_random_arr(bubble_sort)
opt_bubble_sort_time2 = check_time_in_random_arr(opt_bubble_sort)
print('''
Время работы в уже отсортированном массиве:\n
Обычный пузырёк: {}\n
Модифицированный {}\n
Время работы в случайном массиве: \n
Обычный пузырёк: {}\n
Модифицированный: {}'''.format(bubble_sort_time, opt_bubble_sort_time, bubble_sort_time2, opt_bubble_sort_time2))
| true | true |
f72071c15f22bdfc2b6d13e6fb45864f32d756e1 | 5,175 | py | Python | nay/scrape_nominate_movie.py | kondounagi/japanese_movies_dataset | 349f217cd04e07fd44a401ecb2f2dcaea7bc2e5e | [
"MIT"
] | 1 | 2019-08-05T21:43:09.000Z | 2019-08-05T21:43:09.000Z | nay/scrape_nominate_movie.py | kondounagi/japanese_movies_dataset | 349f217cd04e07fd44a401ecb2f2dcaea7bc2e5e | [
"MIT"
] | 3 | 2020-03-31T05:53:37.000Z | 2021-12-13T20:07:39.000Z | nay/scrape_nominate_movie.py | kondounagi/japanese_movies_dataset | 349f217cd04e07fd44a401ecb2f2dcaea7bc2e5e | [
"MIT"
] | null | null | null | import re
import sys
import json
import requests
from bs4 import BeautifulSoup
def scrape_nominate_movie(year):
    """Scrape eiga.com metadata for every nominated film of one ceremony *year*.

    Reads the film ids from ``nominate_id/<year>.txt`` (one per line),
    fetches each film page, and returns a list of dicts with director,
    scriptwriter, performers, screen time, production studio, title,
    release date parts, and a ``prize`` flag for the Best Picture winner.
    """
    film_index = "https://eiga.com/movie/"
    # Patterns scraped out of the raw page text: screen time ("/NN分/"),
    # distributor ("配給:..."), page title ("映画「...」"), release date.
    re_time = re.compile(r"/\d*分/")
    re_production_studio = re.compile(r"配給:[^<]*")
    re_title = re.compile(r"映画「[^」]*」")
    re_date = re.compile(r"\d*年\d*月\d*日")
    year_film_data = []
    # title aligns with eiga.com
    # Hard-coded Best Picture winners (Japan Academy Prize), newest first,
    # used to set the ``prize`` flag by title match.
    best_prize_title = [
        '万引き家族',
        '三度目の殺人',
        'シン・ゴジラ',
        '海街diary',
        '永遠の0',
        '舟を編む',
        '桐島、部活やめるってよ',
        '八日目の蟬',
        '告白',
        '沈まぬ太陽',
        'おくりびと',
        '東京タワー オカンとボクと、時々、オトン',
        'フラガール',
        'ALWAYS 三丁目の夕日',
        '半落ち',
        '壬生義士伝',
        'たそがれ清兵衛',
        '千と千尋の神隠し',
        '雨あがる',
        '鉄道員(ぽっぽや)',
        '愛を乞うひと',
        'もののけ姫',
        'Shall we ダンス?',
        '午後の遺言状',
        '忠臣蔵外伝 四谷怪談',
        '学校',
        'シコふんじゃった。',
        '息子',
        '少年時代',
        '黒い雨',
        '敦煌',
        'マルサの女',
        '火宅の人',
        '花いちもんめ',
        'お葬式',
        '楢山節考',
        '蒲田行進曲',
        '駅 STATION',
        'ツィゴイネルワイゼン',
        '復讐するは我にあり',
        '事件',
        '幸福の黄色いハンカチ',
    ]
    with open("nominate_id/" + str(year) + ".txt", "r") as f:
        for line in f.readlines():
            film_id = line.strip()
            film_data = {}
            film_data["director"] = []
            film_data["scriptwriter"] = []
            film_data["performers"] = []
            film_data["screen_time"] = -1  # -1 marks "not found on the page"
            film_data["production_studio"] = ""
            film_data["title"] = ""
            film_data["year"] = year
            # Provisional prize guess: the first listed film wins, except in
            # 2020 (ceremony not yet decided). The title match below can
            # still overwrite this.
            if len(year_film_data) == 0 and year != 2020:
                film_data["prize"] = 1
            else:
                film_data["prize"] = 0
            # fetch top-1 movie result information
            content = requests.get(film_index + film_id).content
            soup = BeautifulSoup(content, features="lxml")
            # filter out screen time and production studio
            html_text = soup.prettify()
            production_studio = re_production_studio.search(html_text)
            screen_time = re_time.search(html_text)
            title = re_title.search(html_text)
            date = re_date.search(html_text)
            if production_studio:
                # Drop the "配給:" prefix.
                film_data["production_studio"] = (
                    production_studio.group(0)[3:].strip())
            if screen_time:
                # Strip the leading "/" and trailing "分/".
                film_data["screen_time"] = int(screen_time.group(0)[1:-2])
            if title:
                # Strip the "映画「" prefix and the closing "」".
                film_data["title"] = title.group(0)[3:-1]
                if film_data["title"] in best_prize_title:
                    film_data["prize"] = 1
                else:
                    # Debug trace: ids whose title is not a known winner.
                    print(film_id)
            if date:
                # Split "YYYY年MM月DD日" into its parts.
                # NOTE(review): this overwrites the int *year* with a
                # string slice — downstream consumers see mixed types.
                date_str = date.group(0)
                film_data["year"] = date_str[0:date_str.find("年")]
                film_data["month"] = (
                    date_str[date_str.find("年") + 1:date_str.find("月")])
                film_data["day"] = (
                    date_str[date_str.find("月") + 1:date_str.find("日")])
            # filter out informative data
            staff_cast = soup.find(id="staff-cast")
            if staff_cast is not None:
                for div in staff_cast.find_all():
                    # When calling div["class"], return type is list[string]
                    if div.name == "dl" and div.has_attr("class") and div["class"][0] == "movie-staff":
                        # movie staff column
                        # Each <dt> names a role; following <dd>s carry names.
                        data_type = ""
                        for p in div.find_all():
                            if p.name == "dt":
                                if p.get_text().find("監督") != -1:
                                    data_type = "director"
                                elif p.get_text().find("脚本") != -1:
                                    data_type = "scriptwriter"
                                else:
                                    data_type = ""
                                # new meta data type can be added here
                            elif p.name == "dd" and len(data_type) > 0:
                                film_data[data_type].append(p.get_text().strip())
                    elif div.name == "ul" and div.has_attr("class") and div["class"][0] == "movie-cast":
                        # movie cast column
                        for p in div.find_all():
                            if p.name == "span":
                                film_data["performers"].append(p.get_text().strip())
            # print(film_data)
            year_film_data.append(film_data)
            sys.stdout.flush()
    return year_film_data
def main():
    """Scrape nominees for every ceremony year 1978-2020 and dump to JSON."""
    years_dict = {}
    next_id = 10  # sequential unique id assigned across all years
    for year in range(1978, 2020 + 1):
        movies = scrape_nominate_movie(year)
        for movie in movies:
            movie["id"] = next_id
            next_id += 1
        years_dict[year] = movies
    with open("nominate_movie_meta_data.json", "w") as out:
        out.write(json.dumps(years_dict, ensure_ascii=False))
        out.write("\n")


if __name__ == "__main__":
    main()
| 32.753165 | 104 | 0.465507 | import re
import sys
import json
import requests
from bs4 import BeautifulSoup
def scrape_nominate_movie(year):
film_index = "https://eiga.com/movie/"
re_time = re.compile(r"/\d*分/")
re_production_studio = re.compile(r"配給:[^<]*")
re_title = re.compile(r"映画「[^」]*」")
re_date = re.compile(r"\d*年\d*月\d*日")
year_film_data = []
best_prize_title = [
'万引き家族',
'三度目の殺人',
'シン・ゴジラ',
'海街diary',
'永遠の0',
'舟を編む',
'桐島、部活やめるってよ',
'八日目の蟬',
'告白',
'沈まぬ太陽',
'おくりびと',
'東京タワー オカンとボクと、時々、オトン',
'フラガール',
'ALWAYS 三丁目の夕日',
'半落ち',
'壬生義士伝',
'たそがれ清兵衛',
'千と千尋の神隠し',
'雨あがる',
'鉄道員(ぽっぽや)',
'愛を乞うひと',
'もののけ姫',
'Shall we ダンス?',
'午後の遺言状',
'忠臣蔵外伝 四谷怪談',
'学校',
'シコふんじゃった。',
'息子',
'少年時代',
'黒い雨',
'敦煌',
'マルサの女',
'火宅の人',
'花いちもんめ',
'お葬式',
'楢山節考',
'蒲田行進曲',
'駅 STATION',
'ツィゴイネルワイゼン',
'復讐するは我にあり',
'事件',
'幸福の黄色いハンカチ',
]
with open("nominate_id/" + str(year) + ".txt", "r") as f:
for line in f.readlines():
film_id = line.strip()
film_data = {}
film_data["director"] = []
film_data["scriptwriter"] = []
film_data["performers"] = []
film_data["screen_time"] = -1
film_data["production_studio"] = ""
film_data["title"] = ""
film_data["year"] = year
if len(year_film_data) == 0 and year != 2020:
film_data["prize"] = 1
else:
film_data["prize"] = 0
content = requests.get(film_index + film_id).content
soup = BeautifulSoup(content, features="lxml")
html_text = soup.prettify()
production_studio = re_production_studio.search(html_text)
screen_time = re_time.search(html_text)
title = re_title.search(html_text)
date = re_date.search(html_text)
if production_studio:
film_data["production_studio"] = (
production_studio.group(0)[3:].strip())
if screen_time:
film_data["screen_time"] = int(screen_time.group(0)[1:-2])
if title:
film_data["title"] = title.group(0)[3:-1]
if film_data["title"] in best_prize_title:
film_data["prize"] = 1
else:
print(film_id)
if date:
date_str = date.group(0)
film_data["year"] = date_str[0:date_str.find("年")]
film_data["month"] = (
date_str[date_str.find("年") + 1:date_str.find("月")])
film_data["day"] = (
date_str[date_str.find("月") + 1:date_str.find("日")])
staff_cast = soup.find(id="staff-cast")
if staff_cast is not None:
for div in staff_cast.find_all():
if div.name == "dl" and div.has_attr("class") and div["class"][0] == "movie-staff":
data_type = ""
for p in div.find_all():
if p.name == "dt":
if p.get_text().find("監督") != -1:
data_type = "director"
elif p.get_text().find("脚本") != -1:
data_type = "scriptwriter"
else:
data_type = ""
elif p.name == "dd" and len(data_type) > 0:
film_data[data_type].append(p.get_text().strip())
elif div.name == "ul" and div.has_attr("class") and div["class"][0] == "movie-cast":
for p in div.find_all():
if p.name == "span":
film_data["performers"].append(p.get_text().strip())
year_film_data.append(film_data)
sys.stdout.flush()
return year_film_data
def main():
start_year = 1978
end_year = 2020
years_dict = {}
unique_id = 10
for i in range(start_year, end_year + 1):
years_dict[i] = scrape_nominate_movie(i)
for j in range(len(years_dict[i])):
years_dict[i][j]["id"] = unique_id
unique_id += 1
with open("nominate_movie_meta_data.json", "w") as f:
f.write(json.dumps(years_dict, ensure_ascii=False))
f.write("\n")
if __name__ == "__main__":
main()
| true | true |
f72073fa7cc1e6f079942989602618bed6ed1f0a | 9,216 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_07_01/aio/operations/_available_private_endpoint_types_operations.py | praveenkuttappan/azure-sdk-for-python | 4b79413667b7539750a6c7dde15737013a3d4bd5 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_07_01/aio/operations/_available_private_endpoint_types_operations.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_07_01/aio/operations/_available_private_endpoint_types_operations.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AvailablePrivateEndpointTypesOperations:
    """AvailablePrivateEndpointTypesOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2019_07_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def list(
        self,
        location: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.AvailablePrivateEndpointTypesResult"]:
        """Returns all of the resource types that can be linked to a Private Endpoint in this subscription
        in this region.

        :param location: The location of the domain name.
        :type location: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either AvailablePrivateEndpointTypesResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_07_01.models.AvailablePrivateEndpointTypesResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.AvailablePrivateEndpointTypesResult"]
        # Map selected HTTP status codes to dedicated azure-core exceptions;
        # callers may extend the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # First page: build the URL from the operation template.
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'location': self._serialize.url("location", location, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # Subsequent pages: the service-supplied next_link already
                # carries its full query string, so no parameters are added.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and hand back (next page link, items).
            deserialized = self._deserialize('AvailablePrivateEndpointTypesResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                # Let a caller-supplied hook transform the raw element list.
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        # AsyncItemPaged lazily drives get_next/extract_data as the caller iterates.
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/availablePrivateEndpointTypes'}  # type: ignore

    def list_by_resource_group(
        self,
        location: str,
        resource_group_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.AvailablePrivateEndpointTypesResult"]:
        """Returns all of the resource types that can be linked to a Private Endpoint in this subscription
        in this region.

        :param location: The location of the domain name.
        :type location: str
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either AvailablePrivateEndpointTypesResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_07_01.models.AvailablePrivateEndpointTypesResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.AvailablePrivateEndpointTypesResult"]
        # Same error mapping as list(); extensible via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # First page: build the resource-group-scoped URL.
                # Construct URL
                url = self.list_by_resource_group.metadata['url']  # type: ignore
                path_format_arguments = {
                    'location': self._serialize.url("location", location, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # Subsequent pages come straight from the opaque next_link.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and hand back (next page link, items).
            deserialized = self._deserialize('AvailablePrivateEndpointTypesResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/locations/{location}/availablePrivateEndpointTypes'}  # type: ignore
| 48.761905 | 210 | 0.659397 |
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AvailablePrivateEndpointTypesOperations:
    """Async operations for listing private endpoint types available in a region.

    Not meant to be constructed by user code directly; instances are exposed
    as an attribute of the generated network management client.
    """
    # Alias to the generated models module, exposed for convenience.
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # Pipeline client plus shared (de)serializers and client configuration.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list(
        self,
        location: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.AvailablePrivateEndpointTypesResult"]:
        """Return an async pager over private endpoint types in a location.

        :param location: The Azure region to query.
        :return: AsyncItemPaged of AvailablePrivateEndpointTypesResult pages.
        :raises ~azure.core.exceptions.HttpResponseError: for non-200 responses.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the initial GET request, or follow a continuation link.
            header_parameters = {}
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                url = self.list.metadata['url']
                path_format_arguments = {
                    'location': self._serialize.url("location", location, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # Continuation links already carry their own query string.
                url = next_link
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page; return (next_link, iterable of elements).
            deserialized = self._deserialize('AvailablePrivateEndpointTypesResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch one page, mapping HTTP errors to typed exceptions.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/availablePrivateEndpointTypes'}
    def list_by_resource_group(
        self,
        location: str,
        resource_group_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.AvailablePrivateEndpointTypesResult"]:
        """Return an async pager over private endpoint types, scoped to a resource group.

        :param location: The Azure region to query.
        :param resource_group_name: Resource group that scopes the query.
        :return: AsyncItemPaged of AvailablePrivateEndpointTypesResult pages.
        :raises ~azure.core.exceptions.HttpResponseError: for non-200 responses.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the initial GET request, or follow a continuation link.
            header_parameters = {}
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                url = self.list_by_resource_group.metadata['url']
                path_format_arguments = {
                    'location': self._serialize.url("location", location, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # Continuation links already carry their own query string.
                url = next_link
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page; return (next_link, iterable of elements).
            deserialized = self._deserialize('AvailablePrivateEndpointTypesResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch one page, mapping HTTP errors to typed exceptions.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/locations/{location}/availablePrivateEndpointTypes'}
| true | true |
f72076a759856b30a8e2638c441c193c5f2894fe | 3,023 | py | Python | rally_openstack/scenarios/gnocchi/archive_policy.py | RSE-Cambridge/rally-openstack | 32bbc091bbce1db625a2fc22da28b32718befa13 | [
"Apache-2.0"
] | null | null | null | rally_openstack/scenarios/gnocchi/archive_policy.py | RSE-Cambridge/rally-openstack | 32bbc091bbce1db625a2fc22da28b32718befa13 | [
"Apache-2.0"
] | null | null | null | rally_openstack/scenarios/gnocchi/archive_policy.py | RSE-Cambridge/rally-openstack | 32bbc091bbce1db625a2fc22da28b32718befa13 | [
"Apache-2.0"
] | 1 | 2018-12-10T12:31:27.000Z | 2018-12-10T12:31:27.000Z | # Copyright 2017 Red Hat, Inc. <http://www.redhat.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.task import validation
from rally_openstack import consts
from rally_openstack import scenario
from rally_openstack.scenarios.gnocchi import utils as gnocchiutils
"""Scenarios for Gnocchi archive policy."""
@validation.add("required_services", services=[consts.Service.GNOCCHI])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(name="GnocchiArchivePolicy.list_archive_policy")
class ListArchivePolicy(gnocchiutils.GnocchiBase):
    """Benchmark scenario: enumerate Gnocchi archive policies as a user."""
    def run(self):
        """List archive policies."""
        self.gnocchi.list_archive_policy()
@validation.add("required_services", services=[consts.Service.GNOCCHI])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(
    context={"admin_cleanup@openstack": ["gnocchi.archive_policy"]},
    name="GnocchiArchivePolicy.create_archive_policy")
class CreateArchivePolicy(gnocchiutils.GnocchiBase):
    """Benchmark scenario: create a randomly-named archive policy (admin)."""
    def run(self, definition=None, aggregation_methods=None):
        """Create archive policy.

        :param definition: List of definitions
        :param aggregation_methods: List of aggregation methods
        """
        # Default: 1-second granularity retained for one hour.
        if definition is None:
            definition = [{"granularity": "0:00:01", "timespan": "1:00:00"}]
        name = self.generate_random_name()
        self.admin_gnocchi.create_archive_policy(
            name, definition=definition,
            aggregation_methods=aggregation_methods)
@validation.add("required_services", services=[consts.Service.GNOCCHI])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(
    context={"admin_cleanup@openstack": ["gnocchi.archive_policy"]},
    name="GnocchiArchivePolicy.create_delete_archive_policy")
class CreateDeleteArchivePolicy(gnocchiutils.GnocchiBase):
    """Benchmark scenario: create an archive policy, then delete it (admin)."""
    def run(self, definition=None, aggregation_methods=None):
        """Create archive policy and then delete it.

        :param definition: List of definitions
        :param aggregation_methods: List of aggregation methods
        """
        # Default: 1-second granularity retained for one hour.
        if definition is None:
            definition = [{"granularity": "0:00:01", "timespan": "1:00:00"}]
        name = self.generate_random_name()
        self.admin_gnocchi.create_archive_policy(
            name, definition=definition,
            aggregation_methods=aggregation_methods)
        self.admin_gnocchi.delete_archive_policy(name)
| 38.75641 | 78 | 0.7261 |
from rally.task import validation
from rally_openstack import consts
from rally_openstack import scenario
from rally_openstack.scenarios.gnocchi import utils as gnocchiutils
@validation.add("required_services", services=[consts.Service.GNOCCHI])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(name="GnocchiArchivePolicy.list_archive_policy")
class ListArchivePolicy(gnocchiutils.GnocchiBase):
    """Benchmark scenario: enumerate Gnocchi archive policies as a user."""
    def run(self):
        """List archive policies."""
        self.gnocchi.list_archive_policy()
@validation.add("required_services", services=[consts.Service.GNOCCHI])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(
    context={"admin_cleanup@openstack": ["gnocchi.archive_policy"]},
    name="GnocchiArchivePolicy.create_archive_policy")
class CreateArchivePolicy(gnocchiutils.GnocchiBase):
    """Benchmark scenario: create a randomly-named archive policy (admin)."""
    def run(self, definition=None, aggregation_methods=None):
        """Create an archive policy.

        :param definition: List of definitions
        :param aggregation_methods: List of aggregation methods
        """
        # Default: 1-second granularity retained for one hour.
        if definition is None:
            definition = [{"granularity": "0:00:01", "timespan": "1:00:00"}]
        name = self.generate_random_name()
        self.admin_gnocchi.create_archive_policy(
            name, definition=definition,
            aggregation_methods=aggregation_methods)
@validation.add("required_services", services=[consts.Service.GNOCCHI])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(
    context={"admin_cleanup@openstack": ["gnocchi.archive_policy"]},
    name="GnocchiArchivePolicy.create_delete_archive_policy")
class CreateDeleteArchivePolicy(gnocchiutils.GnocchiBase):
    """Benchmark scenario: create an archive policy, then delete it (admin)."""
    def run(self, definition=None, aggregation_methods=None):
        """Create an archive policy and then delete it.

        :param definition: List of definitions
        :param aggregation_methods: List of aggregation methods
        """
        # Default: 1-second granularity retained for one hour.
        if definition is None:
            definition = [{"granularity": "0:00:01", "timespan": "1:00:00"}]
        name = self.generate_random_name()
        self.admin_gnocchi.create_archive_policy(
            name, definition=definition,
            aggregation_methods=aggregation_methods)
        self.admin_gnocchi.delete_archive_policy(name)
| true | true |
f720771fafd95ebfe23bca7f24afc2f571e9f07b | 51,158 | py | Python | tests/test_dataset.py | jenhaoyang/datumaro | add81ddb59502362fa65fa07e5bc4d8c9f61afde | [
"MIT"
] | null | null | null | tests/test_dataset.py | jenhaoyang/datumaro | add81ddb59502362fa65fa07e5bc4d8c9f61afde | [
"MIT"
] | null | null | null | tests/test_dataset.py | jenhaoyang/datumaro | add81ddb59502362fa65fa07e5bc4d8c9f61afde | [
"MIT"
] | 1 | 2021-12-15T22:15:59.000Z | 2021-12-15T22:15:59.000Z | from unittest import TestCase
import os
import os.path as osp
import numpy as np
from datumaro.components.annotation import (
AnnotationType, Bbox, Caption, Label, LabelCategories, Mask, Points,
Polygon, PolyLine,
)
from datumaro.components.converter import Converter
from datumaro.components.dataset import (
DEFAULT_FORMAT, Dataset, ItemStatus, eager_mode,
)
from datumaro.components.dataset_filter import (
DatasetItemEncoder, XPathAnnotationsFilter, XPathDatasetFilter,
)
from datumaro.components.environment import Environment
from datumaro.components.errors import (
ConflictingCategoriesError, DatasetNotFoundError, MultipleFormatsMatchError,
NoMatchingFormatsError, RepeatedItemError, UnknownFormatError,
)
from datumaro.components.extractor import (
DEFAULT_SUBSET_NAME, DatasetItem, Extractor, ItemTransform, Transform,
)
from datumaro.components.launcher import Launcher
from datumaro.components.media import Image
from datumaro.util.test_utils import TestDir, compare_datasets
from .requirements import Requirements, mark_requirement
class DatasetTest(TestCase):
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_create_from_extractors(self):
class SrcExtractor1(Extractor):
def __iter__(self):
return iter([
DatasetItem(id=1, subset='train', annotations=[
Bbox(1, 2, 3, 4),
Label(4),
]),
DatasetItem(id=1, subset='val', annotations=[
Label(4),
]),
])
class SrcExtractor2(Extractor):
def __iter__(self):
return iter([
DatasetItem(id=1, subset='val', annotations=[
Label(5),
]),
])
class DstExtractor(Extractor):
def __iter__(self):
return iter([
DatasetItem(id=1, subset='train', annotations=[
Bbox(1, 2, 3, 4),
Label(4),
]),
DatasetItem(id=1, subset='val', annotations=[
Label(4),
Label(5),
]),
])
dataset = Dataset.from_extractors(SrcExtractor1(), SrcExtractor2())
compare_datasets(self, DstExtractor(), dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_create_from_iterable(self):
class TestExtractor(Extractor):
def __iter__(self):
return iter([
DatasetItem(id=1, subset='train', annotations=[
Bbox(1, 2, 3, 4, label=2),
Label(4),
]),
DatasetItem(id=1, subset='val', annotations=[
Label(3),
]),
])
def categories(self):
return { AnnotationType.label: LabelCategories.from_iterable(
['a', 'b', 'c', 'd', 'e'])
}
actual = Dataset.from_iterable([
DatasetItem(id=1, subset='train', annotations=[
Bbox(1, 2, 3, 4, label=2),
Label(4),
]),
DatasetItem(id=1, subset='val', annotations=[
Label(3),
]),
], categories=['a', 'b', 'c', 'd', 'e'])
compare_datasets(self, TestExtractor(), actual)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_join_datasets_with_empty_categories(self):
expected = Dataset.from_iterable([
DatasetItem(1, annotations=[
Label(0),
Bbox(1, 2, 3, 4),
Caption('hello world'),
])
], categories=['a'])
src1 = Dataset.from_iterable([
DatasetItem(1, annotations=[ Bbox(1, 2, 3, 4, label=None) ])
], categories=[])
src2 = Dataset.from_iterable([
DatasetItem(1, annotations=[ Label(0) ])
], categories=['a'])
src3 = Dataset.from_iterable([
DatasetItem(1, annotations=[ Caption('hello world') ])
])
actual = Dataset.from_extractors(src1, src2, src3)
compare_datasets(self, expected, actual)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Label(2) ]),
], categories=['a', 'b', 'c'])
with TestDir() as test_dir:
source_dataset.save(test_dir)
loaded_dataset = Dataset.load(test_dir)
compare_datasets(self, source_dataset, loaded_dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_detect(self):
env = Environment()
env.importers.items = {DEFAULT_FORMAT: env.importers[DEFAULT_FORMAT]}
env.extractors.items = {DEFAULT_FORMAT: env.extractors[DEFAULT_FORMAT]}
dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Label(2) ]),
], categories=['a', 'b', 'c'])
with TestDir() as test_dir:
dataset.save(test_dir)
detected_format = Dataset.detect(test_dir, env=env)
self.assertEqual(DEFAULT_FORMAT, detected_format)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_detect_and_import(self):
env = Environment()
env.importers.items = {DEFAULT_FORMAT: env.importers[DEFAULT_FORMAT]}
env.extractors.items = {DEFAULT_FORMAT: env.extractors[DEFAULT_FORMAT]}
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Label(2) ]),
], categories=['a', 'b', 'c'])
with TestDir() as test_dir:
source_dataset.save(test_dir)
imported_dataset = Dataset.import_from(test_dir, env=env)
self.assertEqual(imported_dataset.data_path, test_dir)
self.assertEqual(imported_dataset.format, DEFAULT_FORMAT)
compare_datasets(self, source_dataset, imported_dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_report_no_dataset_found(self):
env = Environment()
env.importers.items = {
DEFAULT_FORMAT: env.importers[DEFAULT_FORMAT],
}
env.extractors.items = {
DEFAULT_FORMAT: env.extractors[DEFAULT_FORMAT],
}
with TestDir() as test_dir, self.assertRaises(DatasetNotFoundError):
Dataset.import_from(test_dir, DEFAULT_FORMAT, env=env)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_report_multiple_formats_match(self):
env = Environment()
env.importers.items = {
'a': env.importers[DEFAULT_FORMAT],
'b': env.importers[DEFAULT_FORMAT],
}
env.extractors.items = {
'a': env.extractors[DEFAULT_FORMAT],
'b': env.extractors[DEFAULT_FORMAT],
}
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Label(2) ]),
], categories=['a', 'b', 'c'])
with TestDir() as test_dir:
source_dataset.save(test_dir)
with self.assertRaises(MultipleFormatsMatchError):
Dataset.import_from(test_dir, env=env)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_report_no_matching_formats(self):
env = Environment()
env.importers.items = {}
env.extractors.items = {}
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Label(2) ]),
], categories=['a', 'b', 'c'])
with TestDir() as test_dir:
source_dataset.save(test_dir)
with self.assertRaises(NoMatchingFormatsError):
Dataset.import_from(test_dir, env=env)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_report_unknown_format_requested(self):
env = Environment()
env.importers.items = {}
env.extractors.items = {}
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Label(2) ]),
], categories=['a', 'b', 'c'])
with TestDir() as test_dir:
source_dataset.save(test_dir)
with self.assertRaises(UnknownFormatError):
Dataset.import_from(test_dir, format='custom', env=env)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_export_by_string_format_name(self):
env = Environment()
env.converters.items = {'qq': env.converters[DEFAULT_FORMAT]}
dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Label(2) ]),
], categories=['a', 'b', 'c'], env=env)
with TestDir() as test_dir:
dataset.export(format='qq', save_dir=test_dir)
    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_can_remember_export_options(self):
        # A dataset saved with extra options must replay them on later saves:
        # the deleted image should reappear because 'save_images=True' was
        # remembered from the first save.
        dataset = Dataset.from_iterable([
            DatasetItem(id=1, image=np.ones((1, 2, 3))),
        ], categories=['a'])
        with TestDir() as test_dir:
            dataset.save(test_dir, save_images=True)
            dataset.put(dataset.get(1)) # mark the item modified for patching
            image_path = osp.join(test_dir, 'images', 'default', '1.jpg')
            os.remove(image_path)
            # Patch save: should reuse the remembered export options.
            dataset.save(test_dir)
            self.assertEqual({'save_images': True}, dataset.options)
            self.assertTrue(osp.isfile(image_path))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_compute_length_when_created_from_scratch(self):
dataset = Dataset()
dataset.put(DatasetItem(1))
dataset.put(DatasetItem(2))
dataset.put(DatasetItem(3))
dataset.remove(1)
self.assertEqual(2, len(dataset))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_compute_length_when_created_from_extractor(self):
class TestExtractor(Extractor):
def __iter__(self):
yield from [
DatasetItem(1),
DatasetItem(2),
DatasetItem(3),
]
dataset = Dataset.from_extractors(TestExtractor())
self.assertEqual(3, len(dataset))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_compute_length_when_created_from_sequence(self):
dataset = Dataset.from_iterable([
DatasetItem(1),
DatasetItem(2),
DatasetItem(3),
])
self.assertEqual(3, len(dataset))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_transform_by_string_name(self):
expected = Dataset.from_iterable([
DatasetItem(id=1, attributes={'qq': 1}),
])
class TestTransform(ItemTransform):
def transform_item(self, item):
return self.wrap_item(item, attributes={'qq': 1})
env = Environment()
env.transforms.register('qq', TestTransform)
dataset = Dataset.from_iterable([ DatasetItem(id=1) ], env=env)
actual = dataset.transform('qq')
compare_datasets(self, expected, actual)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_transform(self):
expected = Dataset.from_iterable([
DatasetItem(id=1, attributes={'qq': 1}),
])
class TestTransform(ItemTransform):
def transform_item(self, item):
return self.wrap_item(item, attributes={'qq': 1})
dataset = Dataset.from_iterable([ DatasetItem(id=1) ])
actual = dataset.transform(TestTransform)
compare_datasets(self, expected, actual)
    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_can_join_annotations(self):
        # Joining two sources that share an item must merge the item's
        # annotations, de-duplicating equal ones (Label(2) appears once).
        a = Dataset.from_iterable([
            DatasetItem(id=1, subset='train', annotations=[
                Label(1, id=3),
                Label(2, attributes={ 'x': 1 }),
            ])
        ], categories=['a', 'b', 'c', 'd'])
        b = Dataset.from_iterable([
            DatasetItem(id=1, subset='train', annotations=[
                Label(2, attributes={ 'x': 1 }),
                Label(3, id=4),
            ])
        ], categories=['a', 'b', 'c', 'd'])
        expected = Dataset.from_iterable([
            DatasetItem(id=1, subset='train', annotations=[
                Label(1, id=3),
                Label(2, attributes={ 'x': 1 }),
                Label(3, id=4),
            ])
        ], categories=['a', 'b', 'c', 'd'])
        merged = Dataset.from_extractors(a, b)
        compare_datasets(self, expected, merged)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_cant_join_different_categories(self):
s1 = Dataset.from_iterable([], categories=['a', 'b'])
s2 = Dataset.from_iterable([], categories=['b', 'a'])
with self.assertRaises(ConflictingCategoriesError):
Dataset.from_extractors(s1, s2)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_join_datasets(self):
s1 = Dataset.from_iterable([ DatasetItem(0), DatasetItem(1) ])
s2 = Dataset.from_iterable([ DatasetItem(1), DatasetItem(2) ])
expected = Dataset.from_iterable([
DatasetItem(0), DatasetItem(1), DatasetItem(2)
])
actual = Dataset.from_extractors(s1, s2)
compare_datasets(self, expected, actual)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_track_modifications_on_addition(self):
dataset = Dataset.from_iterable([
DatasetItem(1),
DatasetItem(2),
])
self.assertFalse(dataset.is_modified)
dataset.put(DatasetItem(3, subset='a'))
self.assertTrue(dataset.is_modified)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_track_modifications_on_removal(self):
dataset = Dataset.from_iterable([
DatasetItem(1),
DatasetItem(2),
])
self.assertFalse(dataset.is_modified)
dataset.remove(1)
self.assertTrue(dataset.is_modified)
    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_can_create_patch(self):
        # After an add, an overwrite and a removal, get_patch() must report
        # per-item statuses, the touched subsets, and carry only the
        # surviving changed items.
        expected = Dataset.from_iterable([
            DatasetItem(2),
            DatasetItem(3, subset='a')
        ])
        dataset = Dataset.from_iterable([
            DatasetItem(1),
            DatasetItem(2),
        ])
        dataset.put(DatasetItem(2))
        dataset.put(DatasetItem(3, subset='a'))
        dataset.remove(1)
        patch = dataset.get_patch()
        self.assertEqual({
            ('1', DEFAULT_SUBSET_NAME): ItemStatus.removed,
            ('2', DEFAULT_SUBSET_NAME): ItemStatus.added,
            ('3', 'a'): ItemStatus.added,
        }, patch.updated_items)
        self.assertEqual({
            'default': ItemStatus.modified,
            'a': ItemStatus.modified,
        }, patch.updated_subsets)
        # Removed items are absent from the patch payload.
        self.assertEqual(2, len(patch.data))
        self.assertEqual(None, patch.data.get(1))
        self.assertEqual(dataset.get(2), patch.data.get(2))
        self.assertEqual(dataset.get(3, 'a'), patch.data.get(3, 'a'))
        compare_datasets(self, expected, dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_create_patch_when_cached(self):
expected = Dataset.from_iterable([
DatasetItem(2),
DatasetItem(3, subset='a')
])
dataset = Dataset.from_iterable([
DatasetItem(1),
DatasetItem(2),
])
dataset.init_cache()
dataset.put(DatasetItem(2))
dataset.put(DatasetItem(3, subset='a'))
dataset.remove(1)
patch = dataset.get_patch()
self.assertEqual({
('1', DEFAULT_SUBSET_NAME): ItemStatus.removed,
# Item was not changed from the original one.
# TODO: add item comparison and remove this line
('2', DEFAULT_SUBSET_NAME): ItemStatus.modified,
('3', 'a'): ItemStatus.added,
}, patch.updated_items)
self.assertEqual({
'default': ItemStatus.modified,
'a': ItemStatus.modified,
}, patch.updated_subsets)
self.assertEqual(2, len(patch.data))
self.assertEqual(None, patch.data.get(1))
self.assertEqual(dataset.get(2), patch.data.get(2))
self.assertEqual(dataset.get(3, 'a'), patch.data.get(3, 'a'))
compare_datasets(self, expected, dataset)
@mark_requirement(Requirements.DATUM_BUG_257)
def test_can_create_patch_when_transforms_mixed(self):
expected = Dataset.from_iterable([
DatasetItem(2),
DatasetItem(3, subset='a')
])
dataset = Dataset.from_iterable([
DatasetItem(1),
DatasetItem(2),
])
class Remove1(Transform):
def __iter__(self):
for item in self._extractor:
if item.id != '1':
yield item
class Add3(Transform):
def __iter__(self):
for item in self._extractor:
if item.id == '2':
yield item
yield DatasetItem(3, subset='a')
dataset.transform(Remove1)
dataset.transform(Add3)
patch = dataset.get_patch()
self.assertEqual({
('1', DEFAULT_SUBSET_NAME): ItemStatus.removed,
('2', DEFAULT_SUBSET_NAME): ItemStatus.modified,
('3', 'a'): ItemStatus.added,
}, patch.updated_items)
self.assertEqual({
'default': ItemStatus.modified,
'a': ItemStatus.modified,
}, patch.updated_subsets)
self.assertEqual(2, len(patch.data))
self.assertEqual(None, patch.data.get(1))
self.assertEqual(dataset.get(2), patch.data.get(2))
self.assertEqual(dataset.get(3, 'a'), patch.data.get(3, 'a'))
compare_datasets(self, expected, dataset)
@mark_requirement(Requirements.DATUM_BUG_257)
def test_can_create_patch_when_transforms_chained(self):
expected = Dataset.from_iterable([
DatasetItem(2),
DatasetItem(3, subset='a')
])
class TestExtractor(Extractor):
iter_called = 0
def __iter__(self):
yield from [
DatasetItem(1),
DatasetItem(2),
]
__class__.iter_called += 1
class Remove1(Transform):
iter_called = 0
def __iter__(self):
for item in self._extractor:
if item.id != '1':
yield item
__class__.iter_called += 1
class Add3(Transform):
iter_called = 0
def __iter__(self):
yield from self._extractor
yield DatasetItem(3, subset='a')
__class__.iter_called += 1
dataset = Dataset.from_extractors(TestExtractor())
dataset.transform(Remove1)
dataset.transform(Add3)
patch = dataset.get_patch()
self.assertEqual({
('1', DEFAULT_SUBSET_NAME): ItemStatus.removed,
('2', DEFAULT_SUBSET_NAME): ItemStatus.modified,
('3', 'a'): ItemStatus.added,
}, patch.updated_items)
self.assertEqual({
'default': ItemStatus.modified,
'a': ItemStatus.modified,
}, patch.updated_subsets)
self.assertEqual(2, len(patch.data))
self.assertEqual(None, patch.data.get(1))
self.assertEqual(dataset.get(2), patch.data.get(2))
self.assertEqual(dataset.get(3, 'a'), patch.data.get(3, 'a'))
self.assertEqual(TestExtractor.iter_called, 2) # 1 for items, 1 for list
self.assertEqual(Remove1.iter_called, 1)
self.assertEqual(Add3.iter_called, 1)
compare_datasets(self, expected, dataset)
@mark_requirement(Requirements.DATUM_BUG_257)
def test_can_create_patch_when_transforms_intermixed_with_direct_ops(self):
expected = Dataset.from_iterable([
DatasetItem(3, subset='a'),
DatasetItem(4),
DatasetItem(5),
])
class TestExtractor(Extractor):
iter_called = 0
def __iter__(self):
yield from [
DatasetItem(1),
DatasetItem(2),
]
__class__.iter_called += 1
class Remove1(Transform):
iter_called = 0
def __iter__(self):
for item in self._extractor:
if item.id != '1':
yield item
__class__.iter_called += 1
class Add3(Transform):
iter_called = 0
def __iter__(self):
yield from self._extractor
yield DatasetItem(3, subset='a')
__class__.iter_called += 1
dataset = Dataset.from_extractors(TestExtractor())
dataset.init_cache()
dataset.put(DatasetItem(4))
dataset.transform(Remove1)
dataset.put(DatasetItem(5))
dataset.remove(2)
dataset.transform(Add3)
patch = dataset.get_patch()
self.assertEqual({
('1', DEFAULT_SUBSET_NAME): ItemStatus.removed,
('2', DEFAULT_SUBSET_NAME): ItemStatus.removed,
('3', 'a'): ItemStatus.added,
('4', DEFAULT_SUBSET_NAME): ItemStatus.added,
('5', DEFAULT_SUBSET_NAME): ItemStatus.added,
}, patch.updated_items)
self.assertEqual({
'default': ItemStatus.modified,
'a': ItemStatus.modified,
}, patch.updated_subsets)
self.assertEqual(3, len(patch.data))
self.assertEqual(None, patch.data.get(1))
self.assertEqual(None, patch.data.get(2))
self.assertEqual(dataset.get(3, 'a'), patch.data.get(3, 'a'))
self.assertEqual(dataset.get(4), patch.data.get(4))
self.assertEqual(dataset.get(5), patch.data.get(5))
self.assertEqual(TestExtractor.iter_called, 1)
self.assertEqual(Remove1.iter_called, 1)
self.assertEqual(Add3.iter_called, 1)
compare_datasets(self, expected, dataset)
@mark_requirement(Requirements.DATUM_BUG_257)
def test_can_create_patch_when_local_transforms_stacked(self):
expected = Dataset.from_iterable([
DatasetItem(4),
DatasetItem(5),
])
class TestExtractor(Extractor):
iter_called = 0
def __iter__(self):
yield from [
DatasetItem(1),
DatasetItem(2),
]
__class__.iter_called += 1
class ShiftIds(ItemTransform):
def transform_item(self, item):
return item.wrap(id=int(item.id) + 1)
dataset = Dataset.from_extractors(TestExtractor())
dataset.remove(2)
dataset.transform(ShiftIds)
dataset.transform(ShiftIds)
dataset.transform(ShiftIds)
dataset.put(DatasetItem(5))
patch = dataset.get_patch()
self.assertEqual({
('1', DEFAULT_SUBSET_NAME): ItemStatus.removed,
('2', DEFAULT_SUBSET_NAME): ItemStatus.removed,
('4', DEFAULT_SUBSET_NAME): ItemStatus.added,
('5', DEFAULT_SUBSET_NAME): ItemStatus.added,
}, patch.updated_items)
self.assertEqual({
'default': ItemStatus.modified,
}, patch.updated_subsets)
self.assertEqual(2, len(patch.data))
self.assertEqual(None, patch.data.get(1))
self.assertEqual(None, patch.data.get(2))
self.assertEqual(None, patch.data.get(3))
self.assertEqual(dataset.get(4), patch.data.get(4))
self.assertEqual(dataset.get(5), patch.data.get(5))
self.assertEqual(TestExtractor.iter_called, 1)
compare_datasets(self, expected, dataset)
@mark_requirement(Requirements.DATUM_BUG_257)
def test_can_create_patch_when_transforms_chained_and_source_cached(self):
expected = Dataset.from_iterable([
DatasetItem(2),
DatasetItem(3, subset='a')
])
class TestExtractor(Extractor):
iter_called = 0
def __iter__(self):
yield from [
DatasetItem(1),
DatasetItem(2),
]
__class__.iter_called += 1
class Remove1(Transform):
iter_called = 0
def __iter__(self):
for item in self._extractor:
if item.id != '1':
yield item
__class__.iter_called += 1
class Add3(Transform):
iter_called = 0
def __iter__(self):
yield from self._extractor
yield DatasetItem(3, subset='a')
__class__.iter_called += 1
dataset = Dataset.from_extractors(TestExtractor())
dataset.init_cache()
dataset.transform(Remove1)
dataset.transform(Add3)
patch = dataset.get_patch()
self.assertEqual({
('1', DEFAULT_SUBSET_NAME): ItemStatus.removed,
('2', DEFAULT_SUBSET_NAME): ItemStatus.modified, # TODO: remove this
('3', 'a'): ItemStatus.added,
}, patch.updated_items)
self.assertEqual({
'default': ItemStatus.modified,
'a': ItemStatus.modified,
}, patch.updated_subsets)
self.assertEqual(2, len(patch.data))
self.assertEqual(None, patch.data.get(1))
self.assertEqual(dataset.get(2), patch.data.get(2))
self.assertEqual(dataset.get(3, 'a'), patch.data.get(3, 'a'))
self.assertEqual(TestExtractor.iter_called, 1) # 1 for items and list
self.assertEqual(Remove1.iter_called, 1)
self.assertEqual(Add3.iter_called, 1)
compare_datasets(self, expected, dataset)
    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_can_do_lazy_put_and_remove(self):
        # put() and remove() must not force reading of the backing source;
        # the source is only iterated once the cache is initialized.
        iter_called = False
        class TestExtractor(Extractor):
            def __iter__(self):
                nonlocal iter_called
                iter_called = True
                return iter([
                    DatasetItem(1),
                    DatasetItem(2),
                ])
        dataset = Dataset.from_extractors(TestExtractor())
        self.assertFalse(dataset.is_cache_initialized)
        dataset.put(DatasetItem(3))
        dataset.remove(DatasetItem(1))
        self.assertFalse(dataset.is_cache_initialized)
        self.assertFalse(iter_called)
        dataset.init_cache()
        self.assertTrue(dataset.is_cache_initialized)
        self.assertTrue(iter_called)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_put(self):
dataset = Dataset()
dataset.put(DatasetItem(1))
self.assertTrue((1, '') in dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_do_lazy_get_on_updated_item(self):
iter_called = False
class TestExtractor(Extractor):
def __iter__(self):
nonlocal iter_called
iter_called = True
return iter([
DatasetItem(1),
DatasetItem(2),
])
dataset = Dataset.from_extractors(TestExtractor())
dataset.put(DatasetItem(2))
self.assertTrue((2, '') in dataset)
self.assertFalse(iter_called)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_switch_eager_and_lazy_with_cm_global(self):
iter_called = False
class TestExtractor(Extractor):
def __iter__(self):
nonlocal iter_called
iter_called = True
return iter([
DatasetItem(1),
DatasetItem(2),
])
with eager_mode():
Dataset.from_extractors(TestExtractor())
self.assertTrue(iter_called)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_switch_eager_and_lazy_with_cm_local(self):
iter_called = False
class TestExtractor(Extractor):
def __iter__(self):
nonlocal iter_called
iter_called = True
yield from [
DatasetItem(1),
DatasetItem(2),
DatasetItem(3),
DatasetItem(4),
]
dataset = Dataset.from_extractors(TestExtractor())
with eager_mode(dataset=dataset):
dataset.select(lambda item: int(item.id) < 3)
dataset.select(lambda item: int(item.id) < 2)
self.assertTrue(iter_called)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_do_lazy_select(self):
iter_called = 0
class TestExtractor(Extractor):
def __iter__(self):
nonlocal iter_called
iter_called += 1
yield from [
DatasetItem(1),
DatasetItem(2),
DatasetItem(3),
DatasetItem(4),
]
dataset = Dataset.from_extractors(TestExtractor())
dataset.select(lambda item: int(item.id) < 3)
dataset.select(lambda item: int(item.id) < 2)
self.assertEqual(iter_called, 0)
self.assertEqual(1, len(dataset))
self.assertEqual(iter_called, 1)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_chain_lazy_transforms(self):
    """Stacked item-wise transforms are applied lazily: two transforms
    still cost a single pass over the source.
    """
    iter_called = 0

    class TestExtractor(Extractor):
        def __iter__(self):
            nonlocal iter_called
            iter_called += 1  # counts full passes over the source
            yield from [
                DatasetItem(1),
                DatasetItem(2),
                DatasetItem(3),
                DatasetItem(4),
            ]

    dataset = Dataset.from_extractors(TestExtractor())

    class TestTransform(ItemTransform):
        def transform_item(self, item):
            # each application shifts the numeric id up by one
            return self.wrap_item(item, id=int(item.id) + 1)

    dataset.transform(TestTransform)
    dataset.transform(TestTransform)
    self.assertEqual(iter_called, 0)  # still lazy

    self.assertEqual(4, len(dataset))
    self.assertEqual(3, int(min(int(item.id) for item in dataset)))  # 1 + two shifts

    self.assertEqual(iter_called, 1)  # both transforms fused into one pass
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_get_len_after_local_transforms(self):
    """len() after stacked item-wise (local) transforms costs exactly
    one pass over the source.
    """
    iter_called = 0

    class TestExtractor(Extractor):
        def __iter__(self):
            nonlocal iter_called
            iter_called += 1  # counts full passes over the source
            yield from [
                DatasetItem(1),
                DatasetItem(2),
                DatasetItem(3),
                DatasetItem(4),
            ]

    dataset = Dataset.from_extractors(TestExtractor())

    class TestTransform(ItemTransform):
        def transform_item(self, item):
            return self.wrap_item(item, id=int(item.id) + 1)

    dataset.transform(TestTransform)
    dataset.transform(TestTransform)
    self.assertEqual(iter_called, 0)  # still lazy

    self.assertEqual(4, len(dataset))

    self.assertEqual(iter_called, 1)  # local transforms fuse into one pass
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_get_len_after_nonlocal_transforms(self):
    """len() after stacked whole-dataset (non-local) transforms: unlike
    item-wise transforms these are not fused — two source passes are
    expected here (contrast with test_can_get_len_after_local_transforms).
    """
    iter_called = 0

    class TestExtractor(Extractor):
        def __iter__(self):
            nonlocal iter_called
            iter_called += 1  # counts full passes over the source
            yield from [
                DatasetItem(1),
                DatasetItem(2),
                DatasetItem(3),
                DatasetItem(4),
            ]

    dataset = Dataset.from_extractors(TestExtractor())

    class TestTransform(Transform):
        def __iter__(self):
            for item in self._extractor:
                yield self.wrap_item(item, id=int(item.id) + 1)

    dataset.transform(TestTransform)
    dataset.transform(TestTransform)
    self.assertEqual(iter_called, 0)  # still lazy

    self.assertEqual(4, len(dataset))

    self.assertEqual(iter_called, 2)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_get_subsets_after_local_transforms(self):
    """subsets() after stacked item-wise transforms costs exactly one
    pass over the source.
    """
    iter_called = 0

    class TestExtractor(Extractor):
        def __iter__(self):
            nonlocal iter_called
            iter_called += 1  # counts full passes over the source
            yield from [
                DatasetItem(1),
                DatasetItem(2),
                DatasetItem(3),
                DatasetItem(4),
            ]

    dataset = Dataset.from_extractors(TestExtractor())

    class TestTransform(ItemTransform):
        def transform_item(self, item):
            # moves every item into subset 'a'
            return self.wrap_item(item, id=int(item.id) + 1, subset='a')

    dataset.transform(TestTransform)
    dataset.transform(TestTransform)
    self.assertEqual(iter_called, 0)  # still lazy

    self.assertEqual({'a'}, set(dataset.subsets()))

    self.assertEqual(iter_called, 1)  # local transforms fuse into one pass
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_get_subsets_after_nonlocal_transforms(self):
    """subsets() after stacked whole-dataset transforms: non-local
    transforms are not fused — two source passes are expected here.
    """
    iter_called = 0

    class TestExtractor(Extractor):
        def __iter__(self):
            nonlocal iter_called
            iter_called += 1  # counts full passes over the source
            yield from [
                DatasetItem(1),
                DatasetItem(2),
                DatasetItem(3),
                DatasetItem(4),
            ]

    dataset = Dataset.from_extractors(TestExtractor())

    class TestTransform(Transform):
        def __iter__(self):
            for item in self._extractor:
                # moves every item into subset 'a'
                yield self.wrap_item(item, id=int(item.id) + 1, subset='a')

    dataset.transform(TestTransform)
    dataset.transform(TestTransform)
    self.assertEqual(iter_called, 0)  # still lazy

    self.assertEqual({'a'}, set(dataset.subsets()))

    self.assertEqual(iter_called, 2)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_raises_when_repeated_items_in_source(self):
    """Caching a source with duplicate (id, subset) pairs must fail."""
    duplicated = Dataset.from_iterable([DatasetItem(0), DatasetItem(0)])

    with self.assertRaises(RepeatedItemError):
        duplicated.init_cache()
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_check_item_existence(self):
    """Membership works with items, (id, subset) pairs and bare ids."""
    dataset = Dataset.from_iterable([
        DatasetItem(0, subset='a'), DatasetItem(1)
    ])

    # full items
    self.assertIn(DatasetItem(0, subset='a'), dataset)
    self.assertNotIn(DatasetItem(0, subset='b'), dataset)

    # (id, subset) pairs
    self.assertIn((0, 'a'), dataset)
    self.assertNotIn((0, 'b'), dataset)

    # bare ids resolve against the default subset
    self.assertIn(1, dataset)
    self.assertNotIn(0, dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_put_with_id_override(self):
    """Explicit id/subset arguments to put() override the item's own."""
    dataset = Dataset.from_iterable([])

    dataset.put(DatasetItem(0, subset='a'), id=2, subset='b')

    self.assertIn((2, 'b'), dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_compute_cache_with_empty_source(self):
    """init_cache() works when all items come from the patch only."""
    dataset = Dataset.from_iterable([])
    dataset.put(DatasetItem(2))

    dataset.init_cache()

    self.assertIn(2, dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_cant_do_partial_caching_in_get_when_default(self):
    """With the default Extractor.get(), the first get() falls back to a
    full iteration, which caches the whole source at once.
    """
    iter_called = 0

    class TestExtractor(Extractor):
        def __iter__(self):
            nonlocal iter_called
            iter_called += 1  # counts full passes over the source
            return iter([
                DatasetItem(1),
                DatasetItem(2),
                DatasetItem(3),
                DatasetItem(4),
            ])

    dataset = Dataset.from_extractors(TestExtractor())

    dataset.get(3)
    dataset.get(4)

    self.assertEqual(1, iter_called)  # a single pass served both gets
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_do_partial_caching_in_get_when_redefined(self):
    """If the extractor overrides get(), item access goes through it
    directly and never costs a full source iteration.
    """
    iter_called = 0
    get_called = 0

    class TestExtractor(Extractor):
        def __iter__(self):
            nonlocal iter_called
            iter_called += 1  # counts full passes over the source
            return iter([
                DatasetItem(1),
                DatasetItem(2),
                DatasetItem(3),
                DatasetItem(4),
            ])

        def get(self, id, subset=None):
            nonlocal get_called
            get_called += 1  # counts direct item lookups
            return DatasetItem(id, subset=subset)

    dataset = Dataset.from_extractors(TestExtractor())

    dataset.get(3)
    dataset.get(4)

    self.assertEqual(0, iter_called)  # no full pass was needed
    self.assertEqual(2, get_called)  # one direct lookup per access
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_binds_on_save(self):
    """save() binds an unbound dataset to the target path and format."""
    dataset = Dataset.from_iterable([DatasetItem(1)])
    self.assertFalse(dataset.is_bound)

    with TestDir() as test_dir:
        dataset.save(test_dir)

        self.assertTrue(dataset.is_bound)
        self.assertEqual(test_dir, dataset.data_path)
        self.assertEqual(DEFAULT_FORMAT, dataset.format)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_flushes_changes_on_save(self):
    """A pending modification is cleared once the dataset is saved."""
    dataset = Dataset.from_iterable([])
    dataset.put(DatasetItem(1))
    self.assertTrue(dataset.is_modified)

    with TestDir() as test_dir:
        dataset.save(test_dir)

        self.assertFalse(dataset.is_modified)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_does_not_load_images_on_saving(self):
    """Saving without image export must not materialize lazy images.

    Issue https://github.com/openvinotoolkit/datumaro/issues/177
    Missing image metadata (size etc.) can lead to image loading on
    dataset save without image saving.
    """
    called = False

    def test_loader():
        nonlocal called
        called = True  # flips if the lazy image is ever loaded

    dataset = Dataset.from_iterable([
        DatasetItem(1, image=test_loader)
    ])

    with TestDir() as test_dir:
        dataset.save(test_dir)

    self.assertFalse(called)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_transform_labels(self):
    """Transforms registered by name (remap_labels) can be applied."""
    source = Dataset.from_iterable([], categories=['a', 'b'])
    expected = Dataset.from_iterable([], categories=['c', 'b'])

    remapped = source.transform('remap_labels', {'a': 'c'})

    compare_datasets(self, expected, remapped)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_run_model(self):
    """run_model() feeds items to the launcher in batches and attaches
    the produced annotations to the corresponding items.
    """
    dataset = Dataset.from_iterable([
        DatasetItem(i, image=np.array([i]))
        for i in range(5)
    ], categories=['label'])

    batch_size = 3

    expected = Dataset.from_iterable([
        DatasetItem(i, image=np.array([i]), annotations=[
            # 'idx' is the position inside the batch, 'data' the input value
            Label(0, attributes={ 'idx': i % batch_size, 'data': i })
        ])
        for i in range(5)
    ], categories=['label'])

    calls = 0

    class TestLauncher(Launcher):
        def launch(self, inputs):
            nonlocal calls
            calls += 1  # one call per batch
            for i, inp in enumerate(inputs):
                yield [ Label(0, attributes={'idx': i, 'data': inp.item()}) ]

    model = TestLauncher()

    actual = dataset.run_model(model, batch_size=batch_size)

    compare_datasets(self, expected, actual, require_images=True)
    self.assertEqual(2, calls)  # 5 items at batch_size 3 -> 2 batches
@mark_requirement(Requirements.DATUM_BUG_259)
def test_can_filter_items(self):
    """An item-level XPath filter drops non-matching items in place."""
    items = [
        DatasetItem(id=0, subset='train'),
        DatasetItem(id=1, subset='test'),
    ]
    dataset = Dataset.from_iterable(items)

    dataset.filter('/item[id > 0]')

    self.assertEqual(1, len(dataset))
@mark_requirement(Requirements.DATUM_BUG_257)
def test_filter_registers_changes(self):
    """filter() results must be visible in the patch: dropped items are
    reported as removed.
    """
    dataset = Dataset.from_iterable([
        DatasetItem(id=0, subset='train'),
        DatasetItem(id=1, subset='test'),
    ])

    dataset.filter('/item[id > 0]')

    self.assertEqual({
        ('0', 'train'): ItemStatus.removed,
        # kept items are currently also reported as modified
        ('1', 'test'): ItemStatus.modified, # TODO: remove this line
    }, dataset.get_patch().updated_items)
@mark_requirement(Requirements.DATUM_BUG_259)
def test_can_filter_annotations(self):
    """Annotation-level filtering can also drop items left empty."""
    label_names = ['a', 'b', 'c']
    dataset = Dataset.from_iterable([
        DatasetItem(id=0, subset='train', annotations=[Label(0), Label(1)]),
        DatasetItem(id=1, subset='val', annotations=[Label(2)]),
        DatasetItem(id=2, subset='test', annotations=[Label(0), Label(2)]),
    ], categories=label_names)

    dataset.filter('/item/annotation[label = "c"]',
        filter_annotations=True, remove_empty=True)

    # only the items that had a 'c' annotation survive
    self.assertEqual(2, len(dataset))
@mark_requirement(Requirements.DATUM_BUG_259)
def test_can_filter_items_in_merged_dataset(self):
    """Item filtering works on a dataset merged from several sources."""
    merged = Dataset.from_extractors(
        Dataset.from_iterable([ DatasetItem(id=0, subset='train') ]),
        Dataset.from_iterable([ DatasetItem(id=1, subset='test') ]),
    )

    merged.filter('/item[id > 0]')

    self.assertEqual(1, len(merged))
@mark_requirement(Requirements.DATUM_BUG_259)
def test_can_filter_annotations_in_merged_dataset(self):
    """Annotation filtering works on a merged dataset too."""
    label_names = ['a', 'b', 'c']
    sources = [
        Dataset.from_iterable([
            DatasetItem(id=item_id, subset=subset, annotations=[Label(label)]),
        ], categories=label_names)
        for item_id, (subset, label) in enumerate(
            [('train', 0), ('val', 1), ('test', 2)])
    ]
    merged = Dataset.from_extractors(*sources)

    merged.filter('/item/annotation[label = "c"]',
        filter_annotations=True, remove_empty=True)

    # only the item annotated with 'c' survives
    self.assertEqual(1, len(merged))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_inplace_save_writes_only_updated_data(self):
    """An in-place save() after edits rewrites only the changed items
    and removes the files of deleted ones.
    """
    class CustomConverter(Converter):
        DEFAULT_IMAGE_EXT = '.jpg'

        def apply(self):
            assert osp.isdir(self._save_dir)

            for item in self._extractor:
                name = f'{item.subset}_{item.id}'
                with open(osp.join(
                        self._save_dir, name + '.txt'), 'w') as f:
                    f.write('\n')

                # images are written only when actual pixel data exists
                if self._save_images and \
                        item.has_image and item.image.has_data:
                    self._save_image(item, name=name)

    env = Environment()
    env.converters.items = { 'test': CustomConverter }

    with TestDir() as path:
        dataset = Dataset.from_iterable([
            DatasetItem(1, subset='train', image=np.ones((2, 4, 3))),
            # item 2 initially has only image metadata, no data
            DatasetItem(2, subset='train',
                image=Image(path='2.jpg', size=(3, 2))),
            DatasetItem(3, subset='valid', image=np.ones((2, 2, 3))),
        ], categories=[], env=env)
        dataset.export(path, 'test', save_images=True)

        dataset.put(DatasetItem(2, subset='train', image=np.ones((3, 2, 3))))
        dataset.remove(3, 'valid')
        dataset.save(save_images=True)

        # item 2 was rewritten (now with image data); item 3's files are gone
        self.assertEqual({
            'train_1.txt', 'train_1.jpg',
            'train_2.txt', 'train_2.jpg'
        },
        set(os.listdir(path)))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_update_overwrites_matching_items(self):
    """update() replaces items with matching (id, subset), keeps the rest."""
    label_names = ['a', 'b']
    patch = Dataset.from_iterable([
        DatasetItem(id=1, annotations=[ Bbox(1, 2, 3, 4, label=1) ])
    ], categories=label_names)
    dataset = Dataset.from_iterable([
        DatasetItem(id=1, annotations=[ Bbox(2, 2, 1, 1, label=0) ]),
        DatasetItem(id=2, annotations=[ Bbox(1, 1, 1, 1, label=1) ]),
    ], categories=label_names)

    # item 1 is taken from the patch, item 2 is untouched
    expected = Dataset.from_iterable([
        DatasetItem(id=1, annotations=[ Bbox(1, 2, 3, 4, label=1) ]),
        DatasetItem(id=2, annotations=[ Bbox(1, 1, 1, 1, label=1) ]),
    ], categories=label_names)

    dataset.update(patch)

    compare_datasets(self, expected, dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_update_can_reorder_labels(self):
    """When patch categories are a permutation of the destination's,
    update() remaps label ids into the destination's label order.
    """
    patch = Dataset.from_iterable([
        DatasetItem(id=1, annotations=[ Bbox(1, 2, 3, 4, label=1) ])
    ], categories=['b', 'a'])
    dataset = Dataset.from_iterable([
        DatasetItem(id=1, annotations=[ Bbox(2, 2, 1, 1, label=0) ])
    ], categories=['a', 'b'])

    # label 1 ('a' in the patch) becomes label 0 in the destination
    expected = Dataset.from_iterable([
        DatasetItem(id=1, annotations=[ Bbox(1, 2, 3, 4, label=0) ])
    ], categories=['a', 'b'])

    dataset.update(patch)

    compare_datasets(self, expected, dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_update_can_project_labels(self):
    """update() projects patch label ids onto the destination's
    categories; annotations whose label is unknown to the destination
    are dropped.
    """
    dataset = Dataset.from_iterable([
        # Must be overridden
        DatasetItem(id=100, annotations=[
            Bbox(1, 2, 3, 3, label=0),
        ]),
        # Must be kept
        DatasetItem(id=1, annotations=[
            Bbox(1, 2, 3, 4, label=1)
        ]),
    ], categories=['a', 'b'])
    patch = Dataset.from_iterable([
        # Must override
        DatasetItem(id=100, annotations=[
            Bbox(1, 2, 3, 4, label=0), # Label must be remapped
            Bbox(5, 6, 2, 3, label=1), # Label must be remapped
            Bbox(2, 2, 2, 3, label=2), # Will be dropped due to label
        ]),
        # Must be added
        DatasetItem(id=2, annotations=[
            Bbox(1, 2, 3, 2, label=1) # Label must be remapped
        ]),
    ], categories=['b', 'a', 'c'])

    expected = Dataset.from_iterable([
        DatasetItem(id=100, annotations=[
            Bbox(1, 2, 3, 4, label=1),
            Bbox(5, 6, 2, 3, label=0),
        ]),
        DatasetItem(id=1, annotations=[
            Bbox(1, 2, 3, 4, label=1)
        ]),
        DatasetItem(id=2, annotations=[
            Bbox(1, 2, 3, 2, label=0)
        ]),
    ], categories=['a', 'b'])

    dataset.update(patch)

    compare_datasets(self, expected, dataset, ignored_attrs='*')
class DatasetItemTest(TestCase):
    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_ctor_requires_id(self):
        """Constructing an item without an id must fail."""
        with self.assertRaises(Exception):
            # pylint: disable=no-value-for-parameter
            DatasetItem()
            # pylint: enable=no-value-for-parameter

    @staticmethod
    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_ctors_with_image():
        """Every supported form of the 'image' argument is accepted."""
        image_variants = [
            None,                             # no image at all
            'path.jpg',                       # a path string
            np.array([1, 2, 3]),              # in-memory pixel data
            lambda f: np.array([1, 2, 3]),    # a lazy loader callable
            Image(data=np.array([1, 2, 3])),  # a wrapper object
        ]
        for image in image_variants:
            DatasetItem(id=0, image=image)
class DatasetFilterTest(TestCase):
    """Tests for the XPath-based item and annotation filters."""

    @staticmethod
    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_item_representations():
        """Every annotation type can be XML-encoded and stringified."""
        item = DatasetItem(id=1, subset='subset',
            image=np.ones((5, 4, 3)),
            annotations=[
                Label(0, attributes={'a1': 1, 'a2': '2'}, id=1, group=2),
                Caption('hello', id=1),
                Caption('world', group=5),
                Label(2, id=3, attributes={ 'x': 1, 'y': '2' }),
                Bbox(1, 2, 3, 4, label=4, id=4, attributes={ 'a': 1.0 }),
                Bbox(5, 6, 7, 8, id=5, group=5),
                Points([1, 2, 2, 0, 1, 1], label=0, id=5),
                Mask(id=5, image=np.ones((3, 2))),
                Mask(label=3, id=5, image=np.ones((2, 3))),
                PolyLine([1, 2, 3, 4, 5, 6, 7, 8], id=11),
                Polygon([1, 2, 3, 4, 5, 6, 7, 8]),
            ]
        )

        encoded = DatasetItemEncoder.encode(item)
        DatasetItemEncoder.to_string(encoded)  # must not raise

    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_item_filter_can_be_applied(self):
        """An item-level XPath filter keeps only matching items."""
        class TestExtractor(Extractor):
            def __iter__(self):
                for i in range(4):
                    yield DatasetItem(id=i, subset='train')

        extractor = TestExtractor()

        filtered = XPathDatasetFilter(extractor, '/item[id > 1]')

        self.assertEqual(2, len(filtered))

    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_annotations_filter_can_be_applied(self):
        """An annotation-level XPath filter prunes annotations while
        keeping the (possibly emptied) items.
        """
        class SrcExtractor(Extractor):
            def __iter__(self):
                return iter([
                    DatasetItem(id=0),
                    DatasetItem(id=1, annotations=[
                        Label(0),
                        Label(1),
                    ]),
                    DatasetItem(id=2, annotations=[
                        Label(0),
                        Label(2),
                    ]),
                ])

        class DstExtractor(Extractor):
            def __iter__(self):
                return iter([
                    DatasetItem(id=0),
                    DatasetItem(id=1, annotations=[
                        Label(0),
                    ]),
                    DatasetItem(id=2, annotations=[
                        Label(0),
                    ]),
                ])

        extractor = SrcExtractor()

        filtered = XPathAnnotationsFilter(extractor,
            '/item/annotation[label_id = 0]')

        self.assertListEqual(list(filtered), list(DstExtractor()))

    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_annotations_filter_can_remove_empty_items(self):
        """remove_empty=True additionally drops items whose annotations
        were all filtered out.
        """
        source = Dataset.from_iterable([
            DatasetItem(id=0),
            DatasetItem(id=1, annotations=[
                Label(0),
                Label(1),
            ]),
            DatasetItem(id=2, annotations=[
                Label(0),
                Label(2),
            ]),
        ], categories=['a', 'b', 'c'])

        expected = Dataset.from_iterable([
            DatasetItem(id=2, annotations=[Label(2)]),
        ], categories=['a', 'b', 'c'])

        filtered = XPathAnnotationsFilter(source,
            '/item/annotation[label_id = 2]', remove_empty=True)

        compare_datasets(self, expected, filtered)
| 33.371168 | 81 | 0.572403 | from unittest import TestCase
import os
import os.path as osp
import numpy as np
from datumaro.components.annotation import (
AnnotationType, Bbox, Caption, Label, LabelCategories, Mask, Points,
Polygon, PolyLine,
)
from datumaro.components.converter import Converter
from datumaro.components.dataset import (
DEFAULT_FORMAT, Dataset, ItemStatus, eager_mode,
)
from datumaro.components.dataset_filter import (
DatasetItemEncoder, XPathAnnotationsFilter, XPathDatasetFilter,
)
from datumaro.components.environment import Environment
from datumaro.components.errors import (
ConflictingCategoriesError, DatasetNotFoundError, MultipleFormatsMatchError,
NoMatchingFormatsError, RepeatedItemError, UnknownFormatError,
)
from datumaro.components.extractor import (
DEFAULT_SUBSET_NAME, DatasetItem, Extractor, ItemTransform, Transform,
)
from datumaro.components.launcher import Launcher
from datumaro.components.media import Image
from datumaro.util.test_utils import TestDir, compare_datasets
from .requirements import Requirements, mark_requirement
class DatasetTest(TestCase):
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_create_from_extractors(self):
class SrcExtractor1(Extractor):
def __iter__(self):
return iter([
DatasetItem(id=1, subset='train', annotations=[
Bbox(1, 2, 3, 4),
Label(4),
]),
DatasetItem(id=1, subset='val', annotations=[
Label(4),
]),
])
class SrcExtractor2(Extractor):
def __iter__(self):
return iter([
DatasetItem(id=1, subset='val', annotations=[
Label(5),
]),
])
class DstExtractor(Extractor):
def __iter__(self):
return iter([
DatasetItem(id=1, subset='train', annotations=[
Bbox(1, 2, 3, 4),
Label(4),
]),
DatasetItem(id=1, subset='val', annotations=[
Label(4),
Label(5),
]),
])
dataset = Dataset.from_extractors(SrcExtractor1(), SrcExtractor2())
compare_datasets(self, DstExtractor(), dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_create_from_iterable(self):
class TestExtractor(Extractor):
def __iter__(self):
return iter([
DatasetItem(id=1, subset='train', annotations=[
Bbox(1, 2, 3, 4, label=2),
Label(4),
]),
DatasetItem(id=1, subset='val', annotations=[
Label(3),
]),
])
def categories(self):
return { AnnotationType.label: LabelCategories.from_iterable(
['a', 'b', 'c', 'd', 'e'])
}
actual = Dataset.from_iterable([
DatasetItem(id=1, subset='train', annotations=[
Bbox(1, 2, 3, 4, label=2),
Label(4),
]),
DatasetItem(id=1, subset='val', annotations=[
Label(3),
]),
], categories=['a', 'b', 'c', 'd', 'e'])
compare_datasets(self, TestExtractor(), actual)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_join_datasets_with_empty_categories(self):
expected = Dataset.from_iterable([
DatasetItem(1, annotations=[
Label(0),
Bbox(1, 2, 3, 4),
Caption('hello world'),
])
], categories=['a'])
src1 = Dataset.from_iterable([
DatasetItem(1, annotations=[ Bbox(1, 2, 3, 4, label=None) ])
], categories=[])
src2 = Dataset.from_iterable([
DatasetItem(1, annotations=[ Label(0) ])
], categories=['a'])
src3 = Dataset.from_iterable([
DatasetItem(1, annotations=[ Caption('hello world') ])
])
actual = Dataset.from_extractors(src1, src2, src3)
compare_datasets(self, expected, actual)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Label(2) ]),
], categories=['a', 'b', 'c'])
with TestDir() as test_dir:
source_dataset.save(test_dir)
loaded_dataset = Dataset.load(test_dir)
compare_datasets(self, source_dataset, loaded_dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_detect(self):
env = Environment()
env.importers.items = {DEFAULT_FORMAT: env.importers[DEFAULT_FORMAT]}
env.extractors.items = {DEFAULT_FORMAT: env.extractors[DEFAULT_FORMAT]}
dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Label(2) ]),
], categories=['a', 'b', 'c'])
with TestDir() as test_dir:
dataset.save(test_dir)
detected_format = Dataset.detect(test_dir, env=env)
self.assertEqual(DEFAULT_FORMAT, detected_format)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_detect_and_import(self):
env = Environment()
env.importers.items = {DEFAULT_FORMAT: env.importers[DEFAULT_FORMAT]}
env.extractors.items = {DEFAULT_FORMAT: env.extractors[DEFAULT_FORMAT]}
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Label(2) ]),
], categories=['a', 'b', 'c'])
with TestDir() as test_dir:
source_dataset.save(test_dir)
imported_dataset = Dataset.import_from(test_dir, env=env)
self.assertEqual(imported_dataset.data_path, test_dir)
self.assertEqual(imported_dataset.format, DEFAULT_FORMAT)
compare_datasets(self, source_dataset, imported_dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_report_no_dataset_found(self):
env = Environment()
env.importers.items = {
DEFAULT_FORMAT: env.importers[DEFAULT_FORMAT],
}
env.extractors.items = {
DEFAULT_FORMAT: env.extractors[DEFAULT_FORMAT],
}
with TestDir() as test_dir, self.assertRaises(DatasetNotFoundError):
Dataset.import_from(test_dir, DEFAULT_FORMAT, env=env)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_report_multiple_formats_match(self):
env = Environment()
env.importers.items = {
'a': env.importers[DEFAULT_FORMAT],
'b': env.importers[DEFAULT_FORMAT],
}
env.extractors.items = {
'a': env.extractors[DEFAULT_FORMAT],
'b': env.extractors[DEFAULT_FORMAT],
}
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Label(2) ]),
], categories=['a', 'b', 'c'])
with TestDir() as test_dir:
source_dataset.save(test_dir)
with self.assertRaises(MultipleFormatsMatchError):
Dataset.import_from(test_dir, env=env)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_report_no_matching_formats(self):
env = Environment()
env.importers.items = {}
env.extractors.items = {}
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Label(2) ]),
], categories=['a', 'b', 'c'])
with TestDir() as test_dir:
source_dataset.save(test_dir)
with self.assertRaises(NoMatchingFormatsError):
Dataset.import_from(test_dir, env=env)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_report_unknown_format_requested(self):
env = Environment()
env.importers.items = {}
env.extractors.items = {}
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Label(2) ]),
], categories=['a', 'b', 'c'])
with TestDir() as test_dir:
source_dataset.save(test_dir)
with self.assertRaises(UnknownFormatError):
Dataset.import_from(test_dir, format='custom', env=env)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_export_by_string_format_name(self):
env = Environment()
env.converters.items = {'qq': env.converters[DEFAULT_FORMAT]}
dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Label(2) ]),
], categories=['a', 'b', 'c'], env=env)
with TestDir() as test_dir:
dataset.export(format='qq', save_dir=test_dir)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_remember_export_options(self):
dataset = Dataset.from_iterable([
DatasetItem(id=1, image=np.ones((1, 2, 3))),
], categories=['a'])
with TestDir() as test_dir:
dataset.save(test_dir, save_images=True)
dataset.put(dataset.get(1))
image_path = osp.join(test_dir, 'images', 'default', '1.jpg')
os.remove(image_path)
dataset.save(test_dir)
self.assertEqual({'save_images': True}, dataset.options)
self.assertTrue(osp.isfile(image_path))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_compute_length_when_created_from_scratch(self):
dataset = Dataset()
dataset.put(DatasetItem(1))
dataset.put(DatasetItem(2))
dataset.put(DatasetItem(3))
dataset.remove(1)
self.assertEqual(2, len(dataset))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_compute_length_when_created_from_extractor(self):
class TestExtractor(Extractor):
def __iter__(self):
yield from [
DatasetItem(1),
DatasetItem(2),
DatasetItem(3),
]
dataset = Dataset.from_extractors(TestExtractor())
self.assertEqual(3, len(dataset))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_compute_length_when_created_from_sequence(self):
dataset = Dataset.from_iterable([
DatasetItem(1),
DatasetItem(2),
DatasetItem(3),
])
self.assertEqual(3, len(dataset))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_transform_by_string_name(self):
expected = Dataset.from_iterable([
DatasetItem(id=1, attributes={'qq': 1}),
])
class TestTransform(ItemTransform):
def transform_item(self, item):
return self.wrap_item(item, attributes={'qq': 1})
env = Environment()
env.transforms.register('qq', TestTransform)
dataset = Dataset.from_iterable([ DatasetItem(id=1) ], env=env)
actual = dataset.transform('qq')
compare_datasets(self, expected, actual)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_transform(self):
expected = Dataset.from_iterable([
DatasetItem(id=1, attributes={'qq': 1}),
])
class TestTransform(ItemTransform):
def transform_item(self, item):
return self.wrap_item(item, attributes={'qq': 1})
dataset = Dataset.from_iterable([ DatasetItem(id=1) ])
actual = dataset.transform(TestTransform)
compare_datasets(self, expected, actual)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_join_annotations(self):
a = Dataset.from_iterable([
DatasetItem(id=1, subset='train', annotations=[
Label(1, id=3),
Label(2, attributes={ 'x': 1 }),
])
], categories=['a', 'b', 'c', 'd'])
b = Dataset.from_iterable([
DatasetItem(id=1, subset='train', annotations=[
Label(2, attributes={ 'x': 1 }),
Label(3, id=4),
])
], categories=['a', 'b', 'c', 'd'])
expected = Dataset.from_iterable([
DatasetItem(id=1, subset='train', annotations=[
Label(1, id=3),
Label(2, attributes={ 'x': 1 }),
Label(3, id=4),
])
], categories=['a', 'b', 'c', 'd'])
merged = Dataset.from_extractors(a, b)
compare_datasets(self, expected, merged)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_cant_join_different_categories(self):
s1 = Dataset.from_iterable([], categories=['a', 'b'])
s2 = Dataset.from_iterable([], categories=['b', 'a'])
with self.assertRaises(ConflictingCategoriesError):
Dataset.from_extractors(s1, s2)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_join_datasets(self):
s1 = Dataset.from_iterable([ DatasetItem(0), DatasetItem(1) ])
s2 = Dataset.from_iterable([ DatasetItem(1), DatasetItem(2) ])
expected = Dataset.from_iterable([
DatasetItem(0), DatasetItem(1), DatasetItem(2)
])
actual = Dataset.from_extractors(s1, s2)
compare_datasets(self, expected, actual)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_track_modifications_on_addition(self):
dataset = Dataset.from_iterable([
DatasetItem(1),
DatasetItem(2),
])
self.assertFalse(dataset.is_modified)
dataset.put(DatasetItem(3, subset='a'))
self.assertTrue(dataset.is_modified)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_track_modifications_on_removal(self):
dataset = Dataset.from_iterable([
DatasetItem(1),
DatasetItem(2),
])
self.assertFalse(dataset.is_modified)
dataset.remove(1)
self.assertTrue(dataset.is_modified)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_create_patch(self):
expected = Dataset.from_iterable([
DatasetItem(2),
DatasetItem(3, subset='a')
])
dataset = Dataset.from_iterable([
DatasetItem(1),
DatasetItem(2),
])
dataset.put(DatasetItem(2))
dataset.put(DatasetItem(3, subset='a'))
dataset.remove(1)
patch = dataset.get_patch()
self.assertEqual({
('1', DEFAULT_SUBSET_NAME): ItemStatus.removed,
('2', DEFAULT_SUBSET_NAME): ItemStatus.added,
('3', 'a'): ItemStatus.added,
}, patch.updated_items)
self.assertEqual({
'default': ItemStatus.modified,
'a': ItemStatus.modified,
}, patch.updated_subsets)
self.assertEqual(2, len(patch.data))
self.assertEqual(None, patch.data.get(1))
self.assertEqual(dataset.get(2), patch.data.get(2))
self.assertEqual(dataset.get(3, 'a'), patch.data.get(3, 'a'))
compare_datasets(self, expected, dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_create_patch_when_cached(self):
expected = Dataset.from_iterable([
DatasetItem(2),
DatasetItem(3, subset='a')
])
dataset = Dataset.from_iterable([
DatasetItem(1),
DatasetItem(2),
])
dataset.init_cache()
dataset.put(DatasetItem(2))
dataset.put(DatasetItem(3, subset='a'))
dataset.remove(1)
patch = dataset.get_patch()
self.assertEqual({
('1', DEFAULT_SUBSET_NAME): ItemStatus.removed,
('2', DEFAULT_SUBSET_NAME): ItemStatus.modified,
('3', 'a'): ItemStatus.added,
}, patch.updated_items)
self.assertEqual({
'default': ItemStatus.modified,
'a': ItemStatus.modified,
}, patch.updated_subsets)
self.assertEqual(2, len(patch.data))
self.assertEqual(None, patch.data.get(1))
self.assertEqual(dataset.get(2), patch.data.get(2))
self.assertEqual(dataset.get(3, 'a'), patch.data.get(3, 'a'))
compare_datasets(self, expected, dataset)
@mark_requirement(Requirements.DATUM_BUG_257)
def test_can_create_patch_when_transforms_mixed(self):
expected = Dataset.from_iterable([
DatasetItem(2),
DatasetItem(3, subset='a')
])
dataset = Dataset.from_iterable([
DatasetItem(1),
DatasetItem(2),
])
class Remove1(Transform):
def __iter__(self):
for item in self._extractor:
if item.id != '1':
yield item
class Add3(Transform):
def __iter__(self):
for item in self._extractor:
if item.id == '2':
yield item
yield DatasetItem(3, subset='a')
dataset.transform(Remove1)
dataset.transform(Add3)
patch = dataset.get_patch()
self.assertEqual({
('1', DEFAULT_SUBSET_NAME): ItemStatus.removed,
('2', DEFAULT_SUBSET_NAME): ItemStatus.modified,
('3', 'a'): ItemStatus.added,
}, patch.updated_items)
self.assertEqual({
'default': ItemStatus.modified,
'a': ItemStatus.modified,
}, patch.updated_subsets)
self.assertEqual(2, len(patch.data))
self.assertEqual(None, patch.data.get(1))
self.assertEqual(dataset.get(2), patch.data.get(2))
self.assertEqual(dataset.get(3, 'a'), patch.data.get(3, 'a'))
compare_datasets(self, expected, dataset)
@mark_requirement(Requirements.DATUM_BUG_257)
def test_can_create_patch_when_transforms_chained(self):
expected = Dataset.from_iterable([
DatasetItem(2),
DatasetItem(3, subset='a')
])
class TestExtractor(Extractor):
iter_called = 0
def __iter__(self):
yield from [
DatasetItem(1),
DatasetItem(2),
]
__class__.iter_called += 1
class Remove1(Transform):
iter_called = 0
def __iter__(self):
for item in self._extractor:
if item.id != '1':
yield item
__class__.iter_called += 1
class Add3(Transform):
iter_called = 0
def __iter__(self):
yield from self._extractor
yield DatasetItem(3, subset='a')
__class__.iter_called += 1
dataset = Dataset.from_extractors(TestExtractor())
dataset.transform(Remove1)
dataset.transform(Add3)
patch = dataset.get_patch()
self.assertEqual({
('1', DEFAULT_SUBSET_NAME): ItemStatus.removed,
('2', DEFAULT_SUBSET_NAME): ItemStatus.modified,
('3', 'a'): ItemStatus.added,
}, patch.updated_items)
self.assertEqual({
'default': ItemStatus.modified,
'a': ItemStatus.modified,
}, patch.updated_subsets)
self.assertEqual(2, len(patch.data))
self.assertEqual(None, patch.data.get(1))
self.assertEqual(dataset.get(2), patch.data.get(2))
self.assertEqual(dataset.get(3, 'a'), patch.data.get(3, 'a'))
self.assertEqual(TestExtractor.iter_called, 2)
self.assertEqual(Remove1.iter_called, 1)
self.assertEqual(Add3.iter_called, 1)
compare_datasets(self, expected, dataset)
@mark_requirement(Requirements.DATUM_BUG_257)
def test_can_create_patch_when_transforms_intermixed_with_direct_ops(self):
expected = Dataset.from_iterable([
DatasetItem(3, subset='a'),
DatasetItem(4),
DatasetItem(5),
])
class TestExtractor(Extractor):
iter_called = 0
def __iter__(self):
yield from [
DatasetItem(1),
DatasetItem(2),
]
__class__.iter_called += 1
class Remove1(Transform):
iter_called = 0
def __iter__(self):
for item in self._extractor:
if item.id != '1':
yield item
__class__.iter_called += 1
class Add3(Transform):
iter_called = 0
def __iter__(self):
yield from self._extractor
yield DatasetItem(3, subset='a')
__class__.iter_called += 1
dataset = Dataset.from_extractors(TestExtractor())
dataset.init_cache()
dataset.put(DatasetItem(4))
dataset.transform(Remove1)
dataset.put(DatasetItem(5))
dataset.remove(2)
dataset.transform(Add3)
patch = dataset.get_patch()
self.assertEqual({
('1', DEFAULT_SUBSET_NAME): ItemStatus.removed,
('2', DEFAULT_SUBSET_NAME): ItemStatus.removed,
('3', 'a'): ItemStatus.added,
('4', DEFAULT_SUBSET_NAME): ItemStatus.added,
('5', DEFAULT_SUBSET_NAME): ItemStatus.added,
}, patch.updated_items)
self.assertEqual({
'default': ItemStatus.modified,
'a': ItemStatus.modified,
}, patch.updated_subsets)
self.assertEqual(3, len(patch.data))
self.assertEqual(None, patch.data.get(1))
self.assertEqual(None, patch.data.get(2))
self.assertEqual(dataset.get(3, 'a'), patch.data.get(3, 'a'))
self.assertEqual(dataset.get(4), patch.data.get(4))
self.assertEqual(dataset.get(5), patch.data.get(5))
self.assertEqual(TestExtractor.iter_called, 1)
self.assertEqual(Remove1.iter_called, 1)
self.assertEqual(Add3.iter_called, 1)
compare_datasets(self, expected, dataset)
@mark_requirement(Requirements.DATUM_BUG_257)
    def test_can_create_patch_when_local_transforms_stacked(self):
        """Stacked item-level transforms plus direct put/remove must be
        collapsed into a single consistent patch.

        Scenario: source yields items 1 and 2; item 2 is removed, then
        every id is shifted by +1 three times (1 -> 4), then item 5 is
        put directly. The patch must report 1 and 2 as removed and the
        final ids 4 and 5 as added, and the source must be iterated
        exactly once despite three stacked transforms.
        """
        # Final dataset contents after all operations below.
        expected = Dataset.from_iterable([
            DatasetItem(4),
            DatasetItem(5),
        ])
        class TestExtractor(Extractor):
            # Counts full iterations of the source to assert laziness.
            iter_called = 0
            def __iter__(self):
                yield from [
                    DatasetItem(1),
                    DatasetItem(2),
                ]
                # Incremented only when the generator is exhausted.
                __class__.iter_called += 1
        class ShiftIds(ItemTransform):
            # Local (per-item) transform: renames each item id to id + 1.
            def transform_item(self, item):
                return item.wrap(id=int(item.id) + 1)
        dataset = Dataset.from_extractors(TestExtractor())
        dataset.remove(2)
        # Three stacked local transforms: remaining item 1 becomes 4.
        dataset.transform(ShiftIds)
        dataset.transform(ShiftIds)
        dataset.transform(ShiftIds)
        dataset.put(DatasetItem(5))
        patch = dataset.get_patch()
        # Original ids are gone; only the final ids appear as additions.
        self.assertEqual({
            ('1', DEFAULT_SUBSET_NAME): ItemStatus.removed,
            ('2', DEFAULT_SUBSET_NAME): ItemStatus.removed,
            ('4', DEFAULT_SUBSET_NAME): ItemStatus.added,
            ('5', DEFAULT_SUBSET_NAME): ItemStatus.added,
        }, patch.updated_items)
        self.assertEqual({
            'default': ItemStatus.modified,
        }, patch.updated_subsets)
        # Patch data holds only the surviving items (4 and 5);
        # intermediate ids 1-3 must not leak into it.
        self.assertEqual(2, len(patch.data))
        self.assertEqual(None, patch.data.get(1))
        self.assertEqual(None, patch.data.get(2))
        self.assertEqual(None, patch.data.get(3))
        self.assertEqual(dataset.get(4), patch.data.get(4))
        self.assertEqual(dataset.get(5), patch.data.get(5))
        # Stacked local transforms must share one pass over the source.
        self.assertEqual(TestExtractor.iter_called, 1)
        compare_datasets(self, expected, dataset)
@mark_requirement(Requirements.DATUM_BUG_257)
def test_can_create_patch_when_transforms_chained_and_source_cached(self):
expected = Dataset.from_iterable([
DatasetItem(2),
DatasetItem(3, subset='a')
])
class TestExtractor(Extractor):
iter_called = 0
def __iter__(self):
yield from [
DatasetItem(1),
DatasetItem(2),
]
__class__.iter_called += 1
class Remove1(Transform):
iter_called = 0
def __iter__(self):
for item in self._extractor:
if item.id != '1':
yield item
__class__.iter_called += 1
class Add3(Transform):
iter_called = 0
def __iter__(self):
yield from self._extractor
yield DatasetItem(3, subset='a')
__class__.iter_called += 1
dataset = Dataset.from_extractors(TestExtractor())
dataset.init_cache()
dataset.transform(Remove1)
dataset.transform(Add3)
patch = dataset.get_patch()
self.assertEqual({
('1', DEFAULT_SUBSET_NAME): ItemStatus.removed,
('2', DEFAULT_SUBSET_NAME): ItemStatus.modified,
('3', 'a'): ItemStatus.added,
}, patch.updated_items)
self.assertEqual({
'default': ItemStatus.modified,
'a': ItemStatus.modified,
}, patch.updated_subsets)
self.assertEqual(2, len(patch.data))
self.assertEqual(None, patch.data.get(1))
self.assertEqual(dataset.get(2), patch.data.get(2))
self.assertEqual(dataset.get(3, 'a'), patch.data.get(3, 'a'))
self.assertEqual(TestExtractor.iter_called, 1)
self.assertEqual(Remove1.iter_called, 1)
self.assertEqual(Add3.iter_called, 1)
compare_datasets(self, expected, dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_do_lazy_put_and_remove(self):
iter_called = False
class TestExtractor(Extractor):
def __iter__(self):
nonlocal iter_called
iter_called = True
return iter([
DatasetItem(1),
DatasetItem(2),
])
dataset = Dataset.from_extractors(TestExtractor())
self.assertFalse(dataset.is_cache_initialized)
dataset.put(DatasetItem(3))
dataset.remove(DatasetItem(1))
self.assertFalse(dataset.is_cache_initialized)
self.assertFalse(iter_called)
dataset.init_cache()
self.assertTrue(dataset.is_cache_initialized)
self.assertTrue(iter_called)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_put(self):
dataset = Dataset()
dataset.put(DatasetItem(1))
self.assertTrue((1, '') in dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_do_lazy_get_on_updated_item(self):
iter_called = False
class TestExtractor(Extractor):
def __iter__(self):
nonlocal iter_called
iter_called = True
return iter([
DatasetItem(1),
DatasetItem(2),
])
dataset = Dataset.from_extractors(TestExtractor())
dataset.put(DatasetItem(2))
self.assertTrue((2, '') in dataset)
self.assertFalse(iter_called)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_switch_eager_and_lazy_with_cm_global(self):
iter_called = False
class TestExtractor(Extractor):
def __iter__(self):
nonlocal iter_called
iter_called = True
return iter([
DatasetItem(1),
DatasetItem(2),
])
with eager_mode():
Dataset.from_extractors(TestExtractor())
self.assertTrue(iter_called)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_switch_eager_and_lazy_with_cm_local(self):
iter_called = False
class TestExtractor(Extractor):
def __iter__(self):
nonlocal iter_called
iter_called = True
yield from [
DatasetItem(1),
DatasetItem(2),
DatasetItem(3),
DatasetItem(4),
]
dataset = Dataset.from_extractors(TestExtractor())
with eager_mode(dataset=dataset):
dataset.select(lambda item: int(item.id) < 3)
dataset.select(lambda item: int(item.id) < 2)
self.assertTrue(iter_called)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_do_lazy_select(self):
iter_called = 0
class TestExtractor(Extractor):
def __iter__(self):
nonlocal iter_called
iter_called += 1
yield from [
DatasetItem(1),
DatasetItem(2),
DatasetItem(3),
DatasetItem(4),
]
dataset = Dataset.from_extractors(TestExtractor())
dataset.select(lambda item: int(item.id) < 3)
dataset.select(lambda item: int(item.id) < 2)
self.assertEqual(iter_called, 0)
self.assertEqual(1, len(dataset))
self.assertEqual(iter_called, 1)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_chain_lazy_transforms(self):
iter_called = 0
class TestExtractor(Extractor):
def __iter__(self):
nonlocal iter_called
iter_called += 1
yield from [
DatasetItem(1),
DatasetItem(2),
DatasetItem(3),
DatasetItem(4),
]
dataset = Dataset.from_extractors(TestExtractor())
class TestTransform(ItemTransform):
def transform_item(self, item):
return self.wrap_item(item, id=int(item.id) + 1)
dataset.transform(TestTransform)
dataset.transform(TestTransform)
self.assertEqual(iter_called, 0)
self.assertEqual(4, len(dataset))
self.assertEqual(3, int(min(int(item.id) for item in dataset)))
self.assertEqual(iter_called, 1)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_get_len_after_local_transforms(self):
iter_called = 0
class TestExtractor(Extractor):
def __iter__(self):
nonlocal iter_called
iter_called += 1
yield from [
DatasetItem(1),
DatasetItem(2),
DatasetItem(3),
DatasetItem(4),
]
dataset = Dataset.from_extractors(TestExtractor())
class TestTransform(ItemTransform):
def transform_item(self, item):
return self.wrap_item(item, id=int(item.id) + 1)
dataset.transform(TestTransform)
dataset.transform(TestTransform)
self.assertEqual(iter_called, 0)
self.assertEqual(4, len(dataset))
self.assertEqual(iter_called, 1)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_get_len_after_nonlocal_transforms(self):
iter_called = 0
class TestExtractor(Extractor):
def __iter__(self):
nonlocal iter_called
iter_called += 1
yield from [
DatasetItem(1),
DatasetItem(2),
DatasetItem(3),
DatasetItem(4),
]
dataset = Dataset.from_extractors(TestExtractor())
class TestTransform(Transform):
def __iter__(self):
for item in self._extractor:
yield self.wrap_item(item, id=int(item.id) + 1)
dataset.transform(TestTransform)
dataset.transform(TestTransform)
self.assertEqual(iter_called, 0)
self.assertEqual(4, len(dataset))
self.assertEqual(iter_called, 2)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_get_subsets_after_local_transforms(self):
iter_called = 0
class TestExtractor(Extractor):
def __iter__(self):
nonlocal iter_called
iter_called += 1
yield from [
DatasetItem(1),
DatasetItem(2),
DatasetItem(3),
DatasetItem(4),
]
dataset = Dataset.from_extractors(TestExtractor())
class TestTransform(ItemTransform):
def transform_item(self, item):
return self.wrap_item(item, id=int(item.id) + 1, subset='a')
dataset.transform(TestTransform)
dataset.transform(TestTransform)
self.assertEqual(iter_called, 0)
self.assertEqual({'a'}, set(dataset.subsets()))
self.assertEqual(iter_called, 1)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_get_subsets_after_nonlocal_transforms(self):
iter_called = 0
class TestExtractor(Extractor):
def __iter__(self):
nonlocal iter_called
iter_called += 1
yield from [
DatasetItem(1),
DatasetItem(2),
DatasetItem(3),
DatasetItem(4),
]
dataset = Dataset.from_extractors(TestExtractor())
class TestTransform(Transform):
def __iter__(self):
for item in self._extractor:
yield self.wrap_item(item, id=int(item.id) + 1, subset='a')
dataset.transform(TestTransform)
dataset.transform(TestTransform)
self.assertEqual(iter_called, 0)
self.assertEqual({'a'}, set(dataset.subsets()))
self.assertEqual(iter_called, 2)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_raises_when_repeated_items_in_source(self):
dataset = Dataset.from_iterable([DatasetItem(0), DatasetItem(0)])
with self.assertRaises(RepeatedItemError):
dataset.init_cache()
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_check_item_existence(self):
dataset = Dataset.from_iterable([
DatasetItem(0, subset='a'), DatasetItem(1)
])
self.assertTrue(DatasetItem(0, subset='a') in dataset)
self.assertFalse(DatasetItem(0, subset='b') in dataset)
self.assertTrue((0, 'a') in dataset)
self.assertFalse((0, 'b') in dataset)
self.assertTrue(1 in dataset)
self.assertFalse(0 in dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_put_with_id_override(self):
dataset = Dataset.from_iterable([])
dataset.put(DatasetItem(0, subset='a'), id=2, subset='b')
self.assertTrue((2, 'b') in dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_compute_cache_with_empty_source(self):
dataset = Dataset.from_iterable([])
dataset.put(DatasetItem(2))
dataset.init_cache()
self.assertTrue(2 in dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_cant_do_partial_caching_in_get_when_default(self):
iter_called = 0
class TestExtractor(Extractor):
def __iter__(self):
nonlocal iter_called
iter_called += 1
return iter([
DatasetItem(1),
DatasetItem(2),
DatasetItem(3),
DatasetItem(4),
])
dataset = Dataset.from_extractors(TestExtractor())
dataset.get(3)
dataset.get(4)
self.assertEqual(1, iter_called)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_do_partial_caching_in_get_when_redefined(self):
iter_called = 0
get_called = 0
class TestExtractor(Extractor):
def __iter__(self):
nonlocal iter_called
iter_called += 1
return iter([
DatasetItem(1),
DatasetItem(2),
DatasetItem(3),
DatasetItem(4),
])
def get(self, id, subset=None):
nonlocal get_called
get_called += 1
return DatasetItem(id, subset=subset)
dataset = Dataset.from_extractors(TestExtractor())
dataset.get(3)
dataset.get(4)
self.assertEqual(0, iter_called)
self.assertEqual(2, get_called)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_binds_on_save(self):
dataset = Dataset.from_iterable([DatasetItem(1)])
self.assertFalse(dataset.is_bound)
with TestDir() as test_dir:
dataset.save(test_dir)
self.assertTrue(dataset.is_bound)
self.assertEqual(dataset.data_path, test_dir)
self.assertEqual(dataset.format, DEFAULT_FORMAT)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_flushes_changes_on_save(self):
dataset = Dataset.from_iterable([])
dataset.put(DatasetItem(1))
self.assertTrue(dataset.is_modified)
with TestDir() as test_dir:
dataset.save(test_dir)
self.assertFalse(dataset.is_modified)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_does_not_load_images_on_saving(self):
called = False
def test_loader():
nonlocal called
called = True
dataset = Dataset.from_iterable([
DatasetItem(1, image=test_loader)
])
with TestDir() as test_dir:
dataset.save(test_dir)
self.assertFalse(called)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_transform_labels(self):
expected = Dataset.from_iterable([], categories=['c', 'b'])
dataset = Dataset.from_iterable([], categories=['a', 'b'])
actual = dataset.transform('remap_labels', {'a': 'c'})
compare_datasets(self, expected, actual)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_run_model(self):
dataset = Dataset.from_iterable([
DatasetItem(i, image=np.array([i]))
for i in range(5)
], categories=['label'])
batch_size = 3
expected = Dataset.from_iterable([
DatasetItem(i, image=np.array([i]), annotations=[
Label(0, attributes={ 'idx': i % batch_size, 'data': i })
])
for i in range(5)
], categories=['label'])
calls = 0
class TestLauncher(Launcher):
def launch(self, inputs):
nonlocal calls
calls += 1
for i, inp in enumerate(inputs):
yield [ Label(0, attributes={'idx': i, 'data': inp.item()}) ]
model = TestLauncher()
actual = dataset.run_model(model, batch_size=batch_size)
compare_datasets(self, expected, actual, require_images=True)
self.assertEqual(2, calls)
@mark_requirement(Requirements.DATUM_BUG_259)
def test_can_filter_items(self):
dataset = Dataset.from_iterable([
DatasetItem(id=0, subset='train'),
DatasetItem(id=1, subset='test'),
])
dataset.filter('/item[id > 0]')
self.assertEqual(1, len(dataset))
@mark_requirement(Requirements.DATUM_BUG_257)
def test_filter_registers_changes(self):
dataset = Dataset.from_iterable([
DatasetItem(id=0, subset='train'),
DatasetItem(id=1, subset='test'),
])
dataset.filter('/item[id > 0]')
self.assertEqual({
('0', 'train'): ItemStatus.removed,
('1', 'test'): ItemStatus.modified,
}, dataset.get_patch().updated_items)
@mark_requirement(Requirements.DATUM_BUG_259)
def test_can_filter_annotations(self):
dataset = Dataset.from_iterable([
DatasetItem(id=0, subset='train', annotations=[Label(0), Label(1)]),
DatasetItem(id=1, subset='val', annotations=[Label(2)]),
DatasetItem(id=2, subset='test', annotations=[Label(0), Label(2)]),
], categories=['a', 'b', 'c'])
dataset.filter('/item/annotation[label = "c"]',
filter_annotations=True, remove_empty=True)
self.assertEqual(2, len(dataset))
@mark_requirement(Requirements.DATUM_BUG_259)
def test_can_filter_items_in_merged_dataset(self):
dataset = Dataset.from_extractors(
Dataset.from_iterable([ DatasetItem(id=0, subset='train') ]),
Dataset.from_iterable([ DatasetItem(id=1, subset='test') ]),
)
dataset.filter('/item[id > 0]')
self.assertEqual(1, len(dataset))
@mark_requirement(Requirements.DATUM_BUG_259)
def test_can_filter_annotations_in_merged_dataset(self):
dataset = Dataset.from_extractors(
Dataset.from_iterable([
DatasetItem(id=0, subset='train', annotations=[Label(0)]),
], categories=['a', 'b', 'c']),
Dataset.from_iterable([
DatasetItem(id=1, subset='val', annotations=[Label(1)]),
], categories=['a', 'b', 'c']),
Dataset.from_iterable([
DatasetItem(id=2, subset='test', annotations=[Label(2)]),
], categories=['a', 'b', 'c']),
)
dataset.filter('/item/annotation[label = "c"]',
filter_annotations=True, remove_empty=True)
self.assertEqual(1, len(dataset))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_inplace_save_writes_only_updated_data(self):
class CustomConverter(Converter):
DEFAULT_IMAGE_EXT = '.jpg'
def apply(self):
assert osp.isdir(self._save_dir)
for item in self._extractor:
name = f'{item.subset}_{item.id}'
with open(osp.join(
self._save_dir, name + '.txt'), 'w') as f:
f.write('\n')
if self._save_images and \
item.has_image and item.image.has_data:
self._save_image(item, name=name)
env = Environment()
env.converters.items = { 'test': CustomConverter }
with TestDir() as path:
dataset = Dataset.from_iterable([
DatasetItem(1, subset='train', image=np.ones((2, 4, 3))),
DatasetItem(2, subset='train',
image=Image(path='2.jpg', size=(3, 2))),
DatasetItem(3, subset='valid', image=np.ones((2, 2, 3))),
], categories=[], env=env)
dataset.export(path, 'test', save_images=True)
dataset.put(DatasetItem(2, subset='train', image=np.ones((3, 2, 3))))
dataset.remove(3, 'valid')
dataset.save(save_images=True)
self.assertEqual({
'train_1.txt', 'train_1.jpg',
'train_2.txt', 'train_2.jpg'
},
set(os.listdir(path)))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_update_overwrites_matching_items(self):
patch = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Bbox(1, 2, 3, 4, label=1) ])
], categories=['a', 'b'])
dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Bbox(2, 2, 1, 1, label=0) ]),
DatasetItem(id=2, annotations=[ Bbox(1, 1, 1, 1, label=1) ]),
], categories=['a', 'b'])
expected = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Bbox(1, 2, 3, 4, label=1) ]),
DatasetItem(id=2, annotations=[ Bbox(1, 1, 1, 1, label=1) ]),
], categories=['a', 'b'])
dataset.update(patch)
compare_datasets(self, expected, dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_update_can_reorder_labels(self):
patch = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Bbox(1, 2, 3, 4, label=1) ])
], categories=['b', 'a'])
dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Bbox(2, 2, 1, 1, label=0) ])
], categories=['a', 'b'])
expected = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Bbox(1, 2, 3, 4, label=0) ])
], categories=['a', 'b'])
dataset.update(patch)
compare_datasets(self, expected, dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_update_can_project_labels(self):
dataset = Dataset.from_iterable([
DatasetItem(id=100, annotations=[
Bbox(1, 2, 3, 3, label=0),
]),
DatasetItem(id=1, annotations=[
Bbox(1, 2, 3, 4, label=1)
]),
], categories=['a', 'b'])
patch = Dataset.from_iterable([
DatasetItem(id=100, annotations=[
Bbox(1, 2, 3, 4, label=0),
Bbox(5, 6, 2, 3, label=1),
Bbox(2, 2, 2, 3, label=2),
]),
DatasetItem(id=2, annotations=[
Bbox(1, 2, 3, 2, label=1)
]),
], categories=['b', 'a', 'c'])
expected = Dataset.from_iterable([
DatasetItem(id=100, annotations=[
Bbox(1, 2, 3, 4, label=1),
Bbox(5, 6, 2, 3, label=0),
]),
DatasetItem(id=1, annotations=[
Bbox(1, 2, 3, 4, label=1)
]),
DatasetItem(id=2, annotations=[
Bbox(1, 2, 3, 2, label=0)
]),
], categories=['a', 'b'])
dataset.update(patch)
compare_datasets(self, expected, dataset, ignored_attrs='*')
class DatasetItemTest(TestCase):
    """Construction rules of DatasetItem."""

    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_ctor_requires_id(self):
        # The id argument is mandatory.
        with self.assertRaises(Exception):
            DatasetItem()

    @staticmethod
    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_ctors_with_image():
        # Every supported image representation must be accepted:
        # absent, path, array, lazy loader, and Image wrapper.
        images = (
            None,
            'path.jpg',
            np.array([1, 2, 3]),
            lambda f: np.array([1, 2, 3]),
            Image(data=np.array([1, 2, 3])),
        )
        for image in images:
            DatasetItem(id=0, image=image)
class DatasetFilterTest(TestCase):
@staticmethod
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_item_representations():
item = DatasetItem(id=1, subset='subset',
image=np.ones((5, 4, 3)),
annotations=[
Label(0, attributes={'a1': 1, 'a2': '2'}, id=1, group=2),
Caption('hello', id=1),
Caption('world', group=5),
Label(2, id=3, attributes={ 'x': 1, 'y': '2' }),
Bbox(1, 2, 3, 4, label=4, id=4, attributes={ 'a': 1.0 }),
Bbox(5, 6, 7, 8, id=5, group=5),
Points([1, 2, 2, 0, 1, 1], label=0, id=5),
Mask(id=5, image=np.ones((3, 2))),
Mask(label=3, id=5, image=np.ones((2, 3))),
PolyLine([1, 2, 3, 4, 5, 6, 7, 8], id=11),
Polygon([1, 2, 3, 4, 5, 6, 7, 8]),
]
)
encoded = DatasetItemEncoder.encode(item)
DatasetItemEncoder.to_string(encoded)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_item_filter_can_be_applied(self):
class TestExtractor(Extractor):
def __iter__(self):
for i in range(4):
yield DatasetItem(id=i, subset='train')
extractor = TestExtractor()
filtered = XPathDatasetFilter(extractor, '/item[id > 1]')
self.assertEqual(2, len(filtered))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_annotations_filter_can_be_applied(self):
class SrcExtractor(Extractor):
def __iter__(self):
return iter([
DatasetItem(id=0),
DatasetItem(id=1, annotations=[
Label(0),
Label(1),
]),
DatasetItem(id=2, annotations=[
Label(0),
Label(2),
]),
])
class DstExtractor(Extractor):
def __iter__(self):
return iter([
DatasetItem(id=0),
DatasetItem(id=1, annotations=[
Label(0),
]),
DatasetItem(id=2, annotations=[
Label(0),
]),
])
extractor = SrcExtractor()
filtered = XPathAnnotationsFilter(extractor,
'/item/annotation[label_id = 0]')
self.assertListEqual(list(filtered), list(DstExtractor()))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_annotations_filter_can_remove_empty_items(self):
source = Dataset.from_iterable([
DatasetItem(id=0),
DatasetItem(id=1, annotations=[
Label(0),
Label(1),
]),
DatasetItem(id=2, annotations=[
Label(0),
Label(2),
]),
], categories=['a', 'b', 'c'])
expected = Dataset.from_iterable([
DatasetItem(id=2, annotations=[Label(2)]),
], categories=['a', 'b', 'c'])
filtered = XPathAnnotationsFilter(source,
'/item/annotation[label_id = 2]', remove_empty=True)
compare_datasets(self, expected, filtered)
| true | true |
f720773382e2af71b9b530986df7d022a800c635 | 4,874 | py | Python | protoseg/report.py | chriamue/protoseg | 4ddc7d613aadcb9d25b5773eff688214349ab23f | [
"MIT"
] | null | null | null | protoseg/report.py | chriamue/protoseg | 4ddc7d613aadcb9d25b5773eff688214349ab23f | [
"MIT"
] | null | null | null | protoseg/report.py | chriamue/protoseg | 4ddc7d613aadcb9d25b5773eff688214349ab23f | [
"MIT"
] | 1 | 2020-03-30T07:10:54.000Z | 2020-03-30T07:10:54.000Z |
import os
import numpy as np
import cv2
import json
import pandas as pd
import tensorflow as tf
from tensorboard.backend.event_processing import event_accumulator as ea
from matplotlib import pyplot as plt
from matplotlib import colors as colors
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import seaborn as sns
sns.set(style="darkgrid")
sns.set_context("paper")
from matplotlib.backends.backend_pdf import PdfPages
class Report():
    """Renders training results (TensorBoard event files) into PNG/PDF
    reports: loss/metric curves per run and per-trial summary pages for
    hyperparameter optimization.
    """
    def __init__(self, configs, resultspath='results/'):
        # configs: iterable of run names with .filename and .get()
        #   accessors -- assumed from usage below; TODO confirm type.
        # resultspath: root directory holding one subdirectory per run.
        self.configs = configs
        self.resultspath = resultspath
        assert(configs)
    # source: https://github.com/JamesChuanggg/Tensorboard2Seaborn/blob/master/beautify.py
    def plot(self, acc, tag='loss', smooth_space=100, color_code='#4169E1'):
        """Plot a scalar series from an EventAccumulator: the raw curve
        (translucent) overlaid with a smoothed curve.

        Returns (figure, rendered RGBA image as a numpy array).

        NOTE: accesses fig.canvas.renderer._renderer, a private
        matplotlib attribute tied to the Agg backend.
        """
        x_list = []
        y_list = []
        x_list_raw = []
        y_list_raw = []
        try:
            x = [int(s.step) for s in acc.Scalars(tag=tag)]
            y = [s.value for s in acc.Scalars(tag=tag)]
            # smooth curve
            # Block average over windows of smooth_space steps.
            # NOTE(review): the last partial window is still divided by
            # smooth_space, so the tail is under-weighted.
            x_ = []
            y_ = []
            for i in range(0, len(x), smooth_space):
                x_.append(x[i])
                y_.append(sum(y[i:i+smooth_space]) / float(smooth_space))
            # Pin the final raw point so both curves end together.
            x_.append(x[-1])
            y_.append(y[-1])
            x_list = x_
            y_list = y_
            # raw curve
            x_list_raw = x
            y_list_raw = y
        except Exception as e:
            # Missing tag / empty series: plot empty axes instead of failing.
            print(e)
        fig, ax = plt.subplots()
        plt.title(tag)
        # Raw data drawn faintly behind the smoothed line.
        plt.plot(x_list_raw, y_list_raw,
                 color=colors.to_rgba(color_code, alpha=0.4))
        plt.plot(x_list, y_list, color=color_code, linewidth=1.5)
        # Force a draw so the renderer buffer below is populated.
        fig.canvas.draw()
        return fig, np.array(fig.canvas.renderer._renderer)
    def image(self, acc, tag='loss'):
        """Decode the most recent logged image for `tag` into a numpy
        array (uses a TF1-style Session to evaluate the decode op)."""
        image_list = acc.Images(tag=tag)
        with tf.Session() as sess:
            img = tf.image.decode_image(image_list[-1].encoded_image_string)
            npimg = img.eval(session=sess)
        return npimg
    def generate(self):
        """Build <configfile>.pdf in resultspath with one loss page and
        one page per configured metric for every run; also writes the
        same plots as PNGs into each run's result directory."""
        pp = PdfPages(os.path.join(self.resultspath,
                                   os.path.basename(self.configs.filename) + '.pdf'))
        for run in self.configs:
            resultpath = os.path.join(self.resultspath, run)
            event_acc = ea.EventAccumulator(resultpath)
            event_acc.Reload()
            fig, img = self.plot(event_acc, tag="loss")
            # Stamp the run name in the figure corner.
            plt.text(0.05, 0.95, run, transform=fig.transFigure, size=24)
            pp.savefig(fig)
            cv2.imwrite(resultpath+'/loss.png', img)
            config = self.configs.get()
            # Each metric config entry is a single-key dict; the key is
            # both the scalar tag and the output file name.
            for metric in config['metrices']:
                name = list(metric.keys())[0]
                fig, img = self.plot(event_acc, tag=name)
                pp.savefig(fig)
                cv2.imwrite(resultpath+'/'+name+'.png', img)
        pp.close()
    def hyperparamopt(self, config, hyperparamoptimizer, resultpath):
        """Summarize a hyperparameter search: dump all trials to
        trials.csv and render paramopt.pdf with one page per trial
        (loss curve, validation image/mask/prediction, metric curves,
        and the trial's parameters attached as a PDF note)."""
        filename = os.path.join(resultpath, 'trials.csv')
        df = pd.DataFrame(data=hyperparamoptimizer.trials.results)
        df = df.set_index('loss')
        df.to_csv(filename)
        pp = PdfPages(os.path.join(resultpath, 'paramopt.pdf'))
        event_acc = ea.EventAccumulator(resultpath)
        event_acc.Reload()
        for result in hyperparamoptimizer.trials.results:
            trial = result['trial']
            l = result['loss']
            # Per-trial tags follow the 'trial<N>_<name>' convention.
            _, loss = self.plot(event_acc, tag='trial'+str(trial)+'_loss')
            val_image = self.image(
                event_acc, tag='trial'+str(trial)+'_val_image')
            val_mask = self.image(
                event_acc, tag='trial'+str(trial)+'_val_mask')
            val_predicted = self.image(
                event_acc, tag='trial'+str(trial)+'_val_predicted')
            # Top row: loss curve plus the three validation images.
            fig = plt.figure()
            fig.add_subplot(2, 4, 1)
            plt.axis('on')
            plt.imshow(loss)
            fig.add_subplot(2, 4, 2)
            plt.axis('off')
            plt.imshow(val_image)
            fig.add_subplot(2, 4, 3)
            plt.axis('off')
            plt.imshow(val_mask)
            fig.add_subplot(2, 4, 4)
            plt.axis('off')
            plt.imshow(val_predicted)
            plt.text(0.05, 0.95, 'trial ' + str(trial) + " loss: " +
                     str(l), transform=fig.transFigure, size=24)
            # Bottom row: one rendered curve per configured metric.
            for i, m in enumerate(config['metrices']):
                name = list(m.keys())[0]
                tag = 'trial'+str(trial)+'_'+name
                _, metric = self.plot(event_acc, tag=tag)
                fig.add_subplot(2, len(config['metrices']), len(
                    config['metrices']) + i+1)
                plt.imshow(metric)
            # Attach the raw parameter dict as a note on the page.
            pp.attach_note(result['params'])
            pp.savefig(fig)
            plt.close(fig)
        pp.close()
| 35.318841 | 90 | 0.560936 |
import os
import numpy as np
import cv2
import json
import pandas as pd
import tensorflow as tf
from tensorboard.backend.event_processing import event_accumulator as ea
from matplotlib import pyplot as plt
from matplotlib import colors as colors
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import seaborn as sns
sns.set(style="darkgrid")
sns.set_context("paper")
from matplotlib.backends.backend_pdf import PdfPages
class Report():
def __init__(self, configs, resultspath='results/'):
self.configs = configs
self.resultspath = resultspath
assert(configs)
def plot(self, acc, tag='loss', smooth_space=100, color_code='#4169E1'):
x_list = []
y_list = []
x_list_raw = []
y_list_raw = []
try:
x = [int(s.step) for s in acc.Scalars(tag=tag)]
y = [s.value for s in acc.Scalars(tag=tag)]
x_ = []
y_ = []
for i in range(0, len(x), smooth_space):
x_.append(x[i])
y_.append(sum(y[i:i+smooth_space]) / float(smooth_space))
x_.append(x[-1])
y_.append(y[-1])
x_list = x_
y_list = y_
x_list_raw = x
y_list_raw = y
except Exception as e:
print(e)
fig, ax = plt.subplots()
plt.title(tag)
plt.plot(x_list_raw, y_list_raw,
color=colors.to_rgba(color_code, alpha=0.4))
plt.plot(x_list, y_list, color=color_code, linewidth=1.5)
fig.canvas.draw()
return fig, np.array(fig.canvas.renderer._renderer)
def image(self, acc, tag='loss'):
image_list = acc.Images(tag=tag)
with tf.Session() as sess:
img = tf.image.decode_image(image_list[-1].encoded_image_string)
npimg = img.eval(session=sess)
return npimg
def generate(self):
pp = PdfPages(os.path.join(self.resultspath,
os.path.basename(self.configs.filename) + '.pdf'))
for run in self.configs:
resultpath = os.path.join(self.resultspath, run)
event_acc = ea.EventAccumulator(resultpath)
event_acc.Reload()
fig, img = self.plot(event_acc, tag="loss")
plt.text(0.05, 0.95, run, transform=fig.transFigure, size=24)
pp.savefig(fig)
cv2.imwrite(resultpath+'/loss.png', img)
config = self.configs.get()
for metric in config['metrices']:
name = list(metric.keys())[0]
fig, img = self.plot(event_acc, tag=name)
pp.savefig(fig)
cv2.imwrite(resultpath+'/'+name+'.png', img)
pp.close()
def hyperparamopt(self, config, hyperparamoptimizer, resultpath):
filename = os.path.join(resultpath, 'trials.csv')
df = pd.DataFrame(data=hyperparamoptimizer.trials.results)
df = df.set_index('loss')
df.to_csv(filename)
pp = PdfPages(os.path.join(resultpath, 'paramopt.pdf'))
event_acc = ea.EventAccumulator(resultpath)
event_acc.Reload()
for result in hyperparamoptimizer.trials.results:
trial = result['trial']
l = result['loss']
_, loss = self.plot(event_acc, tag='trial'+str(trial)+'_loss')
val_image = self.image(
event_acc, tag='trial'+str(trial)+'_val_image')
val_mask = self.image(
event_acc, tag='trial'+str(trial)+'_val_mask')
val_predicted = self.image(
event_acc, tag='trial'+str(trial)+'_val_predicted')
fig = plt.figure()
fig.add_subplot(2, 4, 1)
plt.axis('on')
plt.imshow(loss)
fig.add_subplot(2, 4, 2)
plt.axis('off')
plt.imshow(val_image)
fig.add_subplot(2, 4, 3)
plt.axis('off')
plt.imshow(val_mask)
fig.add_subplot(2, 4, 4)
plt.axis('off')
plt.imshow(val_predicted)
plt.text(0.05, 0.95, 'trial ' + str(trial) + " loss: " +
str(l), transform=fig.transFigure, size=24)
for i, m in enumerate(config['metrices']):
name = list(m.keys())[0]
tag = 'trial'+str(trial)+'_'+name
_, metric = self.plot(event_acc, tag=tag)
fig.add_subplot(2, len(config['metrices']), len(
config['metrices']) + i+1)
plt.imshow(metric)
pp.attach_note(result['params'])
pp.savefig(fig)
plt.close(fig)
pp.close()
| true | true |
f72077cdf636f62c3c764ed25b75858a0cc4d91d | 434 | py | Python | data/scripts/templates/object/mobile/shared_giant_veermok.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | [
"MIT"
] | 20 | 2015-02-23T15:11:56.000Z | 2022-03-18T20:56:48.000Z | data/scripts/templates/object/mobile/shared_giant_veermok.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | null | null | null | data/scripts/templates/object/mobile/shared_giant_veermok.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | 20 | 2015-04-04T16:35:59.000Z | 2022-03-24T14:54:37.000Z | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_giant_veermok.iff"
result.attribute_template_id = 9
result.stfName("monster_name","veermok")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | 25.529412 | 59 | 0.723502 | true | true | |
f72077d78671f48577f3268c91b0668f5686d755 | 214 | py | Python | Linguagens/Python/Exercicios/cursos_em_video/aulas-01_a_21/012.py | rafaelvizu/Estudos | eef5e3e3706ff99959226c51b9907b6af4377bfe | [
"MIT"
] | null | null | null | Linguagens/Python/Exercicios/cursos_em_video/aulas-01_a_21/012.py | rafaelvizu/Estudos | eef5e3e3706ff99959226c51b9907b6af4377bfe | [
"MIT"
] | null | null | null | Linguagens/Python/Exercicios/cursos_em_video/aulas-01_a_21/012.py | rafaelvizu/Estudos | eef5e3e3706ff99959226c51b9907b6af4377bfe | [
"MIT"
] | null | null | null | print('Exercício Python #012 - Calculando Descontos')
a6 = float(input('Preço: '))
b6 = int(input('Desconto:'))
c6 = a6 * b6 / 100
d6 = a6 - c6
print(' O valor com o desconto de {} % é de {} Reais '.format(b6, d6)) | 35.666667 | 70 | 0.630841 | print('Exercício Python #012 - Calculando Descontos')
a6 = float(input('Preço: '))
b6 = int(input('Desconto:'))
c6 = a6 * b6 / 100
d6 = a6 - c6
print(' O valor com o desconto de {} % é de {} Reais '.format(b6, d6)) | true | true |
f720787fc556e48c6de48c95c2046dbcd33827a9 | 26,887 | py | Python | dask_kubernetes/classic/kubecluster.py | Matt711/dask-kubernetes | 8190529fc140b6ea2c345bde02aa1c647272eb98 | [
"BSD-3-Clause"
] | 1 | 2022-01-20T12:38:27.000Z | 2022-01-20T12:38:27.000Z | dask_kubernetes/classic/kubecluster.py | Matt711/dask-kubernetes | 8190529fc140b6ea2c345bde02aa1c647272eb98 | [
"BSD-3-Clause"
] | null | null | null | dask_kubernetes/classic/kubecluster.py | Matt711/dask-kubernetes | 8190529fc140b6ea2c345bde02aa1c647272eb98 | [
"BSD-3-Clause"
] | null | null | null | import asyncio
import copy
import getpass
import logging
import os
import time
import uuid
import warnings
import yaml
import dask
import dask.distributed
import distributed.security
from distributed.deploy import SpecCluster, ProcessInterface
from distributed.utils import Log, Logs
import kubernetes_asyncio as kubernetes
from kubernetes_asyncio.client.rest import ApiException
from ..constants import KUBECLUSTER_WORKER_CONTAINER_NAME
from ..common.objects import (
make_pod_from_dict,
make_service_from_dict,
make_pdb_from_dict,
clean_pod_template,
clean_service_template,
clean_pdb_template,
)
from ..common.auth import ClusterAuth
from ..common.utils import (
namespace_default,
escape,
)
from ..common.networking import get_external_address_for_scheduler_service
logger = logging.getLogger(__name__)
SCHEDULER_PORT = 8786
class Pod(ProcessInterface):
    """A superclass for Kubernetes Pods

    Manages the lifecycle of a single pod through the Kubernetes API:
    creation (with retries), deletion, and log retrieval.

    Parameters
    ----------
    cluster:
        The owning cluster object.
    core_api:
        A ``kubernetes_asyncio`` CoreV1Api client used for pod operations.
    policy_api:
        A PolicyV1Api client (stored for subclasses; unused here).
    pod_template:
        A V1Pod template; deep-copied so per-pod mutation is safe.
    namespace: str
        Kubernetes namespace the pod is created in.
    loop (optional):
        Event loop reference.

    See Also
    --------
    Worker
    Scheduler
    """

    # Number of attempts before giving up on pod creation.
    CREATE_RETRIES = 10

    def __init__(
        self,
        cluster,
        core_api,
        policy_api,
        pod_template,
        namespace,
        loop=None,
        **kwargs
    ):
        self._pod = None
        self.cluster = cluster
        self.core_api = core_api
        self.policy_api = policy_api
        # Deep copy: each Pod mutates its own template (labels, env, args).
        self.pod_template = copy.deepcopy(pod_template)
        self.base_labels = self.pod_template.metadata.labels
        self.namespace = namespace
        self.name = None
        self.loop = loop
        self.kwargs = kwargs
        super().__init__()

    @property
    def cluster_name(self):
        """Name of the Dask cluster this pod belongs to (from its labels)."""
        return self.pod_template.metadata.labels["dask.org/cluster-name"]

    async def start(self, **kwargs):
        """Create the pod in Kubernetes, retrying transient API errors.

        Retries up to ``CREATE_RETRIES`` times with a short pause, then
        re-raises the last ``ApiException``.
        """
        retry_count = 0
        while True:
            try:
                self._pod = await self.core_api.create_namespaced_pod(
                    self.namespace, self.pod_template
                )
                return await super().start(**kwargs)
            except ApiException as e:
                if retry_count < self.CREATE_RETRIES:
                    logger.debug("Error when creating pod, retrying... - %s", str(e))
                    await asyncio.sleep(0.1)
                    retry_count += 1
                else:
                    raise e

    async def close(self, **kwargs):
        """Delete the pod if it was created; tolerate it already being gone."""
        if self._pod:
            name, namespace = self._pod.metadata.name, self.namespace
            try:
                await self.core_api.delete_namespaced_pod(name, namespace)
            except ApiException as e:
                if e.reason == "Not Found":
                    # Already cleaned up (e.g. by Kubernetes itself).
                    logger.debug(
                        "Pod %s in namespace %s has been deleted already.",
                        name,
                        namespace,
                    )
                else:
                    raise
        await super().close(**kwargs)

    async def logs(self):
        """Return the worker container's logs as a ``Log`` object.

        A pod whose container is still "waiting to start" has no logs
        yet; that case yields an empty log instead of an error.
        """
        try:
            log = await self.core_api.read_namespaced_pod_log(
                self._pod.metadata.name,
                self.namespace,
                container=KUBECLUSTER_WORKER_CONTAINER_NAME,
            )
        except ApiException as e:
            if "waiting to start" in str(e):
                log = ""
            else:
                raise e
        return Log(log)

    async def describe_pod(self):
        """Refresh and return the pod's current state from the API server."""
        self._pod = await self.core_api.read_namespaced_pod(
            self._pod.metadata.name, self.namespace
        )
        return self._pod

    def __repr__(self):
        return "<Pod %s: status=%s>" % (type(self).__name__, self.status)
class Worker(Pod):
    """A Remote Dask Worker controlled by Kubernetes

    Parameters
    ----------
    scheduler: str
        The address of the scheduler
    name (optional):
        The name passed to the dask-worker CLI at creation time.
    """

    def __init__(self, scheduler: str, name=None, **kwargs):
        super().__init__(**kwargs)
        self.scheduler = scheduler
        # Mark the pod so label selectors can distinguish workers from the scheduler
        self.pod_template.metadata.labels["dask.org/component"] = "worker"
        # The dask-worker CLI reads the scheduler address from this env variable
        self.pod_template.spec.containers[0].env.append(
            kubernetes.client.V1EnvVar(
                name="DASK_SCHEDULER_ADDRESS", value=self.scheduler
            )
        )
        if name is not None:
            worker_name_args = ["--name", str(name)]
            self.pod_template.spec.containers[0].args += worker_name_args
class Scheduler(Pod):
    """A Remote Dask Scheduler controlled by Kubernetes

    Parameters
    ----------
    idle_timeout: str, optional
        The scheduler task will exit after this amount of time
        if there are no requests from the client. Default is to
        never timeout.
    service_wait_timeout_s: int (optional)
        Timeout, in seconds, to wait for the remote scheduler service to be ready.
        Defaults to 30 seconds.
        Set to 0 to disable the timeout (not recommended).
    service_name_retries: int (optional)
        Number of retries when resolving the scheduler service name.
    """

    def __init__(
        self,
        idle_timeout: str,
        service_wait_timeout_s: int = None,
        service_name_retries: int = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.cluster._log("Creating scheduler pod on cluster. This may take some time.")
        self.service = None
        self._idle_timeout = idle_timeout
        self._service_wait_timeout_s = service_wait_timeout_s
        self._service_name_retries = service_name_retries
        if self._idle_timeout is not None:
            self.pod_template.spec.containers[0].args += [
                "--idle-timeout",
                self._idle_timeout,
            ]
        self.pdb = None

    async def start(self, **kwargs):
        """Start the pod, wait for the scheduler, then expose it via a Service."""
        await super().start(**kwargs)
        # Wait until the pod has been scheduled onto a node
        while (await self.describe_pod()).status.phase == "Pending":
            await asyncio.sleep(0.1)
        # Scrape the listening address out of the scheduler's own log output
        while self.address is None:
            logs = await self.logs()
            for line in logs.splitlines():
                if "Scheduler at:" in line:
                    self.address = line.split("Scheduler at:")[1].strip()
            await asyncio.sleep(0.1)
        self.service = await self._create_service()
        # Cluster-internal address goes through the Service's DNS name
        self.address = "tcp://{name}.{namespace}:{port}".format(
            name=self.service.metadata.name,
            namespace=self.namespace,
            port=SCHEDULER_PORT,
        )
        self.external_address = await get_external_address_for_scheduler_service(
            self.core_api,
            self.service,
            service_name_resolution_retries=self._service_name_retries,
        )
        self.pdb = await self._create_pdb()

    async def close(self, **kwargs):
        """Delete the Service and PodDisruptionBudget, then the pod itself."""
        if self.service:
            await self.core_api.delete_namespaced_service(
                self.cluster_name, self.namespace
            )
        if self.pdb:
            await self.policy_api.delete_namespaced_pod_disruption_budget(
                self.cluster_name, self.namespace
            )
        await super().close(**kwargs)

    async def _create_service(self):
        """Create (and, for LoadBalancers, wait for) the scheduler Service."""
        service_template_dict = dask.config.get("kubernetes.scheduler-service-template")
        self.service_template = clean_service_template(
            make_service_from_dict(service_template_dict)
        )
        self.service_template.metadata.name = self.cluster_name
        self.service_template.metadata.labels = copy.deepcopy(self.base_labels)
        self.service_template.spec.selector["dask.org/cluster-name"] = self.cluster_name
        if self.service_template.spec.type is None:
            self.service_template.spec.type = dask.config.get(
                "kubernetes.scheduler-service-type"
            )
        await self.core_api.create_namespaced_service(
            self.namespace, self.service_template
        )
        service = await self.core_api.read_namespaced_service(
            self.cluster_name, self.namespace
        )
        if service.spec.type == "LoadBalancer":
            # Wait for the load balancer to be assigned an ingress address.
            # A falsy timeout (0 or None) disables the deadline; the original
            # `> 0` comparison raised TypeError when the timeout was None.
            start = time.time()
            while service.status.load_balancer.ingress is None:
                if (
                    self._service_wait_timeout_s
                    and time.time() > start + self._service_wait_timeout_s
                ):
                    raise asyncio.TimeoutError(
                        "Timed out waiting for Load Balancer to be provisioned."
                    )
                service = await self.core_api.read_namespaced_service(
                    self.cluster_name, self.namespace
                )
                await asyncio.sleep(0.2)
        return service

    async def _create_pdb(self):
        """Create a PodDisruptionBudget selecting this cluster's pods."""
        pdb_template_dict = dask.config.get("kubernetes.scheduler-pdb-template")
        self.pdb_template = clean_pdb_template(make_pdb_from_dict(pdb_template_dict))
        self.pdb_template.metadata.name = self.cluster_name
        self.pdb_template.metadata.labels = copy.deepcopy(self.base_labels)
        self.pdb_template.spec.selector.match_labels[
            "dask.org/cluster-name"
        ] = self.cluster_name
        await self.policy_api.create_namespaced_pod_disruption_budget(
            self.namespace, self.pdb_template
        )
        return await self.policy_api.read_namespaced_pod_disruption_budget(
            self.cluster_name, self.namespace
        )
class KubeCluster(SpecCluster):
    """Launch a Dask cluster on Kubernetes

    This starts a local Dask scheduler and then dynamically launches
    Dask workers on a Kubernetes cluster. The Kubernetes cluster is taken
    to be either the current one on which this code is running, or as a
    fallback, the default one configured in a kubeconfig file.

    **Environments**

    Your worker pod image should have a similar environment to your local
    environment, including versions of Python, dask, cloudpickle, and any
    libraries that you may wish to use (like NumPy, Pandas, or Scikit-Learn).
    See examples below for suggestions on how to manage and check for this.

    **Network**

    Since the Dask scheduler is launched locally, for it to work, we need to
    be able to open network connections between this local node and all the
    workers nodes on the Kubernetes cluster. If the current process is not
    already on a Kubernetes node, some network configuration will likely be
    required to make this work.

    **Resources**

    Your Kubernetes resource limits and requests should match the
    ``--memory-limit`` and ``--nthreads`` parameters given to the
    ``dask-worker`` command.

    Parameters
    ----------
    pod_template: (kubernetes.client.V1Pod, dict, str)
        A Kubernetes specification for a Pod for a dask worker. Can be either a
        ``V1Pod``, a dict representation of a pod, or a path to a yaml file
        containing a pod specification.
    scheduler_pod_template: kubernetes.client.V1Pod (optional)
        A Kubernetes specification for a Pod for a dask scheduler.
        Defaults to the pod_template.
    name: str (optional)
        Name given to the pods. Defaults to ``dask-$USER-random``
    namespace: str (optional)
        Namespace in which to launch the workers.
        Defaults to current namespace if available or "default"
    n_workers: int
        Number of workers on initial launch.
        Use ``scale`` to change this number in the future
    env: Dict[str, str]
        Dictionary of environment variables to pass to worker pod
    host: str
        Listen address for local scheduler. Defaults to 0.0.0.0
    port: int
        Port of local scheduler
    auth: List[ClusterAuth] (optional)
        Configuration methods to attempt in order. Defaults to
        ``[InCluster(), KubeConfig()]``.
    idle_timeout: str (optional)
        The scheduler task will exit after this amount of time
        if there are no requests from the client. Default is to
        never timeout.
    scheduler_service_wait_timeout: int (optional)
        Timeout, in seconds, to wait for the remote scheduler service to be ready.
        Defaults to 30 seconds.
        Set to 0 to disable the timeout (not recommended).
    scheduler_service_name_resolution_retries: int (optional)
        Number of retries to resolve scheduler service name when running
        from within the Kubernetes cluster.
        Defaults to 20.
        Must be set to 1 or greater.
    deploy_mode: str (optional)
        Run the scheduler as "local" or "remote".
        Defaults to ``"remote"``.
    **kwargs: dict
        Additional keyword arguments to pass to LocalCluster

    Examples
    --------
    >>> from dask_kubernetes import KubeCluster, make_pod_spec
    >>> pod_spec = make_pod_spec(image='ghcr.io/dask/dask:latest',
    ...                          memory_limit='4G', memory_request='4G',
    ...                          cpu_limit=1, cpu_request=1,
    ...                          env={'EXTRA_PIP_PACKAGES': 'fastparquet git+https://github.com/dask/distributed'})
    >>> cluster = KubeCluster(pod_spec)
    >>> cluster.scale(10)

    You can also create clusters with worker pod specifications as dictionaries
    or stored in YAML files

    >>> cluster = KubeCluster('worker-template.yml')
    >>> cluster = KubeCluster({...})

    Rather than explicitly setting a number of workers you can also ask the
    cluster to allocate workers dynamically based on current workload

    >>> cluster.adapt()

    You can pass this cluster directly to a Dask client

    >>> from dask.distributed import Client
    >>> client = Client(cluster)

    You can verify that your local environment matches your worker environments
    by calling ``client.get_versions(check=True)``. This will raise an
    informative error if versions do not match.

    >>> client.get_versions(check=True)

    The ``ghcr.io/dask/dask`` docker images support ``EXTRA_PIP_PACKAGES``,
    ``EXTRA_APT_PACKAGES`` and ``EXTRA_CONDA_PACKAGES`` environment variables
    to help with small adjustments to the worker environments. We recommend
    the use of pip over conda in this case due to a much shorter startup time.
    These environment variables can be modified directly from the KubeCluster
    constructor methods using the ``env=`` keyword. You may list as many
    packages as you like in a single string like the following:

    >>> pip = 'pyarrow gcsfs git+https://github.com/dask/distributed'
    >>> conda = '-c conda-forge scikit-learn'
    >>> KubeCluster(..., env={'EXTRA_PIP_PACKAGES': pip,
    ...                       'EXTRA_CONDA_PACKAGES': conda})

    You can also start a KubeCluster with no arguments *if* the worker template
    is specified in the Dask config files, either as a full template in
    ``kubernetes.worker-template`` or a path to a YAML file in
    ``kubernetes.worker-template-path``.

    See https://docs.dask.org/en/latest/configuration.html for more
    information about setting configuration values.::

        $ export DASK_KUBERNETES__WORKER_TEMPLATE_PATH=worker_template.yaml

    >>> cluster = KubeCluster()  # automatically finds 'worker_template.yaml'

    See Also
    --------
    KubeCluster.adapt
    """

    def __init__(
        self,
        pod_template=None,
        name=None,
        namespace=None,
        n_workers=None,
        host=None,
        port=None,
        env=None,
        auth=ClusterAuth.DEFAULT,
        idle_timeout=None,
        deploy_mode=None,
        interface=None,
        protocol=None,
        dashboard_address=None,
        security=None,
        scheduler_service_wait_timeout=None,
        scheduler_service_name_resolution_retries=None,
        scheduler_pod_template=None,
        **kwargs
    ):
        # Templates may arrive as a YAML path, a dict, or a ready V1Pod object;
        # normalise the first two forms to V1Pod here.
        if isinstance(pod_template, str):
            with open(pod_template) as f:
                pod_template = dask.config.expand_environment_variables(
                    yaml.safe_load(f)
                )
        if isinstance(pod_template, dict):
            pod_template = make_pod_from_dict(pod_template)

        if isinstance(scheduler_pod_template, str):
            with open(scheduler_pod_template) as f:
                scheduler_pod_template = dask.config.expand_environment_variables(
                    yaml.safe_load(f)
                )
        if isinstance(scheduler_pod_template, dict):
            scheduler_pod_template = make_pod_from_dict(scheduler_pod_template)

        self.pod_template = pod_template
        self.scheduler_pod_template = scheduler_pod_template

        # Explicit constructor arguments override values from Dask config
        self._generate_name = dask.config.get("kubernetes.name", override_with=name)
        self.namespace = dask.config.get(
            "kubernetes.namespace", override_with=namespace
        )
        self._n_workers = dask.config.get(
            "kubernetes.count.start", override_with=n_workers
        )
        self._idle_timeout = dask.config.get(
            "kubernetes.idle-timeout", override_with=idle_timeout
        )
        self._deploy_mode = dask.config.get(
            "kubernetes.deploy-mode", override_with=deploy_mode
        )
        self._protocol = dask.config.get("kubernetes.protocol", override_with=protocol)
        self._interface = dask.config.get(
            "kubernetes.interface", override_with=interface
        )
        self._dashboard_address = dask.config.get(
            "kubernetes.dashboard_address", override_with=dashboard_address
        )
        self._scheduler_service_wait_timeout = dask.config.get(
            "kubernetes.scheduler-service-wait-timeout",
            override_with=scheduler_service_wait_timeout,
        )
        self._scheduler_service_name_resolution_retries = dask.config.get(
            "kubernetes.scheduler-service-name-resolution-retries",
            override_with=scheduler_service_name_resolution_retries,
        )
        self.security = security
        if self.security and not isinstance(
            self.security, distributed.security.Security
        ):
            raise RuntimeError(
                "Security object is not a valid distributed.security.Security object"
            )
        self.host = dask.config.get("kubernetes.host", override_with=host)
        self.port = dask.config.get("kubernetes.port", override_with=port)
        self.env = dask.config.get("kubernetes.env", override_with=env)
        self.auth = auth
        self.kwargs = kwargs
        super().__init__(**self.kwargs)

    def _get_pod_template(self, pod_template, pod_type):
        """Fall back to Dask config for a template when none was supplied.

        Checks ``kubernetes.<pod_type>-template`` first, then
        ``kubernetes.<pod_type>-template-path``.
        """
        if not pod_template and dask.config.get(
            "kubernetes.{}-template".format(pod_type), None
        ):
            d = dask.config.get("kubernetes.{}-template".format(pod_type))
            d = dask.config.expand_environment_variables(d)
            pod_template = make_pod_from_dict(d)

        if not pod_template and dask.config.get(
            "kubernetes.{}-template-path".format(pod_type), None
        ):
            fn = dask.config.get("kubernetes.{}-template-path".format(pod_type))
            # Allow environment variables inside the configured path
            fn = fn.format(**os.environ)
            with open(fn) as f:
                d = yaml.safe_load(f)
            d = dask.config.expand_environment_variables(d)
            pod_template = make_pod_from_dict(d)
        return pod_template

    def _fill_pod_templates(self, pod_template, pod_type):
        """Stamp cluster-identifying labels, namespace and env onto a template."""
        pod_template = copy.deepcopy(pod_template)
        # Default labels that can't be overwritten
        pod_template.metadata.labels["dask.org/cluster-name"] = self._generate_name
        pod_template.metadata.labels["dask.org/component"] = pod_type
        pod_template.metadata.labels["user"] = escape(getpass.getuser())
        pod_template.metadata.labels["app"] = "dask"
        pod_template.metadata.namespace = self.namespace
        if self.env:
            pod_template.spec.containers[0].env.extend(
                [
                    kubernetes.client.V1EnvVar(name=k, value=str(v))
                    for k, v in self.env.items()
                ]
            )
        pod_template.metadata.generate_name = self._generate_name
        return pod_template

    async def _start(self):
        """Resolve templates and auth, then build the scheduler/worker specs."""
        self.pod_template = self._get_pod_template(self.pod_template, pod_type="worker")
        self.scheduler_pod_template = self._get_pod_template(
            self.scheduler_pod_template, pod_type="scheduler"
        )
        if not self.pod_template:
            msg = (
                "Worker pod specification not provided. See KubeCluster "
                "docstring for ways to specify workers"
            )
            raise ValueError(msg)

        base_pod_template = self.pod_template
        self.pod_template = clean_pod_template(self.pod_template, pod_type="worker")

        if not self.scheduler_pod_template:
            # Reuse the worker template for the scheduler, swapping the command
            self.scheduler_pod_template = base_pod_template
            self.scheduler_pod_template.spec.containers[0].args = ["dask-scheduler"]

        self.scheduler_pod_template = clean_pod_template(
            self.scheduler_pod_template, pod_type="scheduler"
        )

        await ClusterAuth.load_first(self.auth)

        self.core_api = kubernetes.client.CoreV1Api()
        self.policy_api = kubernetes.client.PolicyV1beta1Api()

        if self.namespace is None:
            self.namespace = namespace_default()

        # 'user' and 'uuid' are reserved format fields below, so drop any
        # environment variables of the same name before expanding the template
        environ = {k: v for k, v in os.environ.items() if k not in ["user", "uuid"]}
        self._generate_name = self._generate_name.format(
            user=getpass.getuser(), uuid=str(uuid.uuid4())[:10], **environ
        )
        self._generate_name = escape(self._generate_name)

        self.pod_template = self._fill_pod_templates(
            self.pod_template, pod_type="worker"
        )
        self.scheduler_pod_template = self._fill_pod_templates(
            self.scheduler_pod_template, pod_type="scheduler"
        )

        common_options = {
            "cluster": self,
            "core_api": self.core_api,
            "policy_api": self.policy_api,
            "namespace": self.namespace,
            "loop": self.loop,
        }

        if self._deploy_mode == "local":
            self.scheduler_spec = {
                "cls": dask.distributed.Scheduler,
                "options": {
                    "protocol": self._protocol,
                    "interface": self._interface,
                    "host": self.host,
                    "port": self.port,
                    "dashboard_address": self._dashboard_address,
                    "security": self.security,
                },
            }
        elif self._deploy_mode == "remote":
            self.scheduler_spec = {
                "cls": Scheduler,
                "options": {
                    "idle_timeout": self._idle_timeout,
                    "service_wait_timeout_s": self._scheduler_service_wait_timeout,
                    "service_name_retries": self._scheduler_service_name_resolution_retries,
                    "pod_template": self.scheduler_pod_template,
                    **common_options,
                },
            }
        else:
            raise RuntimeError("Unknown deploy mode %s" % self._deploy_mode)

        self.new_spec = {
            "cls": Worker,
            "options": {"pod_template": self.pod_template, **common_options},
        }
        self.worker_spec = {i: self.new_spec for i in range(self._n_workers)}

        self.name = self.pod_template.metadata.generate_name

        await super()._start()

    @classmethod
    def from_dict(cls, pod_spec, **kwargs):
        """Create cluster with worker pod spec defined by Python dictionary

        Deprecated, please use the `KubeCluster` constructor directly.

        Examples
        --------
        >>> spec = {
        ...     'metadata': {},
        ...     'spec': {
        ...         'containers': [{
        ...             'args': ['dask-worker', '$(DASK_SCHEDULER_ADDRESS)',
        ...                      '--nthreads', '1',
        ...                      '--death-timeout', '60'],
        ...             'command': None,
        ...             'image': 'ghcr.io/dask/dask:latest',
        ...             'name': 'dask-worker',
        ...         }],
        ...     'restartPolicy': 'Never',
        ...     }
        ... }
        >>> cluster = KubeCluster.from_dict(spec, namespace='my-ns')  # doctest: +SKIP

        See Also
        --------
        KubeCluster.from_yaml
        """
        warnings.warn(
            "KubeCluster.from_dict is deprecated, use the constructor directly"
        )
        return cls(pod_spec, **kwargs)

    @classmethod
    def from_yaml(cls, yaml_path, **kwargs):
        """Create cluster with worker pod spec defined by a YAML file

        Deprecated, please use the `KubeCluster` constructor directly.

        We can start a cluster with pods defined in an accompanying YAML file
        like the following:

        .. code-block:: yaml

            kind: Pod
            metadata:
              labels:
                foo: bar
                baz: quux
            spec:
              containers:
              - image: ghcr.io/dask/dask:latest
                name: dask-worker
                args: [dask-worker, $(DASK_SCHEDULER_ADDRESS), --nthreads, '2', --memory-limit, 8GB]
              restartPolicy: Never

        Examples
        --------
        >>> cluster = KubeCluster.from_yaml('pod.yaml', namespace='my-ns')  # doctest: +SKIP

        See Also
        --------
        KubeCluster.from_dict
        """
        warnings.warn(
            "KubeCluster.from_yaml is deprecated, use the constructor directly"
        )
        return cls(yaml_path, **kwargs)

    def scale(self, n):
        """Scale the cluster to ``n`` workers, capped at ``kubernetes.count.max``."""
        # A shim to maintain backward compatibility
        # https://github.com/dask/distributed/issues/3054
        maximum = dask.config.get("kubernetes.count.max")
        if maximum is not None and maximum < n:
            logger.info(
                "Tried to scale beyond maximum number of workers %d > %d", n, maximum
            )
            n = maximum
        return super().scale(n)

    async def _logs(self, scheduler=True, workers=True):
        """Return logs for the scheduler and workers

        Parameters
        ----------
        scheduler : boolean
            Whether or not to collect logs for the scheduler
        workers : boolean or Iterable[str], optional
            A list of worker addresses to select.
            Defaults to all workers if `True` or no workers if `False`

        Returns
        -------
        logs: Dict[str]
            A dictionary of logs, with one item for the scheduler and one for
            each worker
        """
        logs = Logs()

        if scheduler:
            logs["Scheduler"] = await self.scheduler.logs()

        if workers:
            worker_logs = await asyncio.gather(
                *[w.logs() for w in self.workers.values()]
            )
            for key, log in zip(self.workers, worker_logs):
                logs[key] = log

        return logs
| 36.481682 | 115 | 0.609886 | import asyncio
import copy
import getpass
import logging
import os
import time
import uuid
import warnings
import yaml
import dask
import dask.distributed
import distributed.security
from distributed.deploy import SpecCluster, ProcessInterface
from distributed.utils import Log, Logs
import kubernetes_asyncio as kubernetes
from kubernetes_asyncio.client.rest import ApiException
from ..constants import KUBECLUSTER_WORKER_CONTAINER_NAME
from ..common.objects import (
make_pod_from_dict,
make_service_from_dict,
make_pdb_from_dict,
clean_pod_template,
clean_service_template,
clean_pdb_template,
)
from ..common.auth import ClusterAuth
from ..common.utils import (
namespace_default,
escape,
)
from ..common.networking import get_external_address_for_scheduler_service
logger = logging.getLogger(__name__)
SCHEDULER_PORT = 8786
class Pod(ProcessInterface):
def __init__(
self,
cluster,
core_api,
policy_api,
pod_template,
namespace,
loop=None,
**kwargs
):
self._pod = None
self.cluster = cluster
self.core_api = core_api
self.policy_api = policy_api
self.pod_template = copy.deepcopy(pod_template)
self.base_labels = self.pod_template.metadata.labels
self.namespace = namespace
self.name = None
self.loop = loop
self.kwargs = kwargs
super().__init__()
@property
def cluster_name(self):
return self.pod_template.metadata.labels["dask.org/cluster-name"]
async def start(self, **kwargs):
retry_count = 0
while True:
try:
self._pod = await self.core_api.create_namespaced_pod(
self.namespace, self.pod_template
)
return await super().start(**kwargs)
except ApiException as e:
if retry_count < 10:
logger.debug("Error when creating pod, retrying... - %s", str(e))
await asyncio.sleep(0.1)
retry_count += 1
else:
raise e
async def close(self, **kwargs):
if self._pod:
name, namespace = self._pod.metadata.name, self.namespace
try:
await self.core_api.delete_namespaced_pod(name, namespace)
except ApiException as e:
if e.reason == "Not Found":
logger.debug(
"Pod %s in namespace %s has been deleated already.",
name,
namespace,
)
else:
raise
await super().close(**kwargs)
async def logs(self):
try:
log = await self.core_api.read_namespaced_pod_log(
self._pod.metadata.name,
self.namespace,
container=KUBECLUSTER_WORKER_CONTAINER_NAME,
)
except ApiException as e:
if "waiting to start" in str(e):
log = ""
else:
raise e
return Log(log)
async def describe_pod(self):
self._pod = await self.core_api.read_namespaced_pod(
self._pod.metadata.name, self.namespace
)
return self._pod
def __repr__(self):
return "<Pod %s: status=%s>" % (type(self).__name__, self.status)
class Worker(Pod):
def __init__(self, scheduler: str, name=None, **kwargs):
super().__init__(**kwargs)
self.scheduler = scheduler
self.pod_template.metadata.labels["dask.org/component"] = "worker"
self.pod_template.spec.containers[0].env.append(
kubernetes.client.V1EnvVar(
name="DASK_SCHEDULER_ADDRESS", value=self.scheduler
)
)
if name is not None:
worker_name_args = ["--name", str(name)]
self.pod_template.spec.containers[0].args += worker_name_args
class Scheduler(Pod):
def __init__(
self,
idle_timeout: str,
service_wait_timeout_s: int = None,
service_name_retries: int = None,
**kwargs
):
super().__init__(**kwargs)
self.cluster._log("Creating scheduler pod on cluster. This may take some time.")
self.service = None
self._idle_timeout = idle_timeout
self._service_wait_timeout_s = service_wait_timeout_s
self._service_name_retries = service_name_retries
if self._idle_timeout is not None:
self.pod_template.spec.containers[0].args += [
"--idle-timeout",
self._idle_timeout,
]
self.pdb = None
async def start(self, **kwargs):
await super().start(**kwargs)
while (await self.describe_pod()).status.phase == "Pending":
await asyncio.sleep(0.1)
while self.address is None:
logs = await self.logs()
for line in logs.splitlines():
if "Scheduler at:" in line:
self.address = line.split("Scheduler at:")[1].strip()
await asyncio.sleep(0.1)
self.service = await self._create_service()
self.address = "tcp://{name}.{namespace}:{port}".format(
name=self.service.metadata.name,
namespace=self.namespace,
port=SCHEDULER_PORT,
)
self.external_address = await get_external_address_for_scheduler_service(
self.core_api,
self.service,
service_name_resolution_retries=self._service_name_retries,
)
self.pdb = await self._create_pdb()
async def close(self, **kwargs):
if self.service:
await self.core_api.delete_namespaced_service(
self.cluster_name, self.namespace
)
if self.pdb:
await self.policy_api.delete_namespaced_pod_disruption_budget(
self.cluster_name, self.namespace
)
await super().close(**kwargs)
async def _create_service(self):
service_template_dict = dask.config.get("kubernetes.scheduler-service-template")
self.service_template = clean_service_template(
make_service_from_dict(service_template_dict)
)
self.service_template.metadata.name = self.cluster_name
self.service_template.metadata.labels = copy.deepcopy(self.base_labels)
self.service_template.spec.selector["dask.org/cluster-name"] = self.cluster_name
if self.service_template.spec.type is None:
self.service_template.spec.type = dask.config.get(
"kubernetes.scheduler-service-type"
)
await self.core_api.create_namespaced_service(
self.namespace, self.service_template
)
service = await self.core_api.read_namespaced_service(
self.cluster_name, self.namespace
)
if service.spec.type == "LoadBalancer":
start = time.time()
while service.status.load_balancer.ingress is None:
if (
self._service_wait_timeout_s > 0
and time.time() > start + self._service_wait_timeout_s
):
raise asyncio.TimeoutError(
"Timed out waiting for Load Balancer to be provisioned."
)
service = await self.core_api.read_namespaced_service(
self.cluster_name, self.namespace
)
await asyncio.sleep(0.2)
return service
async def _create_pdb(self):
pdb_template_dict = dask.config.get("kubernetes.scheduler-pdb-template")
self.pdb_template = clean_pdb_template(make_pdb_from_dict(pdb_template_dict))
self.pdb_template.metadata.name = self.cluster_name
self.pdb_template.metadata.labels = copy.deepcopy(self.base_labels)
self.pdb_template.spec.selector.match_labels[
"dask.org/cluster-name"
] = self.cluster_name
await self.policy_api.create_namespaced_pod_disruption_budget(
self.namespace, self.pdb_template
)
return await self.policy_api.read_namespaced_pod_disruption_budget(
self.cluster_name, self.namespace
)
class KubeCluster(SpecCluster):
def __init__(
self,
pod_template=None,
name=None,
namespace=None,
n_workers=None,
host=None,
port=None,
env=None,
auth=ClusterAuth.DEFAULT,
idle_timeout=None,
deploy_mode=None,
interface=None,
protocol=None,
dashboard_address=None,
security=None,
scheduler_service_wait_timeout=None,
scheduler_service_name_resolution_retries=None,
scheduler_pod_template=None,
**kwargs
):
if isinstance(pod_template, str):
with open(pod_template) as f:
pod_template = dask.config.expand_environment_variables(
yaml.safe_load(f)
)
if isinstance(pod_template, dict):
pod_template = make_pod_from_dict(pod_template)
if isinstance(scheduler_pod_template, str):
with open(scheduler_pod_template) as f:
scheduler_pod_template = dask.config.expand_environment_variables(
yaml.safe_load(f)
)
if isinstance(scheduler_pod_template, dict):
scheduler_pod_template = make_pod_from_dict(scheduler_pod_template)
self.pod_template = pod_template
self.scheduler_pod_template = scheduler_pod_template
self._generate_name = dask.config.get("kubernetes.name", override_with=name)
self.namespace = dask.config.get(
"kubernetes.namespace", override_with=namespace
)
self._n_workers = dask.config.get(
"kubernetes.count.start", override_with=n_workers
)
self._idle_timeout = dask.config.get(
"kubernetes.idle-timeout", override_with=idle_timeout
)
self._deploy_mode = dask.config.get(
"kubernetes.deploy-mode", override_with=deploy_mode
)
self._protocol = dask.config.get("kubernetes.protocol", override_with=protocol)
self._interface = dask.config.get(
"kubernetes.interface", override_with=interface
)
self._dashboard_address = dask.config.get(
"kubernetes.dashboard_address", override_with=dashboard_address
)
self._scheduler_service_wait_timeout = dask.config.get(
"kubernetes.scheduler-service-wait-timeout",
override_with=scheduler_service_wait_timeout,
)
self._scheduler_service_name_resolution_retries = dask.config.get(
"kubernetes.scheduler-service-name-resolution-retries",
override_with=scheduler_service_name_resolution_retries,
)
self.security = security
if self.security and not isinstance(
self.security, distributed.security.Security
):
raise RuntimeError(
"Security object is not a valid distributed.security.Security object"
)
self.host = dask.config.get("kubernetes.host", override_with=host)
self.port = dask.config.get("kubernetes.port", override_with=port)
self.env = dask.config.get("kubernetes.env", override_with=env)
self.auth = auth
self.kwargs = kwargs
super().__init__(**self.kwargs)
def _get_pod_template(self, pod_template, pod_type):
if not pod_template and dask.config.get(
"kubernetes.{}-template".format(pod_type), None
):
d = dask.config.get("kubernetes.{}-template".format(pod_type))
d = dask.config.expand_environment_variables(d)
pod_template = make_pod_from_dict(d)
if not pod_template and dask.config.get(
"kubernetes.{}-template-path".format(pod_type), None
):
import yaml
fn = dask.config.get("kubernetes.{}-template-path".format(pod_type))
fn = fn.format(**os.environ)
with open(fn) as f:
d = yaml.safe_load(f)
d = dask.config.expand_environment_variables(d)
pod_template = make_pod_from_dict(d)
return pod_template
def _fill_pod_templates(self, pod_template, pod_type):
pod_template = copy.deepcopy(pod_template)
pod_template.metadata.labels["dask.org/cluster-name"] = self._generate_name
pod_template.metadata.labels["dask.org/component"] = pod_type
pod_template.metadata.labels["user"] = escape(getpass.getuser())
pod_template.metadata.labels["app"] = "dask"
pod_template.metadata.namespace = self.namespace
if self.env:
pod_template.spec.containers[0].env.extend(
[
kubernetes.client.V1EnvVar(name=k, value=str(v))
for k, v in self.env.items()
]
)
pod_template.metadata.generate_name = self._generate_name
return pod_template
async def _start(self):
self.pod_template = self._get_pod_template(self.pod_template, pod_type="worker")
self.scheduler_pod_template = self._get_pod_template(
self.scheduler_pod_template, pod_type="scheduler"
)
if not self.pod_template:
msg = (
"Worker pod specification not provided. See KubeCluster "
"docstring for ways to specify workers"
)
raise ValueError(msg)
base_pod_template = self.pod_template
self.pod_template = clean_pod_template(self.pod_template, pod_type="worker")
if not self.scheduler_pod_template:
self.scheduler_pod_template = base_pod_template
self.scheduler_pod_template.spec.containers[0].args = ["dask-scheduler"]
self.scheduler_pod_template = clean_pod_template(
self.scheduler_pod_template, pod_type="scheduler"
)
await ClusterAuth.load_first(self.auth)
self.core_api = kubernetes.client.CoreV1Api()
self.policy_api = kubernetes.client.PolicyV1beta1Api()
if self.namespace is None:
self.namespace = namespace_default()
environ = {k: v for k, v in os.environ.items() if k not in ["user", "uuid"]}
self._generate_name = self._generate_name.format(
user=getpass.getuser(), uuid=str(uuid.uuid4())[:10], **environ
)
self._generate_name = escape(self._generate_name)
self.pod_template = self._fill_pod_templates(
self.pod_template, pod_type="worker"
)
self.scheduler_pod_template = self._fill_pod_templates(
self.scheduler_pod_template, pod_type="scheduler"
)
common_options = {
"cluster": self,
"core_api": self.core_api,
"policy_api": self.policy_api,
"namespace": self.namespace,
"loop": self.loop,
}
if self._deploy_mode == "local":
self.scheduler_spec = {
"cls": dask.distributed.Scheduler,
"options": {
"protocol": self._protocol,
"interface": self._interface,
"host": self.host,
"port": self.port,
"dashboard_address": self._dashboard_address,
"security": self.security,
},
}
elif self._deploy_mode == "remote":
self.scheduler_spec = {
"cls": Scheduler,
"options": {
"idle_timeout": self._idle_timeout,
"service_wait_timeout_s": self._scheduler_service_wait_timeout,
"service_name_retries": self._scheduler_service_name_resolution_retries,
"pod_template": self.scheduler_pod_template,
**common_options,
},
}
else:
raise RuntimeError("Unknown deploy mode %s" % self._deploy_mode)
self.new_spec = {
"cls": Worker,
"options": {"pod_template": self.pod_template, **common_options},
}
self.worker_spec = {i: self.new_spec for i in range(self._n_workers)}
self.name = self.pod_template.metadata.generate_name
await super()._start()
@classmethod
def from_dict(cls, pod_spec, **kwargs):
warnings.warn(
"KubeCluster.from_dict is deprecated, use the constructor directly"
)
return cls(pod_spec, **kwargs)
@classmethod
def from_yaml(cls, yaml_path, **kwargs):
warnings.warn(
"KubeCluster.from_yaml is deprecated, use the constructor directly"
)
return cls(yaml_path, **kwargs)
def scale(self, n):
# A shim to maintain backward compatibility
# https://github.com/dask/distributed/issues/3054
maximum = dask.config.get("kubernetes.count.max")
if maximum is not None and maximum < n:
logger.info(
"Tried to scale beyond maximum number of workers %d > %d", n, maximum
)
n = maximum
return super().scale(n)
async def _logs(self, scheduler=True, workers=True):
logs = Logs()
if scheduler:
logs["Scheduler"] = await self.scheduler.logs()
if workers:
worker_logs = await asyncio.gather(
*[w.logs() for w in self.workers.values()]
)
for key, log in zip(self.workers, worker_logs):
logs[key] = log
return logs
| true | true |
f720789c8c15c24400e6e5290d6e30a895646242 | 7,694 | py | Python | rfmizer/tests_views.py | JunglistMNSQ/rfm | f42fa1424edbc9c57c9cd27d8183574f72acbf5c | [
"BSD-3-Clause"
] | null | null | null | rfmizer/tests_views.py | JunglistMNSQ/rfm | f42fa1424edbc9c57c9cd27d8183574f72acbf5c | [
"BSD-3-Clause"
] | 4 | 2021-06-04T23:58:19.000Z | 2021-09-22T19:38:00.000Z | rfmizer/tests_views.py | JunglistMNSQ/rfm | f42fa1424edbc9c57c9cd27d8183574f72acbf5c | [
"BSD-3-Clause"
] | null | null | null | from django.test import TestCase
from django.urls import reverse
from unittest import mock
from .fixtures import FixturesMixin
from .models import Person, Tab, User
import hashlib
# Create your tests here.
class TestRegister(FixturesMixin, TestCase):
    """Tests for the /register/ view: account creation and validation."""
    def test_create_and_login(self):
        """A freshly registered user exists in the DB and can log in to /profile/."""
        self.client.post('/register/',
                         {'username': 'TestUser1',
                          'email': 'test@test.com',
                          'password': 'password',
                          'password2': 'password'})
        session = self.client.session
        session.save()
        user = User.objects.get_by_natural_key('TestUser1')
        self.assertEqual(user.get_username(), 'TestUser1')
        response = self.client.post('/login/',
                                    {'username': 'TestUser1',
                                     'password': 'password'},
                                    follow=True)
        self.assertEqual(response.redirect_chain, [('/profile/', 302)])
    def test_create_with_different_passwords(self):
        """Registration with mismatched passwords surfaces a validation message."""
        response = self.client.post('/register/',
                                    {'username': 'TestUser1',
                                     'email': 'test@test.com',
                                     'password': 'password1',
                                     'password2': 'password2'})
        # Message is Russian for "Passwords do not match".
        # NOTE(review): assertRaisesMessage expects an exception class and a
        # callable; passing a response object likely does not assert what is
        # intended -- consider assertContains(response, ...) instead.
        self.assertRaisesMessage(response, 'Пароли не совпадают')
class TestLogin(FixturesMixin, TestCase):
    """Tests for the /login/ view."""
    def test_login(self):
        """A valid fixture user logging in is redirected to /profile/."""
        response = self.client.post('/login/',
                                    {'username': 'TestUser',
                                     'password': 'password'},
                                    follow=True)
        self.assertEqual(response.redirect_chain, [('/profile/', 302)])
class TestUploadToParse(FixturesMixin, TestCase):
    """Tests the upload -> parse pipeline: file upload, column mapping, redirects."""
    def test_get(self):
        """GET /upload/ renders and the fixture table belongs to the fixture user."""
        response = self.client.get('/upload/', )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(Tab.objects.filter(owner=self.user)[0],
                         self.tab_exist)
    def test_create_and_parse_corrupt_file(self):
        """Uploading into a NEW table and parsing with a bad column order
        redirects to /corrupt_data/."""
        with open(self.file) as f:
            response = self.client.post(
                '/upload/',
                {'name': 'test1',
                 'file': f},
                follow=True
            )
        # The upload view stores the created table's pk in the session.
        session = self.client.session
        session.save()
        tab = Tab.objects.get(pk=session['tab'])
        self.assertTrue(response.context['lines'])
        self.assertEqual(tab.name, 'test1')
        self.assertTrue(session['tab_is_new'])
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.redirect_chain,
                         [('/parse/', 302)])
        # This column assignment mismatches the fixture file's layout, so the
        # parser should reject the data.
        response = self.client.post('/parse/',
                                    {'col4': 'date',
                                     'col3': 'name',
                                     'col2': 'phone',
                                     'col1': 'good',
                                     'col0': 'pay'},
                                    follow=True
                                    )
        self.assertEqual(response.redirect_chain,
                         [('/corrupt_data/',
                           302)])
        self.assertEqual(response.status_code, 200)
    def test_update_and_parse(self):
        """Uploading into an EXISTING table and parsing with the correct column
        order lands on that table's page."""
        with open(self.file) as f:
            response = self.client.post(
                '/upload/',
                {'choice_exist_tab': self.tab_exist.id,
                 'file': f},
                follow=True
            )
        session = self.client.session
        session.save()
        tab = Tab.objects.get(pk=session['tab'])
        self.assertTrue(response.context['lines'])
        self.assertEqual(session['tab_is_new'], False)
        self.assertEqual(tab.name, self.tab_exist.name)
        self.assertEqual(response.redirect_chain,
                         [('/parse/', 302)])
        # self.column_order comes from FixturesMixin and matches the file.
        response = self.client.post('/parse/',
                                    self.column_order,
                                    follow=True
                                    )
        tab = Tab.objects.get(pk=session['tab'])
        self.assertEqual(
            response.redirect_chain,
            [('/my_tables/' + tab.slug, 302)]
        )
class TestMyTables(FixturesMixin, TestCase):
    """Tests the /my_tables/ listing view."""
    def test_get(self):
        """The listing contains exactly the user's three fixture tables."""
        response = self.client.get('/my_tables/')
        qs = response.context['list_tab']
        self.assertSetEqual(
            set(qs),
            {self.tab_exist,
             Tab.objects.get(pk=2),
             Tab.objects.get(pk=3)}
        )
        self.assertEqual(response.status_code, 200)
class TestManageTab(FixturesMixin, TestCase):
    """Smoke test for the per-table management page."""
    def setUp(self):
        super(TestManageTab, self).setUp()
        self.url = reverse('manage_tab', args=(self.tab_exist.slug, ))
    def test_get_post(self):
        """GET the management page of an existing table; expect HTTP 200."""
        response = self.client.get(self.url)
        # NOTE(review): the session fetch/save below has no observable effect
        # on the assertion -- looks like leftover scaffolding.
        session = self.client.session
        session.save()
        self.assertEqual(response.status_code, 200)
class TestDeleteTab(FixturesMixin, TestCase):
    """Tests table deletion."""
    def test_post(self):
        """Deleting a freshly created table redirects back to the table list."""
        test_tab = Tab(owner=self.user, name='test_tab_del')
        test_tab.save()
        url = reverse('delete', args=(test_tab.slug, ))
        response = self.client.post(url,
                                    follow=True)
        # Two hops: the view redirects to /my_tables, Django then appends the
        # trailing slash with a 301.
        self.assertEqual(response.redirect_chain,
                         [('/my_tables', 302), ('/my_tables/', 301)])
class TestLog(FixturesMixin, TestCase):
    """Smoke test for the /log/ page."""
    def test_log(self):
        """GET /log/ renders successfully."""
        page = self.client.get('/log/')
        self.assertEqual(page.status_code, 200)
class TestClientList(FixturesMixin, TestCase):
    """Smoke test for the per-table client list page."""
    def test_get(self):
        """GET the client list of an existing table; expect HTTP 200."""
        list_url = reverse('client_list', kwargs={'slug': self.tab_exist.slug, })
        page = self.client.get(list_url)
        self.assertEqual(page.status_code, 200)
class TestClientCard(FixturesMixin, TestCase):
    """Smoke test for the single-client card page."""
    def test_get(self):
        """Create a client row from fixture data and GET its card page."""
        new_client = Person.get_new_line(self.data)
        url = reverse('client_card',
                      kwargs={'slug_tab': self.tab_exist.slug,
                              'slug': new_client.slug})
        response = self.client.get(url)
        # NOTE(review): the session fetch/save below is unused by the
        # assertion -- looks like leftover scaffolding.
        session = self.client.session
        session.save()
        self.assertEqual(response.status_code, 200)
class TestRulesList(FixturesMixin, TestCase):
    """Smoke test for the per-table rules page."""
    def test_get(self):
        """GET the rules page of an existing table; expect HTTP 200."""
        rules_url = reverse('rules', kwargs={'slug': self.tab_exist.slug})
        page = self.client.get(rules_url)
        self.assertEqual(page.status_code, 200)
class TestProfile(FixturesMixin, TestCase):
    """Tests the /profile/ page and SMS-credential saving."""
    def test_get(self):
        """GET /profile/ renders successfully."""
        response = self.client.get('/profile/')
        self.assertEqual(response.status_code, 200)
    # The SMS gateway is mocked so no network call happens; the mocked
    # check_balance reports success with a balance of 25.
    @mock.patch('rfmizer.sms.RocketSMS.check_balance',
                return_value=[True, 25, None])
    def test_post(self, balance_mock):
        """Posting SMS credentials stores login, hashed password, and balance."""
        password = 'test_sms_pass'
        login = 'test_sms_login'
        response = self.client.post('/profile/',
                                    {'sms_login': login,
                                     'sms_pass': password},
                                    follow=True)
        # NOTE(review): the application stores the SMS password as an unsalted
        # MD5 digest (mirrored here so the test matches app behavior). MD5 is
        # unsuitable for credential storage -- fix in the app, then update here.
        hash_pass = hashlib.md5(password.encode('utf-8')).hexdigest()
        user = User.objects.get(pk=self.user.pk)
        self.assertEqual(user.profile.sms_login, login)
        self.assertEqual(user.profile.sms_pass, hash_pass)
        self.assertEqual(user.profile.balance, 25)
        self.assertEqual(response.status_code, 200)
        balance_mock.assert_called_once()
| 38.089109 | 71 | 0.525214 | from django.test import TestCase
from django.urls import reverse
from unittest import mock
from .fixtures import FixturesMixin
from .models import Person, Tab, User
import hashlib
class TestRegister(FixturesMixin, TestCase):
def test_create_and_login(self):
self.client.post('/register/',
{'username': 'TestUser1',
'email': 'test@test.com',
'password': 'password',
'password2': 'password'})
session = self.client.session
session.save()
user = User.objects.get_by_natural_key('TestUser1')
self.assertEqual(user.get_username(), 'TestUser1')
response = self.client.post('/login/',
{'username': 'TestUser1',
'password': 'password'},
follow=True)
self.assertEqual(response.redirect_chain, [('/profile/', 302)])
def test_create_with_different_passwords(self):
response = self.client.post('/register/',
{'username': 'TestUser1',
'email': 'test@test.com',
'password': 'password1',
'password2': 'password2'})
self.assertRaisesMessage(response, 'Пароли не совпадают')
class TestLogin(FixturesMixin, TestCase):
def test_login(self):
response = self.client.post('/login/',
{'username': 'TestUser',
'password': 'password'},
follow=True)
self.assertEqual(response.redirect_chain, [('/profile/', 302)])
class TestUploadToParse(FixturesMixin, TestCase):
def test_get(self):
response = self.client.get('/upload/', )
self.assertEqual(response.status_code, 200)
self.assertEqual(Tab.objects.filter(owner=self.user)[0],
self.tab_exist)
def test_create_and_parse_corrupt_file(self):
with open(self.file) as f:
response = self.client.post(
'/upload/',
{'name': 'test1',
'file': f},
follow=True
)
session = self.client.session
session.save()
tab = Tab.objects.get(pk=session['tab'])
self.assertTrue(response.context['lines'])
self.assertEqual(tab.name, 'test1')
self.assertTrue(session['tab_is_new'])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.redirect_chain,
[('/parse/', 302)])
response = self.client.post('/parse/',
{'col4': 'date',
'col3': 'name',
'col2': 'phone',
'col1': 'good',
'col0': 'pay'},
follow=True
)
self.assertEqual(response.redirect_chain,
[('/corrupt_data/',
302)])
self.assertEqual(response.status_code, 200)
def test_update_and_parse(self):
with open(self.file) as f:
response = self.client.post(
'/upload/',
{'choice_exist_tab': self.tab_exist.id,
'file': f},
follow=True
)
session = self.client.session
session.save()
tab = Tab.objects.get(pk=session['tab'])
self.assertTrue(response.context['lines'])
self.assertEqual(session['tab_is_new'], False)
self.assertEqual(tab.name, self.tab_exist.name)
self.assertEqual(response.redirect_chain,
[('/parse/', 302)])
response = self.client.post('/parse/',
self.column_order,
follow=True
)
tab = Tab.objects.get(pk=session['tab'])
self.assertEqual(
response.redirect_chain,
[('/my_tables/' + tab.slug, 302)]
)
class TestMyTables(FixturesMixin, TestCase):
def test_get(self):
response = self.client.get('/my_tables/')
qs = response.context['list_tab']
self.assertSetEqual(
set(qs),
{self.tab_exist,
Tab.objects.get(pk=2),
Tab.objects.get(pk=3)}
)
self.assertEqual(response.status_code, 200)
class TestManageTab(FixturesMixin, TestCase):
def setUp(self):
super(TestManageTab, self).setUp()
self.url = reverse('manage_tab', args=(self.tab_exist.slug, ))
def test_get_post(self):
response = self.client.get(self.url)
session = self.client.session
session.save()
self.assertEqual(response.status_code, 200)
class TestDeleteTab(FixturesMixin, TestCase):
def test_post(self):
test_tab = Tab(owner=self.user, name='test_tab_del')
test_tab.save()
url = reverse('delete', args=(test_tab.slug, ))
response = self.client.post(url,
follow=True)
self.assertEqual(response.redirect_chain,
[('/my_tables', 302), ('/my_tables/', 301)])
class TestLog(FixturesMixin, TestCase):
def test_log(self):
response = self.client.get('/log/')
self.assertEqual(response.status_code, 200)
class TestClientList(FixturesMixin, TestCase):
def test_get(self):
url = reverse('client_list',
kwargs={'slug': self.tab_exist.slug, })
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
class TestClientCard(FixturesMixin, TestCase):
def test_get(self):
new_client = Person.get_new_line(self.data)
url = reverse('client_card',
kwargs={'slug_tab': self.tab_exist.slug,
'slug': new_client.slug})
response = self.client.get(url)
session = self.client.session
session.save()
self.assertEqual(response.status_code, 200)
class TestRulesList(FixturesMixin, TestCase):
def test_get(self):
url = reverse('rules', kwargs={'slug': self.tab_exist.slug})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
class TestProfile(FixturesMixin, TestCase):
def test_get(self):
response = self.client.get('/profile/')
self.assertEqual(response.status_code, 200)
@mock.patch('rfmizer.sms.RocketSMS.check_balance',
return_value=[True, 25, None])
def test_post(self, balance_mock):
password = 'test_sms_pass'
login = 'test_sms_login'
response = self.client.post('/profile/',
{'sms_login': login,
'sms_pass': password},
follow=True)
hash_pass = hashlib.md5(password.encode('utf-8')).hexdigest()
user = User.objects.get(pk=self.user.pk)
self.assertEqual(user.profile.sms_login, login)
self.assertEqual(user.profile.sms_pass, hash_pass)
self.assertEqual(user.profile.balance, 25)
self.assertEqual(response.status_code, 200)
balance_mock.assert_called_once()
| true | true |
f72078f20b5cc5766f8c851d62a5d4fcbc04c993 | 13 | py | Python | python/testData/inspections/PyUnresolvedReferencesInspection/ImportToContainingFileInPackage/p1/__init__.py | truthiswill/intellij-community | fff88cfb0dc168eea18ecb745d3e5b93f57b0b95 | [
"Apache-2.0"
] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/testData/inspections/PyUnresolvedReferencesInspection/ImportToContainingFileInPackage/p1/__init__.py | truthiswill/intellij-community | fff88cfb0dc168eea18ecb745d3e5b93f57b0b95 | [
"Apache-2.0"
] | 173 | 2018-07-05T13:59:39.000Z | 2018-08-09T01:12:03.000Z | python/testData/inspections/PyUnresolvedReferencesInspection/ImportToContainingFileInPackage/p1/__init__.py | truthiswill/intellij-community | fff88cfb0dc168eea18ecb745d3e5b93f57b0b95 | [
"Apache-2.0"
] | 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z | import p1.m1
| 6.5 | 12 | 0.769231 | import p1.m1
| true | true |
f72079009d9d6b4d0c1dd53f8c56f7204272da9b | 1,527 | py | Python | firmware/adafruit-circuitpython-bundle-5.x-mpy-20200915/examples/st7735r_minitft_simpletest.py | freeglow/microcontroller-cpy | 5adfda49da6eefaece81be2a2f26122d68736355 | [
"MIT"
] | null | null | null | firmware/adafruit-circuitpython-bundle-5.x-mpy-20200915/examples/st7735r_minitft_simpletest.py | freeglow/microcontroller-cpy | 5adfda49da6eefaece81be2a2f26122d68736355 | [
"MIT"
] | null | null | null | firmware/adafruit-circuitpython-bundle-5.x-mpy-20200915/examples/st7735r_minitft_simpletest.py | freeglow/microcontroller-cpy | 5adfda49da6eefaece81be2a2f26122d68736355 | [
"MIT"
] | null | null | null | """
This test will initialize the display using displayio and draw a solid green
background, a smaller purple rectangle, and some yellow text.
"""
# Demo for a 160x80 ST7735R mini TFT: solid green background, a smaller
# purple rectangle, and scaled yellow text, all rendered with displayio.
import board
import terminalio
import displayio
from adafruit_display_text import label
from adafruit_st7735r import ST7735R
# Release any resources currently in use for the displays
displayio.release_displays()
spi = board.SPI()
tft_cs = board.D5
tft_dc = board.D6
display_bus = displayio.FourWire(
    spi, command=tft_dc, chip_select=tft_cs, reset=board.D9
)
# colstart=24 offsets into driver RAM and bgr=True selects blue-green-red
# subpixel order.
# NOTE(review): assumed correct for this specific 160x80 panel variant --
# confirm on hardware if colors or position look wrong.
display = ST7735R(
    display_bus, width=160, height=80, colstart=24, rotation=270, bgr=True
)
# Make the display context
splash = displayio.Group(max_size=10)
display.show(splash)
# Full-screen background: a one-color bitmap painted bright green.
color_bitmap = displayio.Bitmap(160, 80, 1)
color_palette = displayio.Palette(1)
color_palette[0] = 0x00FF00 # Bright Green
bg_sprite = displayio.TileGrid(color_bitmap, pixel_shader=color_palette, x=0, y=0)
splash.append(bg_sprite)
# Draw a smaller inner rectangle
inner_bitmap = displayio.Bitmap(150, 70, 1)
inner_palette = displayio.Palette(1)
inner_palette[0] = 0xAA0088 # Purple
inner_sprite = displayio.TileGrid(inner_bitmap, pixel_shader=inner_palette, x=5, y=5)
splash.append(inner_sprite)
# Draw a label
text_group = displayio.Group(max_size=10, scale=2, x=11, y=40)
text = "Hello World!"
text_area = label.Label(terminalio.FONT, text=text, color=0xFFFF00)
text_group.append(text_area) # Subgroup for text scaling
splash.append(text_group)
# Idle forever so the drawn scene stays on screen.
while True:
    pass
| 28.277778 | 86 | 0.749836 |
import board
import terminalio
import displayio
from adafruit_display_text import label
from adafruit_st7735r import ST7735R
displayio.release_displays()
spi = board.SPI()
tft_cs = board.D5
tft_dc = board.D6
display_bus = displayio.FourWire(
spi, command=tft_dc, chip_select=tft_cs, reset=board.D9
)
display = ST7735R(
display_bus, width=160, height=80, colstart=24, rotation=270, bgr=True
)
splash = displayio.Group(max_size=10)
display.show(splash)
color_bitmap = displayio.Bitmap(160, 80, 1)
color_palette = displayio.Palette(1)
color_palette[0] = 0x00FF00
bg_sprite = displayio.TileGrid(color_bitmap, pixel_shader=color_palette, x=0, y=0)
splash.append(bg_sprite)
inner_bitmap = displayio.Bitmap(150, 70, 1)
inner_palette = displayio.Palette(1)
inner_palette[0] = 0xAA0088
inner_sprite = displayio.TileGrid(inner_bitmap, pixel_shader=inner_palette, x=5, y=5)
splash.append(inner_sprite)
text_group = displayio.Group(max_size=10, scale=2, x=11, y=40)
text = "Hello World!"
text_area = label.Label(terminalio.FONT, text=text, color=0xFFFF00)
text_group.append(text_area)
splash.append(text_group)
while True:
pass
| true | true |
f7207935c2b023e55a08f2a1a9a84c47dc130d71 | 16,613 | py | Python | model/model.py | sahara2001/editsql | d4325ac996d1ed0069def6d349e43e2a1914e761 | [
"MIT"
] | null | null | null | model/model.py | sahara2001/editsql | d4325ac996d1ed0069def6d349e43e2a1914e761 | [
"MIT"
] | null | null | null | model/model.py | sahara2001/editsql | d4325ac996d1ed0069def6d349e43e2a1914e761 | [
"MIT"
] | null | null | null | """ Class for the Sequence to sequence model for ATIS."""
import os
import torch
import torch.nn.functional as F
from . import torch_utils
from . import utils_bert
from data_util.vocabulary import DEL_TOK, UNK_TOK
from .encoder import Encoder, Encoder_Gnn
from .embedder import Embedder
from .token_predictor import construct_token_predictor
import numpy as np
from data_util.atis_vocab import ATISVocabulary
from .gated_graph_conv import GatedGraphConv
def get_token_indices(token, index_to_token):
    """ Maps from a gold token (string) to a list of indices.

    Inputs:
        token (string): String to look up.
        index_to_token (list of tokens): Ordered list of tokens.

    Returns:
        list of int, representing the indices of the token in the probability
        distribution.
    """
    # Single pass collects every position at which the token occurs.  This
    # also covers the no-duplicates case (exactly one position), replacing
    # the original's extra membership test and per-call set() materialization.
    indices = [index for index, other_token in enumerate(index_to_token)
               if other_token == token]
    if indices:
        return indices
    # Out-of-vocabulary tokens map to the unknown-token index.
    return [index_to_token.index(UNK_TOK)]
def flatten_utterances(utterances):
    """ Gets a flat sequence from a sequence of utterances.

    Inputs:
        utterances (list of list of str): Utterances to concatenate.

    Returns:
        list of str, representing the flattened sequence with separating
        delimiter tokens.
    """
    flat = []
    last_index = len(utterances) - 1
    for position, utterance in enumerate(utterances):
        flat += utterance
        # A delimiter token goes between utterances, not after the last one.
        if position != last_index:
            flat.append(DEL_TOK)
    return flat
def encode_snippets_with_states(snippets, states):
    """ Encodes snippets by using previous query states instead.

    Inputs:
        snippets (list of Snippet): Input snippets.
        states (list of expressions): Previous hidden states to use, indexed
            by position in the previous query.

    Returns:
        the same snippets, each with its embedding set to the concatenation
        of its start-position and end-position states.
    """
    for snippet in snippets:
        embedding = torch.cat(
            [states[snippet.startpos], states[snippet.endpos]], dim=0)
        snippet.set_embedding(embedding)
    return snippets
def load_word_embeddings(input_vocabulary, output_vocabulary, output_vocabulary_schema, params):
    """ Loads GloVe embeddings for the input, output, and schema vocabularies.

    Inputs:
        input_vocabulary: Natural-language vocabulary.
        output_vocabulary: SQL output vocabulary.
        output_vocabulary_schema: Schema-token vocabulary, or None/empty.
        params: Run parameters; uses embedding_filename, debug, and train.

    Returns:
        tuple (input embeddings, output embeddings, schema embeddings or None,
        embedding size); each embedding matrix is a float32 numpy array
        indexed by token id, with OOV rows left as zeros.
    """
    def read_glove_embedding(embedding_filename, embedding_size):
        """Reads the GloVe file into {word: vector}; capped in debug/eval mode."""
        glove_embeddings = {}
        # GloVe distributions are UTF-8; be explicit so reading does not
        # depend on the platform's default encoding.
        with open(embedding_filename, encoding='utf-8') as f:
            for count, line in enumerate(f, start=1):
                # In debug (or evaluation-only) runs, keep startup fast by
                # loading only the first 1000 vectors.  The original counter
                # stopped after 998 lines while reporting 1000.
                if (params.debug or not params.train) and count > 1000:
                    print('Read 1000 word embeddings')
                    break
                l_split = line.split()
                # Multi-token "words" occupy every field before the trailing
                # embedding_size floats.
                word = " ".join(l_split[0:len(l_split) - embedding_size])
                embedding = np.array([float(val) for val in l_split[-embedding_size:]])
                glove_embeddings[word] = embedding
        return glove_embeddings
    print('Loading Glove Embedding from', params.embedding_filename)
    glove_embedding_size = 300
    glove_embeddings = read_glove_embedding(params.embedding_filename, glove_embedding_size)
    print('Done')
    input_embedding_size = glove_embedding_size
    def create_word_embeddings(vocab):
        """Builds a (len(vocab), 300) float32 matrix; OOV rows stay zero."""
        vocabulary_embeddings = np.zeros((len(vocab), glove_embedding_size), dtype=np.float32)
        glove_oov = 0
        for token in vocab.inorder_tokens:
            token_id = vocab.token_to_id(token)
            if token in glove_embeddings:
                vocabulary_embeddings[token_id][:glove_embedding_size] = glove_embeddings[token]
            else:
                glove_oov += 1
        # 'Para OOV' is kept for log-format compatibility; no paraphrase
        # embeddings are loaded in this code path, so it is always 0.
        print('Glove OOV:', glove_oov, 'Para OOV', 0, 'Total', len(vocab))
        return vocabulary_embeddings
    input_vocabulary_embeddings = create_word_embeddings(input_vocabulary)
    output_vocabulary_embeddings = create_word_embeddings(output_vocabulary)
    output_vocabulary_schema_embeddings = None
    if output_vocabulary_schema:
        output_vocabulary_schema_embeddings = create_word_embeddings(output_vocabulary_schema)
    return input_vocabulary_embeddings, output_vocabulary_embeddings, output_vocabulary_schema_embeddings, input_embedding_size
class ATISModel(torch.nn.Module):
""" Sequence-to-sequence model for predicting a SQL query given an utterance
and an interaction prefix.
"""
    def __init__(
            self,
            params,
            input_vocabulary,
            output_vocabulary,
            output_vocabulary_schema,
            anonymizer):
        """ Builds embedders, encoders, and auxiliary modules from run params.

        Inputs:
            params: Run configuration (embedding sizes, encoder sizes, and
                the use_bert / use_gnn / snippet flags read below).
            input_vocabulary: Natural-language vocabulary.
            output_vocabulary: SQL output vocabulary.
            output_vocabulary_schema: Schema-token vocabulary (may be unused
                when running on ATIS or with BERT).
            anonymizer: Anonymization helper passed through to the embedders.
        """
        super().__init__()
        self.params = params
        if params.use_bert:
            self.model_bert, self.tokenizer, self.bert_config = utils_bert.get_bert(params)
        self.gnn=None
        # Non-ATIS data (e.g. Spider/SParC) loads pretrained GloVe vectors;
        # ATIS uses randomly initialized embeddings (the else branch below).
        if 'atis' not in params.data_directory:
            if params.use_bert:
                if params.use_gnn:
                    encoder_input_size = self.bert_config.hidden_size
                    encoder_output_size = params.encoder_state_size
                    self.gnn = GatedGraphConv(encoder_output_size, 2, 3) #input_dim, num_timesteps, num_edge_types,
                input_vocabulary_embeddings, output_vocabulary_embeddings, output_vocabulary_schema_embeddings, input_embedding_size = load_word_embeddings(input_vocabulary, output_vocabulary, output_vocabulary_schema, params)
                # Create the output embeddings
                # With BERT, input and schema tokens are embedded by BERT
                # itself, so only the output embedder is built here.
                self.output_embedder = Embedder(params.output_embedding_size,
                                                name="output-embedding",
                                                initializer=output_vocabulary_embeddings,
                                                vocabulary=output_vocabulary,
                                                anonymizer=anonymizer,
                                                freeze=False)
                self.column_name_token_embedder = None
            else:
                input_vocabulary_embeddings, output_vocabulary_embeddings, output_vocabulary_schema_embeddings, input_embedding_size = load_word_embeddings(input_vocabulary, output_vocabulary, output_vocabulary_schema, params)
                # The effective input embedding size is dictated by the
                # loaded GloVe vectors.
                params.input_embedding_size = input_embedding_size
                self.params.input_embedding_size = input_embedding_size
                # Create the input embeddings
                self.input_embedder = Embedder(params.input_embedding_size,
                                               name="input-embedding",
                                               initializer=input_vocabulary_embeddings,
                                               vocabulary=input_vocabulary,
                                               anonymizer=anonymizer,
                                               freeze=params.freeze)
                # Create the output embeddings
                self.output_embedder = Embedder(params.output_embedding_size,
                                                name="output-embedding",
                                                initializer=output_vocabulary_embeddings,
                                                vocabulary=output_vocabulary,
                                                anonymizer=anonymizer,
                                                freeze=False)
                self.column_name_token_embedder = Embedder(params.input_embedding_size,
                                                           name="schema-embedding",
                                                           initializer=output_vocabulary_schema_embeddings,
                                                           vocabulary=output_vocabulary_schema,
                                                           anonymizer=anonymizer,
                                                           freeze=params.freeze)
        else:
            # Create the input embeddings
            self.input_embedder = Embedder(params.input_embedding_size,
                                           name="input-embedding",
                                           vocabulary=input_vocabulary,
                                           anonymizer=anonymizer,
                                           freeze=False)
            # Create the output embeddings
            self.output_embedder = Embedder(params.output_embedding_size,
                                            name="output-embedding",
                                            vocabulary=output_vocabulary,
                                            anonymizer=anonymizer,
                                            freeze=False)
            self.column_name_token_embedder = None
        # Create the encoder
        encoder_input_size = params.input_embedding_size
        encoder_output_size = params.encoder_state_size
        if params.use_bert:
            encoder_input_size = self.bert_config.hidden_size
        if params.discourse_level_lstm:
            # Encoder input is augmented with half an encoder state of
            # discourse context per token.
            encoder_input_size += params.encoder_state_size / 2
        self.utterance_encoder = Encoder(params.encoder_num_layers, encoder_input_size, encoder_output_size)
        # Positional embedder for utterances
        attention_key_size = params.encoder_state_size
        self.schema_attention_key_size = attention_key_size
        if params.state_positional_embeddings:
            attention_key_size += params.positional_embedding_size
            self.positional_embedder = Embedder(
                params.positional_embedding_size,
                name="positional-embedding",
                num_tokens=params.maximum_utterances)
        self.utterance_attention_key_size = attention_key_size
        # Create the discourse-level LSTM parameters
        if params.discourse_level_lstm:
            self.discourse_lstms = torch_utils.create_multilayer_lstm_params(1, params.encoder_state_size, params.encoder_state_size / 2, "LSTM-t")
            self.initial_discourse_state = torch_utils.add_params(tuple([params.encoder_state_size / 2]), "V-turn-state-0")
        # Snippet encoder
        final_snippet_size = 0
        if params.use_snippets and not params.previous_decoder_snippet_encoding:
            snippet_encoding_size = int(params.encoder_state_size / 2)
            final_snippet_size = params.encoder_state_size
            if params.snippet_age_embedding:
                # Reserve part of the snippet encoding for the age embedding
                # that gets concatenated on in _encode_snippets.
                snippet_encoding_size -= int(
                    params.snippet_age_embedding_size / 4)
                self.snippet_age_embedder = Embedder(
                    params.snippet_age_embedding_size,
                    name="snippet-age-embedding",
                    num_tokens=params.max_snippet_age_embedding)
                final_snippet_size = params.encoder_state_size + params.snippet_age_embedding_size / 2
            self.snippet_encoder = Encoder(params.snippet_num_layers,
                                           params.output_embedding_size,
                                           snippet_encoding_size)
        # Previous query Encoder
        if params.use_previous_query:
            self.query_encoder = Encoder(params.encoder_num_layers, params.output_embedding_size, params.encoder_state_size)
        self.final_snippet_size = final_snippet_size
        # Dropout amount; training code overrides this via set_dropout.
        self.dropout = 0.
    def _encode_snippets(self, previous_query, snippets, input_schema):
        """ Computes a single vector representation for each snippet.

        Inputs:
            previous_query (list of str): Previous query in the interaction.
            snippets (list of Snippet): Snippets extracted from the previous
                query.
            input_schema: Schema object, or falsy when no schema is used.

        Returns:
            list of Snippets, where the embedding is set to a vector.
        """
        startpoints = [snippet.startpos for snippet in snippets]
        endpoints = [snippet.endpos for snippet in snippets]
        assert len(startpoints) == 0 or min(startpoints) >= 0
        # With a schema, endpos is an exclusive bound (hence <= and the
        # endpos-1 indexing below); without one it is inclusive.
        if input_schema:
            assert len(endpoints) == 0 or max(endpoints) <= len(previous_query)
        else:
            assert len(endpoints) == 0 or max(endpoints) < len(previous_query)
        snippet_embedder = lambda query_token: self.get_query_token_embedding(query_token, input_schema)
        if previous_query and snippets:
            # Encode the full previous query once; each snippet embedding is
            # the concatenation of its boundary states.
            _, previous_outputs = self.snippet_encoder(
                previous_query, snippet_embedder, dropout_amount=self.dropout)
            assert len(previous_outputs) == len(previous_query)
            for snippet in snippets:
                if input_schema:
                    embedding = torch.cat([previous_outputs[snippet.startpos],previous_outputs[snippet.endpos-1]], dim=0)
                else:
                    embedding = torch.cat([previous_outputs[snippet.startpos],previous_outputs[snippet.endpos]], dim=0)
                if self.params.snippet_age_embedding:
                    # Append an embedding of the snippet's (clamped) age.
                    embedding = torch.cat([embedding, self.snippet_age_embedder(min(snippet.age, self.params.max_snippet_age_embedding - 1))], dim=0)
                snippet.set_embedding(embedding)
        return snippets
def _initialize_discourse_states(self):
discourse_state = self.initial_discourse_state
discourse_lstm_states = []
for lstm in self.discourse_lstms:
hidden_size = lstm.weight_hh.size()[1]
if lstm.weight_hh.is_cuda:
h_0 = torch.cuda.FloatTensor(1,hidden_size).fill_(0)
c_0 = torch.cuda.FloatTensor(1,hidden_size).fill_(0)
else:
h_0 = torch.zeros(1,hidden_size)
c_0 = torch.zeros(1,hidden_size)
discourse_lstm_states.append((h_0, c_0))
return discourse_state, discourse_lstm_states
    def _add_positional_embeddings(self, hidden_states, utterances, group=False):
        """ Concatenates a turn-position embedding onto each hidden state.

        Inputs:
            hidden_states: Flat list of per-token states covering all
                utterances, in order.
            utterances (list of list of str): The utterances the states
                correspond to; lengths must sum to len(hidden_states).
            group (bool): If True, return states grouped per utterance;
                otherwise return one flat list.

        Returns:
            tuple of (augmented states, flat token sequence), both restricted
            to the most recent params.maximum_utterances utterances.
        """
        grouped_states = []
        # Split the flat state list back into per-utterance groups.
        start_index = 0
        for utterance in utterances:
            grouped_states.append(hidden_states[start_index:start_index + len(utterance)])
            start_index += len(utterance)
        assert len(hidden_states) == sum([len(seq) for seq in grouped_states]) == sum([len(utterance) for utterance in utterances])
        new_states = []
        flat_sequence = []
        num_utterances_to_keep = min(self.params.maximum_utterances, len(utterances))
        for i, (states, utterance) in enumerate(zip(
                grouped_states[-num_utterances_to_keep:], utterances[-num_utterances_to_keep:])):
            positional_sequence = []
            # Position 0 is the most recent utterance; earlier utterances get
            # larger indices.
            index = num_utterances_to_keep - i - 1
            for state in states:
                positional_sequence.append(torch.cat([state, self.positional_embedder(index)], dim=0))
            assert len(positional_sequence) == len(utterance), \
                "Expected utterance and state sequence length to be the same, " \
                + "but they were " + str(len(utterance)) \
                + " and " + str(len(positional_sequence))
            if group:
                new_states.append(positional_sequence)
            else:
                new_states.extend(positional_sequence)
            flat_sequence.extend(utterance)
        return new_states, flat_sequence
def build_optim(self):
params_trainer = []
params_bert_trainer = []
for name, param in self.named_parameters():
if param.requires_grad:
if 'model_bert' in name:
params_bert_trainer.append(param)
else:
params_trainer.append(param)
self.trainer = torch.optim.Adam(params_trainer, lr=self.params.initial_learning_rate)
if self.params.fine_tune_bert:
self.bert_trainer = torch.optim.Adam(params_bert_trainer, lr=self.params.lr_bert)
    def set_dropout(self, value):
        """ Sets the dropout to a specified value.

        Inputs:
            value (float): Value to set dropout to.
        """
        # Consumed as dropout_amount by the encoders (see _encode_snippets).
        self.dropout = value
def set_learning_rate(self, value):
""" Sets the learning rate for the trainer.
Inputs:
value (float): The new learning rate.
"""
for param_group in self.trainer.param_groups:
param_group['lr'] = value
def save(self, filename):
""" Saves the model to the specified filename.
Inputs:
filename (str): The filename to save to.
"""
torch.save(self.state_dict(), filename)
def load(self, filename):
""" Loads saved parameters into the parameter collection.
Inputs:
filename (str): Name of file containing parameters.
"""
self.load_state_dict(torch.load(filename))
print("Loaded model from file " + filename)
| 41.325871 | 226 | 0.618612 |
import os
import torch
import torch.nn.functional as F
from . import torch_utils
from . import utils_bert
from data_util.vocabulary import DEL_TOK, UNK_TOK
from .encoder import Encoder, Encoder_Gnn
from .embedder import Embedder
from .token_predictor import construct_token_predictor
import numpy as np
from data_util.atis_vocab import ATISVocabulary
from .gated_graph_conv import GatedGraphConv
def get_token_indices(token, index_to_token):
if token in index_to_token:
if len(set(index_to_token)) == len(index_to_token):
return [index_to_token.index(token)]
else:
indices = []
for index, other_token in enumerate(index_to_token):
if token == other_token:
indices.append(index)
assert len(indices) == len(set(indices))
return indices
else:
return [index_to_token.index(UNK_TOK)]
def flatten_utterances(utterances):
sequence = []
for i, utterance in enumerate(utterances):
sequence.extend(utterance)
if i < len(utterances) - 1:
sequence.append(DEL_TOK)
return sequence
def encode_snippets_with_states(snippets, states):
for snippet in snippets:
snippet.set_embedding(torch.cat([states[snippet.startpos],states[snippet.endpos]], dim=0))
return snippets
def load_word_embeddings(input_vocabulary, output_vocabulary, output_vocabulary_schema, params):
def read_glove_embedding(embedding_filename, embedding_size):
glove_embeddings = {}
with open(embedding_filename) as f:
cnt = 1
for line in f:
cnt += 1
if params.debug or not params.train:
if cnt == 1000:
print('Read 1000 word embeddings')
break
l_split = line.split()
word = " ".join(l_split[0:len(l_split) - embedding_size])
embedding = np.array([float(val) for val in l_split[-embedding_size:]])
glove_embeddings[word] = embedding
return glove_embeddings
print('Loading Glove Embedding from', params.embedding_filename)
glove_embedding_size = 300
glove_embeddings = read_glove_embedding(params.embedding_filename, glove_embedding_size)
print('Done')
input_embedding_size = glove_embedding_size
def create_word_embeddings(vocab):
vocabulary_embeddings = np.zeros((len(vocab), glove_embedding_size), dtype=np.float32)
vocabulary_tokens = vocab.inorder_tokens
glove_oov = 0
para_oov = 0
for token in vocabulary_tokens:
token_id = vocab.token_to_id(token)
if token in glove_embeddings:
vocabulary_embeddings[token_id][:glove_embedding_size] = glove_embeddings[token]
else:
glove_oov += 1
print('Glove OOV:', glove_oov, 'Para OOV', para_oov, 'Total', len(vocab))
return vocabulary_embeddings
input_vocabulary_embeddings = create_word_embeddings(input_vocabulary)
output_vocabulary_embeddings = create_word_embeddings(output_vocabulary)
output_vocabulary_schema_embeddings = None
if output_vocabulary_schema:
output_vocabulary_schema_embeddings = create_word_embeddings(output_vocabulary_schema)
return input_vocabulary_embeddings, output_vocabulary_embeddings, output_vocabulary_schema_embeddings, input_embedding_size
class ATISModel(torch.nn.Module):
    """Base seq2seq model for context-dependent text-to-SQL.

    Builds embedders for utterance and SQL vocabularies (GloVe-initialized
    for non-ATIS data), an utterance encoder, an optional discourse-level
    LSTM, optional snippet/previous-query encoders, and (with BERT+GNN)
    a gated graph convolution over the schema.
    """
    def __init__(
            self,
            params,
            input_vocabulary,
            output_vocabulary,
            output_vocabulary_schema,
            anonymizer):
        super().__init__()
        self.params = params
        if params.use_bert:
            self.model_bert, self.tokenizer, self.bert_config = utils_bert.get_bert(params)
        self.gnn=None
        # Datasets other than ATIS get GloVe-initialized embedders and
        # (without BERT) a schema-token embedder.
        if 'atis' not in params.data_directory:
            if params.use_bert:
                if params.use_gnn:
                    encoder_input_size = self.bert_config.hidden_size
                    encoder_output_size = params.encoder_state_size
                    self.gnn = GatedGraphConv(encoder_output_size, 2, 3)
                # With BERT the input side is embedded by BERT itself, so
                # only the output (SQL) vocabulary gets a trainable embedder.
                input_vocabulary_embeddings, output_vocabulary_embeddings, output_vocabulary_schema_embeddings, input_embedding_size = load_word_embeddings(input_vocabulary, output_vocabulary, output_vocabulary_schema, params)
                self.output_embedder = Embedder(params.output_embedding_size,
                                                name="output-embedding",
                                                initializer=output_vocabulary_embeddings,
                                                vocabulary=output_vocabulary,
                                                anonymizer=anonymizer,
                                                freeze=False)
                self.column_name_token_embedder = None
            else:
                input_vocabulary_embeddings, output_vocabulary_embeddings, output_vocabulary_schema_embeddings, input_embedding_size = load_word_embeddings(input_vocabulary, output_vocabulary, output_vocabulary_schema, params)
                # The GloVe vector size overrides whatever was configured.
                params.input_embedding_size = input_embedding_size
                self.params.input_embedding_size = input_embedding_size
                self.input_embedder = Embedder(params.input_embedding_size,
                                               name="input-embedding",
                                               initializer=input_vocabulary_embeddings,
                                               vocabulary=input_vocabulary,
                                               anonymizer=anonymizer,
                                               freeze=params.freeze)
                self.output_embedder = Embedder(params.output_embedding_size,
                                                name="output-embedding",
                                                initializer=output_vocabulary_embeddings,
                                                vocabulary=output_vocabulary,
                                                anonymizer=anonymizer,
                                                freeze=False)
                self.column_name_token_embedder = Embedder(params.input_embedding_size,
                                                           name="schema-embedding",
                                                           initializer=output_vocabulary_schema_embeddings,
                                                           vocabulary=output_vocabulary_schema,
                                                           anonymizer=anonymizer,
                                                           freeze=params.freeze)
        else:
            # ATIS: randomly initialized embedders, no schema vocabulary.
            self.input_embedder = Embedder(params.input_embedding_size,
                                           name="input-embedding",
                                           vocabulary=input_vocabulary,
                                           anonymizer=anonymizer,
                                           freeze=False)
            self.output_embedder = Embedder(params.output_embedding_size,
                                            name="output-embedding",
                                            vocabulary=output_vocabulary,
                                            anonymizer=anonymizer,
                                            freeze=False)
            self.column_name_token_embedder = None
        encoder_input_size = params.input_embedding_size
        encoder_output_size = params.encoder_state_size
        if params.use_bert:
            encoder_input_size = self.bert_config.hidden_size
        if params.discourse_level_lstm:
            # NOTE(review): '/' yields a float in Python 3; downstream code
            # presumably casts these sizes to int — confirm.
            encoder_input_size += params.encoder_state_size / 2
        self.utterance_encoder = Encoder(params.encoder_num_layers, encoder_input_size, encoder_output_size)
        attention_key_size = params.encoder_state_size
        self.schema_attention_key_size = attention_key_size
        if params.state_positional_embeddings:
            # Turn-distance embedding concatenated to each hidden state.
            attention_key_size += params.positional_embedding_size
            self.positional_embedder = Embedder(
                params.positional_embedding_size,
                name="positional-embedding",
                num_tokens=params.maximum_utterances)
        self.utterance_attention_key_size = attention_key_size
        if params.discourse_level_lstm:
            self.discourse_lstms = torch_utils.create_multilayer_lstm_params(1, params.encoder_state_size, params.encoder_state_size / 2, "LSTM-t")
            self.initial_discourse_state = torch_utils.add_params(tuple([params.encoder_state_size / 2]), "V-turn-state-0")
        final_snippet_size = 0
        if params.use_snippets and not params.previous_decoder_snippet_encoding:
            # Snippet embedding is a concat of two directions, hence half
            # the state size per direction.
            snippet_encoding_size = int(params.encoder_state_size / 2)
            final_snippet_size = params.encoder_state_size
            if params.snippet_age_embedding:
                snippet_encoding_size -= int(
                    params.snippet_age_embedding_size / 4)
                self.snippet_age_embedder = Embedder(
                    params.snippet_age_embedding_size,
                    name="snippet-age-embedding",
                    num_tokens=params.max_snippet_age_embedding)
                final_snippet_size = params.encoder_state_size + params.snippet_age_embedding_size / 2
            self.snippet_encoder = Encoder(params.snippet_num_layers,
                                           params.output_embedding_size,
                                           snippet_encoding_size)
        if params.use_previous_query:
            self.query_encoder = Encoder(params.encoder_num_layers, params.output_embedding_size, params.encoder_state_size)
        self.final_snippet_size = final_snippet_size
        self.dropout = 0.
    def _encode_snippets(self, previous_query, snippets, input_schema):
        """Encode snippets by running the snippet encoder over the previous
        query and concatenating the boundary hidden states (plus an optional
        age embedding). Returns the snippets with embeddings set."""
        startpoints = [snippet.startpos for snippet in snippets]
        endpoints = [snippet.endpos for snippet in snippets]
        assert len(startpoints) == 0 or min(startpoints) >= 0
        if input_schema:
            # With a schema, endpos is exclusive (may equal len).
            assert len(endpoints) == 0 or max(endpoints) <= len(previous_query)
        else:
            assert len(endpoints) == 0 or max(endpoints) < len(previous_query)
        snippet_embedder = lambda query_token: self.get_query_token_embedding(query_token, input_schema)
        if previous_query and snippets:
            _, previous_outputs = self.snippet_encoder(
                previous_query, snippet_embedder, dropout_amount=self.dropout)
            assert len(previous_outputs) == len(previous_query)
            for snippet in snippets:
                if input_schema:
                    embedding = torch.cat([previous_outputs[snippet.startpos],previous_outputs[snippet.endpos-1]], dim=0)
                else:
                    embedding = torch.cat([previous_outputs[snippet.startpos],previous_outputs[snippet.endpos]], dim=0)
                if self.params.snippet_age_embedding:
                    # Clamp age to the largest embedded value.
                    embedding = torch.cat([embedding, self.snippet_age_embedder(min(snippet.age, self.params.max_snippet_age_embedding - 1))], dim=0)
                snippet.set_embedding(embedding)
        return snippets
    def _initialize_discourse_states(self):
        """Return the learned initial discourse state plus zeroed (h, c)
        pairs for each discourse LSTM layer."""
        discourse_state = self.initial_discourse_state
        discourse_lstm_states = []
        for lstm in self.discourse_lstms:
            hidden_size = lstm.weight_hh.size()[1]
            if lstm.weight_hh.is_cuda:
                h_0 = torch.cuda.FloatTensor(1,hidden_size).fill_(0)
                c_0 = torch.cuda.FloatTensor(1,hidden_size).fill_(0)
            else:
                h_0 = torch.zeros(1,hidden_size)
                c_0 = torch.zeros(1,hidden_size)
            discourse_lstm_states.append((h_0, c_0))
        return discourse_state, discourse_lstm_states
    def _add_positional_embeddings(self, hidden_states, utterances, group=False):
        """Concatenate a turn-distance positional embedding to each hidden
        state, keeping only the most recent `maximum_utterances` turns.
        Returns (new_states, flat_token_sequence)."""
        grouped_states = []
        start_index = 0
        # Re-group the flat state list per utterance.
        for utterance in utterances:
            grouped_states.append(hidden_states[start_index:start_index + len(utterance)])
            start_index += len(utterance)
        assert len(hidden_states) == sum([len(seq) for seq in grouped_states]) == sum([len(utterance) for utterance in utterances])
        new_states = []
        flat_sequence = []
        num_utterances_to_keep = min(self.params.maximum_utterances, len(utterances))
        for i, (states, utterance) in enumerate(zip(
                grouped_states[-num_utterances_to_keep:], utterances[-num_utterances_to_keep:])):
            positional_sequence = []
            # index counts distance from the most recent utterance.
            index = num_utterances_to_keep - i - 1
            for state in states:
                positional_sequence.append(torch.cat([state, self.positional_embedder(index)], dim=0))
            assert len(positional_sequence) == len(utterance), \
                "Expected utterance and state sequence length to be the same, " \
                + "but they were " + str(len(utterance)) \
                + " and " + str(len(positional_sequence))
            if group:
                new_states.append(positional_sequence)
            else:
                new_states.extend(positional_sequence)
            flat_sequence.extend(utterance)
        return new_states, flat_sequence
    def build_optim(self):
        """Create the Adam optimizer(s); BERT parameters get their own
        optimizer (with lr_bert) when fine-tuning is enabled."""
        params_trainer = []
        params_bert_trainer = []
        for name, param in self.named_parameters():
            if param.requires_grad:
                if 'model_bert' in name:
                    params_bert_trainer.append(param)
                else:
                    params_trainer.append(param)
        self.trainer = torch.optim.Adam(params_trainer, lr=self.params.initial_learning_rate)
        if self.params.fine_tune_bert:
            self.bert_trainer = torch.optim.Adam(params_bert_trainer, lr=self.params.lr_bert)
    def set_dropout(self, value):
        """Set the dropout amount applied during encoding/decoding."""
        self.dropout = value
    def set_learning_rate(self, value):
        """Set the learning rate of the main (non-BERT) optimizer."""
        for param_group in self.trainer.param_groups:
            param_group['lr'] = value
    def save(self, filename):
        """Serialize model weights (state_dict) to `filename`."""
        torch.save(self.state_dict(), filename)
    def load(self, filename):
        """Load model weights previously written by save()."""
        self.load_state_dict(torch.load(filename))
        print("Loaded model from file " + filename)
| true | true |
f7207971f79f86b58e5d4a4b9ee3f1c8c602689e | 1,890 | py | Python | MNISTT.py | ankit9437/MNIST | bf620e7779a5383c2ad87cf89cd11651963bd7c5 | [
"MIT"
] | null | null | null | MNISTT.py | ankit9437/MNIST | bf620e7779a5383c2ad87cf89cd11651963bd7c5 | [
"MIT"
] | null | null | null | MNISTT.py | ankit9437/MNIST | bf620e7779a5383c2ad87cf89cd11651963bd7c5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sun Dec 15 10:58:44 2019
@author: DELL
"""
from __future__ import print_function, division
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def d(u, v):
    """Squared Euclidean distance between vectors u and v."""
    delta = u - v
    return delta.dot(delta)
def get_data(limit=None):
    """Load the Kaggle MNIST training CSV, shuffle it, and return (X, Y).

    X holds the pixel columns scaled to [0, 1]; Y is the label column.
    When `limit` is given, only the first `limit` shuffled rows are kept.
    """
    print("Reading in and transforming data...")
    df = pd.read_csv('train.csv')
    data = df.values
    np.random.shuffle(data)
    X = data[:, 1:] / 255.0  # data is from 0..255
    Y = data[:, 0]
    if limit is not None:
        X, Y = X[:limit], Y[:limit]
    return X, Y
def plot_k_means(X, K, max_iter=5, beta=3.0, show_plots=False):
    """Soft (fuzzy) k-means clustering.

    Responsibilities are a softmax over negative squared distances scaled
    by ``beta``; means are recomputed as responsibility-weighted averages.

    Args:
        X: (N, D) data matrix.
        K: number of clusters.
        max_iter: number of E/M rounds to run.
        beta: inverse temperature; larger values give harder assignments.
        show_plots: unused; kept for interface compatibility.

    Returns:
        (M, R): (K, D) cluster means and (N, K) responsibilities whose
        rows sum to 1.
    """
    N, D = X.shape
    exponents = np.empty((N, K))
    # Initialize the means to K distinct randomly-chosen data points.
    initial_centers = np.random.choice(N, K, replace=False)
    M = X[initial_centers]
    for _ in range(max_iter):
        # E-step: responsibility of cluster k for point n is proportional
        # to exp(-beta * squared distance).
        for k in range(K):
            for n in range(N):
                diff = M[k] - X[n]
                exponents[n, k] = np.exp(-beta * diff.dot(diff))
        R = exponents / exponents.sum(axis=1, keepdims=True)
        # M-step: recompute each mean as the weighted average of the data.
        for k in range(K):
            M[k] = R[:, k].dot(X) / R[:, k].sum()
    return M, R
def main():
    """Cluster 1000 MNIST digits with soft k-means (K = number of distinct
    labels) and display each learned mean as a 28x28 image."""
    # mnist data
    X, Y = get_data(1000)
    # simple data
    # X = get_simple_data()
    # Y = np.array([0]*300 + [1]*300 + [2]*300)
    print("Number of data points:", len(Y))
    M, R = plot_k_means(X, len(set(Y)))
    # Exercise: Try different values of K and compare the evaluation metrics
    # they should look like digits
    for k in range(len(M)):
        im = M[k].reshape(28, 28)
        plt.imshow(im, cmap='Blues')
        plt.show()
if __name__ == "__main__":
    main()
from __future__ import print_function, division
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def d(u, v):
    """Return ||u - v||^2, the squared Euclidean distance."""
    gap = u - v
    return gap.dot(gap)
def get_data(limit=None):
    """Load the MNIST training CSV, shuffle, scale pixels to [0, 1], and
    return (X, Y); optionally truncate to the first `limit` rows."""
    print("Reading in and transforming data...")
    df = pd.read_csv('train.csv')
    data = df.values
    np.random.shuffle(data)
    X = data[:, 1:] / 255.0
    Y = data[:, 0]
    if limit is not None:
        X, Y = X[:limit], Y[:limit]
    return X, Y
def plot_k_means(X, K, max_iter=5, beta=3.0, show_plots=False):
    """Fuzzy k-means: soft assignments via exp(-beta * squared distance),
    means via responsibility-weighted averages.

    Returns (M, R) where M is (K, D) and each row of R (N, K) sums to 1.
    `show_plots` is accepted but unused, for interface compatibility.
    """
    N, D = X.shape
    exponents = np.empty((N, K))
    # Seed the means with K distinct random data points.
    M = X[np.random.choice(N, K, replace=False)]
    for _ in range(max_iter):
        # E-step: soft assignment weights.
        for k in range(K):
            for n in range(N):
                gap = M[k] - X[n]
                exponents[n, k] = np.exp(-beta * gap.dot(gap))
        R = exponents / exponents.sum(axis=1, keepdims=True)
        # M-step: weighted mean update.
        for k in range(K):
            M[k] = R[:, k].dot(X) / R[:, k].sum()
    return M, R
def main():
    """Cluster 1000 MNIST digits with soft k-means and display the learned
    means as 28x28 images."""
    X, Y = get_data(1000)
    print("Number of data points:", len(Y))
    M, R = plot_k_means(X, len(set(Y)))
    for k in range(len(M)):
        im = M[k].reshape(28, 28)
        plt.imshow(im, cmap='Blues')
        plt.show()
if __name__ == "__main__":
    main()
f72079f7da23aae91f56118b998102076ab9cb85 | 5,205 | py | Python | email-finder.py | shivangraikar/Twitter-Data-Mining-For-Targeted-Marketing | d12fe807187d438041b4497cbb82ad9ef14d4dbf | [
"MIT"
] | 5 | 2021-02-25T12:10:02.000Z | 2021-11-13T04:03:42.000Z | email-finder.py | shivangraikar/Twitter-Data-Mining-For-Targeted-Marketing | d12fe807187d438041b4497cbb82ad9ef14d4dbf | [
"MIT"
] | null | null | null | email-finder.py | shivangraikar/Twitter-Data-Mining-For-Targeted-Marketing | d12fe807187d438041b4497cbb82ad9ef14d4dbf | [
"MIT"
] | 3 | 2021-02-25T12:10:06.000Z | 2021-03-21T20:26:15.000Z | import string
import time
import threading
import urllib
import re
import io
import sys
from time import sleep
import pickle
import pandas as pd
import psycopg2
def formats(first, middle, last, domain):
    """
    Build a list of guessed email addresses for one person.

    Combines the first/middle/last names (full form or initial) with the
    delimiters '', '.', '_' and '-'. With an empty last name only
    ``first@domain`` is produced; otherwise 35 candidates are returned in
    a fixed order (duplicates are possible when *middle* is empty).
    """
    if len(last) == 0:
        return [first + '@' + domain]
    f0 = first[0]
    l0 = last[0]
    local_parts = [
        f0 + last,                     # flast
        f0 + '.' + last,               # f.last
        f0 + '_' + last,               # f_last
        first,                         # first
        first + last,                  # firstlast
        first + '.' + last,            # first.last
        first + '_' + last,            # first_last
        first + '-' + last,            # first-last
        first + l0,                    # firstl
        first + '.' + l0,              # first.l
        first + '_' + l0,              # first_l
        f0 + middle + last,            # fmiddlelast
        f0 + '.' + middle + last,      # f.middlelast
        f0 + middle + '.' + last,      # fmiddle.last
        f0 + '_' + middle + last,      # f_middlelast
        f0 + middle + '_' + last,      # fmiddle_last
        first + middle + last,         # firstmiddlelast
        first + middle + '.' + last,   # firstmiddle.last
        first + '.' + middle + last,   # first.middlelast
        first + '_' + middle + last,   # first_middlelast
        first + middle + '_' + last,   # firstmiddle_last
        first + middle + l0,           # firstmiddlel
        first + '.' + middle + l0,     # first.middlel
        first + middle + '.' + l0,     # firstmiddle.l
        first + '_' + middle + l0,     # first_middlel
        first + middle + '_' + l0,     # firstmiddle_l
        last,                          # last
        last + first,                  # lastfirst
        last + '.' + first,            # last.first
        last + '_' + first,            # last_first
        l0 + '.' + first,              # l.first
        l0 + first,                    # lfirst
        last + f0,                     # lastf
        last + '.' + f0,               # last.f
        last + '_' + f0,               # last_f
    ]
    return [part + '@' + domain for part in local_parts]
# Pull the distinct contact names scraped into the `keywords` table.
val="select distinct name from keywords"
try:
    # NOTE(review): database credentials are hardcoded; move to config.
    conn = psycopg2.connect(database='Hiranandani', user = "postgres", password = "parth123n@#*", host = "127.0.0.1", port = "5432")
except:
    # Bare except: a failed connect leaves `conn` undefined, so the
    # read_sql below would raise NameError — the connect must succeed.
    print("Create database first")
df=pd.read_sql(val,conn)
# Strip all punctuation from every name.
uname=list()
for i in df['name']:
    uname.append(i.translate(str.maketrans('', '', string.punctuation)))
# Drop names containing any of these substrings (titles such as Dr/CA/Er).
# Note this is a substring test, so e.g. "Sandra" also matches 'dr'.
a=['dr','ca','er']
notdrca=list()
for i in uname:
    if any(x in i.lower() for x in a):
        continue
    else:
        notdrca.append(i)
# Buckets of names by word count, plus the accumulated candidate emails.
len2=list()
l1=list()
l3=list()
ln=list()
email_list=list()
# Sanity check: nothing with a title substring should remain.
for i in notdrca:
    if any(x in i.lower() for x in a):
        print(i)
for i in notdrca:
    try:
        i=i.lower()
        s=i.split()
        if len(s)==2:
            # NOTE(review): the surname is passed as `middle` with an empty
            # `last`, so formats() returns only first@gmail.com here —
            # probably meant formats(s[0], '', s[1], ...). Confirm.
            email_list.extend(formats(s[0],s[1],'','gmail.com'))
            len2.append(i)
        elif len(s)==1:
            email_list.extend(formats(s[0],'','','gmail.com'))
            l1.append(i)
        elif len(s)==3:
            email_list.extend(formats(s[0],s[1],s[2],'gmail.com'))
            l3.append(i)
        elif len(s)>3:
            # Names longer than three words are set aside unprocessed.
            ln.append(i)
            continue
    except:
        continue
# Persist the generated addresses. Opening and dumping inside one try
# fixes two defects in the original: the handle was never closed, and a
# failed open left `h` undefined so pickle.dump crashed with NameError.
try:
    with open('emails.pickle', 'wb') as handle:
        pickle.dump(email_list, handle)
except Exception as e:
    print(e)
# Sanity-check the validator regex against an address with no TLD:
# re.match returns None, which is printed.
regex = r'^\w+([\.-]?\w+)*@\w+([\.-]?\w+)*(\.\w{2,3})+$'
match = re.match(regex, 'harsha_nihar@yahoon')
if match is None:
    print(match)
| 36.65493 | 136 | 0.529491 | import string
import time
import threading
import urllib
import re
import io
import sys
from time import sleep
import pickle
import pandas as pd
import psycopg2
def formats(first, middle, last, domain):
    """Return a list of guessed email addresses for one person at *domain*.

    An empty last name yields the single guess first@domain; otherwise 35
    combinations of full names, initials and the separators '', '.', '_'
    and '-' are generated (order preserved; duplicates possible when
    *middle* is empty).
    """
    if len(last) == 0:
        return [f'{first}@{domain}']
    fi = first[0]
    li = last[0]
    local_parts = [
        f'{fi}{last}', f'{fi}.{last}', f'{fi}_{last}',
        first,
        f'{first}{last}', f'{first}.{last}', f'{first}_{last}', f'{first}-{last}',
        f'{first}{li}', f'{first}.{li}', f'{first}_{li}',
        f'{fi}{middle}{last}', f'{fi}.{middle}{last}', f'{fi}{middle}.{last}',
        f'{fi}_{middle}{last}', f'{fi}{middle}_{last}',
        f'{first}{middle}{last}', f'{first}{middle}.{last}', f'{first}.{middle}{last}',
        f'{first}_{middle}{last}', f'{first}{middle}_{last}',
        f'{first}{middle}{li}', f'{first}.{middle}{li}', f'{first}{middle}.{li}',
        f'{first}_{middle}{li}', f'{first}{middle}_{li}',
        last,
        f'{last}{first}', f'{last}.{first}', f'{last}_{first}',
        f'{li}.{first}', f'{li}{first}',
        f'{last}{fi}', f'{last}.{fi}', f'{last}_{fi}',
    ]
    return [f'{part}@{domain}' for part in local_parts]
# Read the distinct contact names from the `keywords` table.
val="select distinct name from keywords"
try:
    # NOTE(review): credentials hardcoded; move to config.
    conn = psycopg2.connect(database='Hiranandani', user = "postgres", password = "parth123n@#*", host = "127.0.0.1", port = "5432")
except:
    # Bare except: if connect fails, `conn` is undefined and read_sql
    # below raises NameError.
    print("Create database first")
df=pd.read_sql(val,conn)
# Strip punctuation from the names.
uname=list()
for i in df['name']:
    uname.append(i.translate(str.maketrans('', '', string.punctuation)))
# Drop names containing these title substrings (dr/ca/er anywhere).
a=['dr','ca','er']
notdrca=list()
for i in uname:
    if any(x in i.lower() for x in a):
        continue
    else:
        notdrca.append(i)
# Word-count buckets plus the candidate address accumulator.
len2=list()
l1=list()
l3=list()
ln=list()
email_list=list()
# Sanity check: nothing with a title substring should remain.
for i in notdrca:
    if any(x in i.lower() for x in a):
        print(i)
for i in notdrca:
    try:
        i=i.lower()
        s=i.split()
        if len(s)==2:
            # NOTE(review): surname passed as `middle` with empty `last`,
            # so only first@gmail.com is produced here — confirm intent.
            email_list.extend(formats(s[0],s[1],'','gmail.com'))
            len2.append(i)
        elif len(s)==1:
            email_list.extend(formats(s[0],'','','gmail.com'))
            l1.append(i)
        elif len(s)==3:
            email_list.extend(formats(s[0],s[1],s[2],'gmail.com'))
            l3.append(i)
        elif len(s)>3:
            ln.append(i)
            continue
    except:
        continue
# NOTE(review): the handle is never closed, and if open() fails `h` is
# undefined so pickle.dump raises NameError — prefer a with-statement.
try:
    h=open('emails.pickle','wb')
except Exception as e:
    print(e)
pickle.dump(email_list,h)
# Quick regex sanity check against an address with no TLD (prints None).
regex = '^\w+([\.-]?\w+)*@\w+([\.-]?\w+)*(\.\w{2,3})+$'
match=re.match(regex,'harsha_nihar@yahoon')
if match==None:
    print(match)
f7207a2b951aa4caf689bd23c876ef2c79f64116 | 22,958 | py | Python | lib/python3.7/site-packages/django/db/backends/sqlite3/base.py | Boring-Mind/DjangoGirls1 | 54ac8f01d12785470fd5a4ece759206639997122 | [
"Apache-2.0"
] | 304 | 2015-01-06T18:02:49.000Z | 2021-12-11T18:08:37.000Z | lib/python3.7/site-packages/django/db/backends/sqlite3/base.py | Boring-Mind/DjangoGirls1 | 54ac8f01d12785470fd5a4ece759206639997122 | [
"Apache-2.0"
] | 123 | 2019-09-10T14:48:01.000Z | 2019-11-28T21:24:06.000Z | virtual/lib/python3.6/site-packages/django/db/backends/sqlite3/base.py | Krasivaya/Tracks | c18d1c9222dff39e4678d44495a8a7d9434339ff | [
"MIT"
] | 41 | 2015-04-11T14:58:02.000Z | 2021-11-13T20:47:58.000Z | """
SQLite backend for the sqlite3 module in the standard library.
"""
import datetime
import decimal
import functools
import math
import operator
import re
import statistics
import warnings
from itertools import chain
from sqlite3 import dbapi2 as Database
import pytz
from django.core.exceptions import ImproperlyConfigured
from django.db import utils
from django.db.backends import utils as backend_utils
from django.db.backends.base.base import BaseDatabaseWrapper
from django.utils import timezone
from django.utils.dateparse import parse_datetime, parse_time
from django.utils.duration import duration_microseconds
from .client import DatabaseClient # isort:skip
from .creation import DatabaseCreation # isort:skip
from .features import DatabaseFeatures # isort:skip
from .introspection import DatabaseIntrospection # isort:skip
from .operations import DatabaseOperations # isort:skip
from .schema import DatabaseSchemaEditor # isort:skip
def decoder(conv_func):
    """Wrap *conv_func* so it accepts the raw bytestrings the sqlite3
    driver hands to converters: decode to str, then convert."""
    def convert(value):
        return conv_func(value.decode())
    return convert
def none_guard(func):
    """
    Decorator mirroring SQL's NULL propagation: the wrapped function
    returns None whenever any positional argument is None, and otherwise
    delegates to *func*. Simplifies the custom SQL functions registered
    on each connection.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if None in args:
            return None
        return func(*args, **kwargs)
    return wrapper
def list_aggregate(function):
"""
Return an aggregate class that accumulates values in a list and applies
the provided function to the data.
"""
return type('ListAggregate', (list,), {'finalize': function, 'step': list.append})
def check_sqlite_version():
    """Refuse to run against a sqlite3 library older than 3.8.3."""
    if Database.sqlite_version_info >= (3, 8, 3):
        return
    raise ImproperlyConfigured('SQLite 3.8.3 or later is required (found %s).' % Database.sqlite_version)
check_sqlite_version()
# Map stored column text back to Python values: "bool" columns compare the
# raw byte value against b'1'; the date/time declared types are parsed with
# the ISO parsers after being decoded from bytes via decoder().
Database.register_converter("bool", b'1'.__eq__)
Database.register_converter("time", decoder(parse_time))
Database.register_converter("datetime", decoder(parse_datetime))
Database.register_converter("timestamp", decoder(parse_datetime))
Database.register_converter("TIMESTAMP", decoder(parse_datetime))
# Store decimal.Decimal values as their exact string representation.
Database.register_adapter(decimal.Decimal, str)
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'sqlite'
display_name = 'SQLite'
# SQLite doesn't actually support most of these types, but it "does the right
# thing" given more verbose field definitions, so leave them as is so that
# schema inspection is more useful.
data_types = {
'AutoField': 'integer',
'BigAutoField': 'integer',
'BinaryField': 'BLOB',
'BooleanField': 'bool',
'CharField': 'varchar(%(max_length)s)',
'DateField': 'date',
'DateTimeField': 'datetime',
'DecimalField': 'decimal',
'DurationField': 'bigint',
'FileField': 'varchar(%(max_length)s)',
'FilePathField': 'varchar(%(max_length)s)',
'FloatField': 'real',
'IntegerField': 'integer',
'BigIntegerField': 'bigint',
'IPAddressField': 'char(15)',
'GenericIPAddressField': 'char(39)',
'NullBooleanField': 'bool',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer unsigned',
'PositiveSmallIntegerField': 'smallint unsigned',
'SlugField': 'varchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'text',
'TimeField': 'time',
'UUIDField': 'char(32)',
}
data_type_check_constraints = {
'PositiveIntegerField': '"%(column)s" >= 0',
'PositiveSmallIntegerField': '"%(column)s" >= 0',
}
data_types_suffix = {
'AutoField': 'AUTOINCREMENT',
'BigAutoField': 'AUTOINCREMENT',
}
# SQLite requires LIKE statements to include an ESCAPE clause if the value
# being escaped has a percent or underscore in it.
# See https://www.sqlite.org/lang_expr.html for an explanation.
operators = {
'exact': '= %s',
'iexact': "LIKE %s ESCAPE '\\'",
'contains': "LIKE %s ESCAPE '\\'",
'icontains': "LIKE %s ESCAPE '\\'",
'regex': 'REGEXP %s',
'iregex': "REGEXP '(?i)' || %s",
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': "LIKE %s ESCAPE '\\'",
'endswith': "LIKE %s ESCAPE '\\'",
'istartswith': "LIKE %s ESCAPE '\\'",
'iendswith': "LIKE %s ESCAPE '\\'",
}
# The patterns below are used to generate SQL pattern lookup clauses when
# the right-hand side of the lookup isn't a raw string (it might be an expression
# or the result of a bilateral transformation).
# In those cases, special characters for LIKE operators (e.g. \, *, _) should be
# escaped on database side.
#
# Note: we use str.format() here for readability as '%' is used as a wildcard for
# the LIKE operator.
pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')"
pattern_ops = {
'contains': r"LIKE '%%' || {} || '%%' ESCAPE '\'",
'icontains': r"LIKE '%%' || UPPER({}) || '%%' ESCAPE '\'",
'startswith': r"LIKE {} || '%%' ESCAPE '\'",
'istartswith': r"LIKE UPPER({}) || '%%' ESCAPE '\'",
'endswith': r"LIKE '%%' || {} ESCAPE '\'",
'iendswith': r"LIKE '%%' || UPPER({}) ESCAPE '\'",
}
Database = Database
SchemaEditorClass = DatabaseSchemaEditor
# Classes instantiated in __init__().
client_class = DatabaseClient
creation_class = DatabaseCreation
features_class = DatabaseFeatures
introspection_class = DatabaseIntrospection
ops_class = DatabaseOperations
def get_connection_params(self):
settings_dict = self.settings_dict
if not settings_dict['NAME']:
raise ImproperlyConfigured(
"settings.DATABASES is improperly configured. "
"Please supply the NAME value.")
kwargs = {
'database': settings_dict['NAME'],
'detect_types': Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES,
**settings_dict['OPTIONS'],
}
# Always allow the underlying SQLite connection to be shareable
# between multiple threads. The safe-guarding will be handled at a
# higher level by the `BaseDatabaseWrapper.allow_thread_sharing`
# property. This is necessary as the shareability is disabled by
# default in pysqlite and it cannot be changed once a connection is
# opened.
if 'check_same_thread' in kwargs and kwargs['check_same_thread']:
warnings.warn(
'The `check_same_thread` option was provided and set to '
'True. It will be overridden with False. Use the '
'`DatabaseWrapper.allow_thread_sharing` property instead '
'for controlling thread shareability.',
RuntimeWarning
)
kwargs.update({'check_same_thread': False, 'uri': True})
return kwargs
def get_new_connection(self, conn_params):
conn = Database.connect(**conn_params)
conn.create_function("django_date_extract", 2, _sqlite_datetime_extract)
conn.create_function("django_date_trunc", 2, _sqlite_date_trunc)
conn.create_function("django_datetime_cast_date", 2, _sqlite_datetime_cast_date)
conn.create_function("django_datetime_cast_time", 2, _sqlite_datetime_cast_time)
conn.create_function("django_datetime_extract", 3, _sqlite_datetime_extract)
conn.create_function("django_datetime_trunc", 3, _sqlite_datetime_trunc)
conn.create_function("django_time_extract", 2, _sqlite_time_extract)
conn.create_function("django_time_trunc", 2, _sqlite_time_trunc)
conn.create_function("django_time_diff", 2, _sqlite_time_diff)
conn.create_function("django_timestamp_diff", 2, _sqlite_timestamp_diff)
conn.create_function("django_format_dtdelta", 3, _sqlite_format_dtdelta)
conn.create_function('regexp', 2, _sqlite_regexp)
conn.create_function('ACOS', 1, none_guard(math.acos))
conn.create_function('ASIN', 1, none_guard(math.asin))
conn.create_function('ATAN', 1, none_guard(math.atan))
conn.create_function('ATAN2', 2, none_guard(math.atan2))
conn.create_function('CEILING', 1, none_guard(math.ceil))
conn.create_function('COS', 1, none_guard(math.cos))
conn.create_function('COT', 1, none_guard(lambda x: 1 / math.tan(x)))
conn.create_function('DEGREES', 1, none_guard(math.degrees))
conn.create_function('EXP', 1, none_guard(math.exp))
conn.create_function('FLOOR', 1, none_guard(math.floor))
conn.create_function('LN', 1, none_guard(math.log))
conn.create_function('LOG', 2, none_guard(lambda x, y: math.log(y, x)))
conn.create_function('LPAD', 3, _sqlite_lpad)
conn.create_function('MOD', 2, none_guard(math.fmod))
conn.create_function('PI', 0, lambda: math.pi)
conn.create_function('POWER', 2, none_guard(operator.pow))
conn.create_function('RADIANS', 1, none_guard(math.radians))
conn.create_function('REPEAT', 2, none_guard(operator.mul))
conn.create_function('REVERSE', 1, none_guard(lambda x: x[::-1]))
conn.create_function('RPAD', 3, _sqlite_rpad)
conn.create_function('SIN', 1, none_guard(math.sin))
conn.create_function('SQRT', 1, none_guard(math.sqrt))
conn.create_function('TAN', 1, none_guard(math.tan))
conn.create_aggregate('STDDEV_POP', 1, list_aggregate(statistics.pstdev))
conn.create_aggregate('STDDEV_SAMP', 1, list_aggregate(statistics.stdev))
conn.create_aggregate('VAR_POP', 1, list_aggregate(statistics.pvariance))
conn.create_aggregate('VAR_SAMP', 1, list_aggregate(statistics.variance))
conn.execute('PRAGMA foreign_keys = ON')
return conn
def init_connection_state(self):
pass
def create_cursor(self, name=None):
return self.connection.cursor(factory=SQLiteCursorWrapper)
def close(self):
self.validate_thread_sharing()
# If database is in memory, closing the connection destroys the
# database. To prevent accidental data loss, ignore close requests on
# an in-memory db.
if not self.is_in_memory_db():
BaseDatabaseWrapper.close(self)
def _savepoint_allowed(self):
# When 'isolation_level' is not None, sqlite3 commits before each
# savepoint; it's a bug. When it is None, savepoints don't make sense
# because autocommit is enabled. The only exception is inside 'atomic'
# blocks. To work around that bug, on SQLite, 'atomic' starts a
# transaction explicitly rather than simply disable autocommit.
return self.in_atomic_block
def _set_autocommit(self, autocommit):
if autocommit:
level = None
else:
# sqlite3's internal default is ''. It's different from None.
# See Modules/_sqlite/connection.c.
level = ''
# 'isolation_level' is a misleading API.
# SQLite always runs at the SERIALIZABLE isolation level.
with self.wrap_database_errors:
self.connection.isolation_level = level
def disable_constraint_checking(self):
with self.cursor() as cursor:
cursor.execute('PRAGMA foreign_keys = OFF')
# Foreign key constraints cannot be turned off while in a multi-
# statement transaction. Fetch the current state of the pragma
# to determine if constraints are effectively disabled.
enabled = cursor.execute('PRAGMA foreign_keys').fetchone()[0]
return not bool(enabled)
def enable_constraint_checking(self):
self.cursor().execute('PRAGMA foreign_keys = ON')
def check_constraints(self, table_names=None):
"""
Check each table name in `table_names` for rows with invalid foreign
key references. This method is intended to be used in conjunction with
`disable_constraint_checking()` and `enable_constraint_checking()`, to
determine if rows with invalid references were entered while constraint
checks were off.
"""
if self.features.supports_pragma_foreign_key_check:
with self.cursor() as cursor:
if table_names is None:
violations = self.cursor().execute('PRAGMA foreign_key_check').fetchall()
else:
violations = chain.from_iterable(
cursor.execute('PRAGMA foreign_key_check(%s)' % table_name).fetchall()
for table_name in table_names
)
# See https://www.sqlite.org/pragma.html#pragma_foreign_key_check
for table_name, rowid, referenced_table_name, foreign_key_index in violations:
foreign_key = cursor.execute(
'PRAGMA foreign_key_list(%s)' % table_name
).fetchall()[foreign_key_index]
column_name, referenced_column_name = foreign_key[3:5]
primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
primary_key_value, bad_value = cursor.execute(
'SELECT %s, %s FROM %s WHERE rowid = %%s' % (
primary_key_column_name, column_name, table_name
),
(rowid,),
).fetchone()
raise utils.IntegrityError(
"The row in table '%s' with primary key '%s' has an "
"invalid foreign key: %s.%s contains a value '%s' that "
"does not have a corresponding value in %s.%s." % (
table_name, primary_key_value, table_name, column_name,
bad_value, referenced_table_name, referenced_column_name
)
)
else:
with self.cursor() as cursor:
if table_names is None:
table_names = self.introspection.table_names(cursor)
for table_name in table_names:
primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
if not primary_key_column_name:
continue
key_columns = self.introspection.get_key_columns(cursor, table_name)
for column_name, referenced_table_name, referenced_column_name in key_columns:
cursor.execute(
"""
SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
LEFT JOIN `%s` as REFERRED
ON (REFERRING.`%s` = REFERRED.`%s`)
WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL
"""
% (
primary_key_column_name, column_name, table_name,
referenced_table_name, column_name, referenced_column_name,
column_name, referenced_column_name,
)
)
for bad_row in cursor.fetchall():
raise utils.IntegrityError(
"The row in table '%s' with primary key '%s' has an "
"invalid foreign key: %s.%s contains a value '%s' that "
"does not have a corresponding value in %s.%s." % (
table_name, bad_row[0], table_name, column_name,
bad_row[1], referenced_table_name, referenced_column_name,
)
)
    def is_usable(self):
        """Report whether the connection can still be used.

        SQLite has no separate server process to lose contact with, so an
        open connection is always considered usable.
        """
        return True
    def _start_transaction_under_autocommit(self):
        """
        Start a transaction explicitly in autocommit mode.
        Staying in autocommit mode works around a bug of sqlite3 that breaks
        savepoints when autocommit is disabled.
        """
        # Issue BEGIN ourselves instead of letting the sqlite3 module manage
        # implicit transactions.
        self.cursor().execute("BEGIN")
def is_in_memory_db(self):
return self.creation.is_in_memory_db(self.settings_dict['NAME'])
FORMAT_QMARK_REGEX = re.compile(r'(?<!%)%s')


class SQLiteCursorWrapper(Database.Cursor):
    """
    Django uses "format" style placeholders, but pysqlite2 uses "qmark" style.
    This fixes it -- but note that if you want to use a literal "%s" in a query,
    you'll need to use "%%s".
    """
    def execute(self, query, params=None):
        # Parameterless queries are passed through untouched, so a literal
        # '%' needs no escaping in that case.
        if params is None:
            return Database.Cursor.execute(self, query)
        return Database.Cursor.execute(self, self.convert_query(query), params)

    def executemany(self, query, param_list):
        return Database.Cursor.executemany(self, self.convert_query(query), param_list)

    def convert_query(self, query):
        # Replace every unescaped %s with a qmark, then collapse escaped %%.
        converted = FORMAT_QMARK_REGEX.sub('?', query)
        return converted.replace('%%', '%')
def _sqlite_datetime_parse(dt, tzname=None):
    """Parse a SQLite datetime value; localize it to ``tzname`` when given.

    Returns None for NULL input or for values that can't be parsed.
    """
    if dt is None:
        return None
    try:
        parsed = backend_utils.typecast_timestamp(dt)
    except (TypeError, ValueError):
        return None
    if tzname is None:
        return parsed
    return timezone.localtime(parsed, pytz.timezone(tzname))
def _sqlite_date_trunc(lookup_type, dt):
    """Truncate a datetime string to ``lookup_type`` resolution as 'Y-m-d'.

    Returns None for NULL/unparseable input; falls through (implicit None)
    for unknown lookup types.
    """
    parsed = _sqlite_datetime_parse(dt)
    if parsed is None:
        return None
    if lookup_type == 'year':
        return '%i-01-01' % parsed.year
    if lookup_type == 'quarter':
        # First month of the quarter that contains the date.
        first_month = parsed.month - (parsed.month - 1) % 3
        return '%i-%02i-01' % (parsed.year, first_month)
    if lookup_type == 'month':
        return '%i-%02i-01' % (parsed.year, parsed.month)
    if lookup_type == 'week':
        # Roll back to Monday of the same ISO week.
        monday = parsed - datetime.timedelta(days=parsed.weekday())
        return '%i-%02i-%02i' % (monday.year, monday.month, monday.day)
    if lookup_type == 'day':
        return '%i-%02i-%02i' % (parsed.year, parsed.month, parsed.day)
def _sqlite_time_trunc(lookup_type, dt):
    """Truncate a time string to 'hour', 'minute', or 'second' resolution.

    Returns None for NULL/unparseable input.
    """
    if dt is None:
        return None
    try:
        parsed = backend_utils.typecast_time(dt)
    except (ValueError, TypeError):
        return None
    if lookup_type == 'hour':
        return '%02i:00:00' % parsed.hour
    if lookup_type == 'minute':
        return '%02i:%02i:00' % (parsed.hour, parsed.minute)
    if lookup_type == 'second':
        return '%02i:%02i:%02i' % (parsed.hour, parsed.minute, parsed.second)
def _sqlite_datetime_cast_date(dt, tzname):
    """Cast a datetime string to an ISO date string, honoring ``tzname``."""
    parsed = _sqlite_datetime_parse(dt, tzname)
    return None if parsed is None else parsed.date().isoformat()
def _sqlite_datetime_cast_time(dt, tzname):
    """Cast a datetime string to an ISO time string, honoring ``tzname``."""
    parsed = _sqlite_datetime_parse(dt, tzname)
    return None if parsed is None else parsed.time().isoformat()
def _sqlite_datetime_extract(lookup_type, dt, tzname=None):
    """Extract one component of a datetime string ('year', 'week_day', ...)."""
    parsed = _sqlite_datetime_parse(dt, tzname)
    if parsed is None:
        return None
    if lookup_type == 'week_day':
        # Map ISO weekday (Mon=1..Sun=7) to 1 (Sunday) through 7 (Saturday).
        return (parsed.isoweekday() % 7) + 1
    if lookup_type == 'week':
        return parsed.isocalendar()[1]
    if lookup_type == 'quarter':
        return math.ceil(parsed.month / 3)
    if lookup_type == 'iso_year':
        return parsed.isocalendar()[0]
    # Plain attributes such as 'year', 'month', 'day', 'hour', ...
    return getattr(parsed, lookup_type)
def _sqlite_datetime_trunc(lookup_type, dt, tzname):
    """Truncate a datetime string to ``lookup_type`` resolution as
    'Y-m-d H:M:S'. Returns None for NULL/unparseable input."""
    parsed = _sqlite_datetime_parse(dt, tzname)
    if parsed is None:
        return None
    if lookup_type == 'year':
        return '%i-01-01 00:00:00' % parsed.year
    if lookup_type == 'quarter':
        # First month of the quarter that contains the date.
        first_month = parsed.month - (parsed.month - 1) % 3
        return '%i-%02i-01 00:00:00' % (parsed.year, first_month)
    if lookup_type == 'month':
        return '%i-%02i-01 00:00:00' % (parsed.year, parsed.month)
    if lookup_type == 'week':
        # Roll back to Monday of the same ISO week.
        monday = parsed - datetime.timedelta(days=parsed.weekday())
        return '%i-%02i-%02i 00:00:00' % (monday.year, monday.month, monday.day)
    if lookup_type == 'day':
        return '%i-%02i-%02i 00:00:00' % (parsed.year, parsed.month, parsed.day)
    if lookup_type == 'hour':
        return '%i-%02i-%02i %02i:00:00' % (parsed.year, parsed.month, parsed.day, parsed.hour)
    if lookup_type == 'minute':
        return '%i-%02i-%02i %02i:%02i:00' % (
            parsed.year, parsed.month, parsed.day, parsed.hour, parsed.minute,
        )
    if lookup_type == 'second':
        return '%i-%02i-%02i %02i:%02i:%02i' % (
            parsed.year, parsed.month, parsed.day,
            parsed.hour, parsed.minute, parsed.second,
        )
def _sqlite_time_extract(lookup_type, dt):
    """Extract one component ('hour', 'minute', 'second', ...) of a time string."""
    if dt is None:
        return None
    try:
        parsed = backend_utils.typecast_time(dt)
    except (ValueError, TypeError):
        return None
    return getattr(parsed, lookup_type)
@none_guard
def _sqlite_format_dtdelta(conn, lhs, rhs):
    """
    LHS and RHS can be either:
    - An integer number of microseconds
    - A string representing a datetime
    """
    def coerce(value):
        # Integers are microsecond counts; everything else is parsed as a
        # timestamp string.
        if isinstance(value, int):
            return datetime.timedelta(0, 0, value)
        return backend_utils.typecast_timestamp(value)

    try:
        left = coerce(lhs)
        right = coerce(rhs)
        result = left + right if conn.strip() == '+' else left - right
    except (ValueError, TypeError):
        return None
    # typecast_timestamp returns a date or a datetime without timezone.
    # It will be formatted as "%Y-%m-%d" or "%Y-%m-%d %H:%M:%S[.%f]"
    return str(result)
@none_guard
def _sqlite_time_diff(lhs, rhs):
    """Return the difference between two time strings in microseconds."""
    def as_microseconds(value):
        t = backend_utils.typecast_time(value)
        return ((t.hour * 60 + t.minute) * 60 + t.second) * 1000000 + t.microsecond

    return as_microseconds(lhs) - as_microseconds(rhs)
@none_guard
def _sqlite_timestamp_diff(lhs, rhs):
    """Return the difference between two timestamp strings in microseconds."""
    delta = backend_utils.typecast_timestamp(lhs) - backend_utils.typecast_timestamp(rhs)
    return duration_microseconds(delta)
@none_guard
def _sqlite_regexp(re_pattern, re_string):
    """Implement SQLite's REGEXP operator with Python's ``re`` module."""
    return re.search(re_pattern, str(re_string)) is not None
@none_guard
def _sqlite_lpad(text, length, fill_text):
    """Left-pad ``text`` with ``fill_text`` to exactly ``length`` characters,
    truncating ``text`` when it is already longer."""
    if len(text) >= length:
        return text[:length]
    padding = (fill_text * length)[:length - len(text)]
    return padding + text
@none_guard
def _sqlite_rpad(text, length, fill_text):
    """Right-pad ``text`` with ``fill_text`` to exactly ``length`` characters,
    truncating when necessary."""
    padded = text + fill_text * length
    return padded[:length]
| 40.923351 | 115 | 0.619828 | import datetime
import decimal
import functools
import math
import operator
import re
import statistics
import warnings
from itertools import chain
from sqlite3 import dbapi2 as Database
import pytz
from django.core.exceptions import ImproperlyConfigured
from django.db import utils
from django.db.backends import utils as backend_utils
from django.db.backends.base.base import BaseDatabaseWrapper
from django.utils import timezone
from django.utils.dateparse import parse_datetime, parse_time
from django.utils.duration import duration_microseconds
from .client import DatabaseClient
from .creation import DatabaseCreation
from .features import DatabaseFeatures
from .introspection import DatabaseIntrospection
from .operations import DatabaseOperations
from .schema import DatabaseSchemaEditor
def decoder(conv_func):
return lambda s: conv_func(s.decode())
def none_guard(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
return None if None in args else func(*args, **kwargs)
return wrapper
def list_aggregate(function):
return type('ListAggregate', (list,), {'finalize': function, 'step': list.append})
def check_sqlite_version():
if Database.sqlite_version_info < (3, 8, 3):
raise ImproperlyConfigured('SQLite 3.8.3 or later is required (found %s).' % Database.sqlite_version)
check_sqlite_version()
Database.register_converter("bool", b'1'.__eq__)
Database.register_converter("time", decoder(parse_time))
Database.register_converter("datetime", decoder(parse_datetime))
Database.register_converter("timestamp", decoder(parse_datetime))
Database.register_converter("TIMESTAMP", decoder(parse_datetime))
Database.register_adapter(decimal.Decimal, str)
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'sqlite'
display_name = 'SQLite'
# thing" given more verbose field definitions, so leave them as is so that
# schema inspection is more useful.
data_types = {
'AutoField': 'integer',
'BigAutoField': 'integer',
'BinaryField': 'BLOB',
'BooleanField': 'bool',
'CharField': 'varchar(%(max_length)s)',
'DateField': 'date',
'DateTimeField': 'datetime',
'DecimalField': 'decimal',
'DurationField': 'bigint',
'FileField': 'varchar(%(max_length)s)',
'FilePathField': 'varchar(%(max_length)s)',
'FloatField': 'real',
'IntegerField': 'integer',
'BigIntegerField': 'bigint',
'IPAddressField': 'char(15)',
'GenericIPAddressField': 'char(39)',
'NullBooleanField': 'bool',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer unsigned',
'PositiveSmallIntegerField': 'smallint unsigned',
'SlugField': 'varchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'text',
'TimeField': 'time',
'UUIDField': 'char(32)',
}
data_type_check_constraints = {
'PositiveIntegerField': '"%(column)s" >= 0',
'PositiveSmallIntegerField': '"%(column)s" >= 0',
}
data_types_suffix = {
'AutoField': 'AUTOINCREMENT',
'BigAutoField': 'AUTOINCREMENT',
}
# SQLite requires LIKE statements to include an ESCAPE clause if the value
# being escaped has a percent or underscore in it.
# See https://www.sqlite.org/lang_expr.html for an explanation.
operators = {
'exact': '= %s',
'iexact': "LIKE %s ESCAPE '\\'",
'contains': "LIKE %s ESCAPE '\\'",
'icontains': "LIKE %s ESCAPE '\\'",
'regex': 'REGEXP %s',
'iregex': "REGEXP '(?i)' || %s",
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': "LIKE %s ESCAPE '\\'",
'endswith': "LIKE %s ESCAPE '\\'",
'istartswith': "LIKE %s ESCAPE '\\'",
'iendswith': "LIKE %s ESCAPE '\\'",
}
# The patterns below are used to generate SQL pattern lookup clauses when
# the right-hand side of the lookup isn't a raw string (it might be an expression
pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')"
pattern_ops = {
'contains': r"LIKE '%%' || {} || '%%' ESCAPE '\'",
'icontains': r"LIKE '%%' || UPPER({}) || '%%' ESCAPE '\'",
'startswith': r"LIKE {} || '%%' ESCAPE '\'",
'istartswith': r"LIKE UPPER({}) || '%%' ESCAPE '\'",
'endswith': r"LIKE '%%' || {} ESCAPE '\'",
'iendswith': r"LIKE '%%' || UPPER({}) ESCAPE '\'",
}
Database = Database
SchemaEditorClass = DatabaseSchemaEditor
client_class = DatabaseClient
creation_class = DatabaseCreation
features_class = DatabaseFeatures
introspection_class = DatabaseIntrospection
ops_class = DatabaseOperations
def get_connection_params(self):
settings_dict = self.settings_dict
if not settings_dict['NAME']:
raise ImproperlyConfigured(
"settings.DATABASES is improperly configured. "
"Please supply the NAME value.")
kwargs = {
'database': settings_dict['NAME'],
'detect_types': Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES,
**settings_dict['OPTIONS'],
}
if 'check_same_thread' in kwargs and kwargs['check_same_thread']:
warnings.warn(
'The `check_same_thread` option was provided and set to '
'True. It will be overridden with False. Use the '
'`DatabaseWrapper.allow_thread_sharing` property instead '
'for controlling thread shareability.',
RuntimeWarning
)
kwargs.update({'check_same_thread': False, 'uri': True})
return kwargs
def get_new_connection(self, conn_params):
conn = Database.connect(**conn_params)
conn.create_function("django_date_extract", 2, _sqlite_datetime_extract)
conn.create_function("django_date_trunc", 2, _sqlite_date_trunc)
conn.create_function("django_datetime_cast_date", 2, _sqlite_datetime_cast_date)
conn.create_function("django_datetime_cast_time", 2, _sqlite_datetime_cast_time)
conn.create_function("django_datetime_extract", 3, _sqlite_datetime_extract)
conn.create_function("django_datetime_trunc", 3, _sqlite_datetime_trunc)
conn.create_function("django_time_extract", 2, _sqlite_time_extract)
conn.create_function("django_time_trunc", 2, _sqlite_time_trunc)
conn.create_function("django_time_diff", 2, _sqlite_time_diff)
conn.create_function("django_timestamp_diff", 2, _sqlite_timestamp_diff)
conn.create_function("django_format_dtdelta", 3, _sqlite_format_dtdelta)
conn.create_function('regexp', 2, _sqlite_regexp)
conn.create_function('ACOS', 1, none_guard(math.acos))
conn.create_function('ASIN', 1, none_guard(math.asin))
conn.create_function('ATAN', 1, none_guard(math.atan))
conn.create_function('ATAN2', 2, none_guard(math.atan2))
conn.create_function('CEILING', 1, none_guard(math.ceil))
conn.create_function('COS', 1, none_guard(math.cos))
conn.create_function('COT', 1, none_guard(lambda x: 1 / math.tan(x)))
conn.create_function('DEGREES', 1, none_guard(math.degrees))
conn.create_function('EXP', 1, none_guard(math.exp))
conn.create_function('FLOOR', 1, none_guard(math.floor))
conn.create_function('LN', 1, none_guard(math.log))
conn.create_function('LOG', 2, none_guard(lambda x, y: math.log(y, x)))
conn.create_function('LPAD', 3, _sqlite_lpad)
conn.create_function('MOD', 2, none_guard(math.fmod))
conn.create_function('PI', 0, lambda: math.pi)
conn.create_function('POWER', 2, none_guard(operator.pow))
conn.create_function('RADIANS', 1, none_guard(math.radians))
conn.create_function('REPEAT', 2, none_guard(operator.mul))
conn.create_function('REVERSE', 1, none_guard(lambda x: x[::-1]))
conn.create_function('RPAD', 3, _sqlite_rpad)
conn.create_function('SIN', 1, none_guard(math.sin))
conn.create_function('SQRT', 1, none_guard(math.sqrt))
conn.create_function('TAN', 1, none_guard(math.tan))
conn.create_aggregate('STDDEV_POP', 1, list_aggregate(statistics.pstdev))
conn.create_aggregate('STDDEV_SAMP', 1, list_aggregate(statistics.stdev))
conn.create_aggregate('VAR_POP', 1, list_aggregate(statistics.pvariance))
conn.create_aggregate('VAR_SAMP', 1, list_aggregate(statistics.variance))
conn.execute('PRAGMA foreign_keys = ON')
return conn
def init_connection_state(self):
pass
def create_cursor(self, name=None):
return self.connection.cursor(factory=SQLiteCursorWrapper)
def close(self):
self.validate_thread_sharing()
if not self.is_in_memory_db():
BaseDatabaseWrapper.close(self)
def _savepoint_allowed(self):
return self.in_atomic_block
def _set_autocommit(self, autocommit):
if autocommit:
level = None
else:
level = ''
with self.wrap_database_errors:
self.connection.isolation_level = level
def disable_constraint_checking(self):
with self.cursor() as cursor:
cursor.execute('PRAGMA foreign_keys = OFF')
enabled = cursor.execute('PRAGMA foreign_keys').fetchone()[0]
return not bool(enabled)
def enable_constraint_checking(self):
self.cursor().execute('PRAGMA foreign_keys = ON')
def check_constraints(self, table_names=None):
if self.features.supports_pragma_foreign_key_check:
with self.cursor() as cursor:
if table_names is None:
violations = self.cursor().execute('PRAGMA foreign_key_check').fetchall()
else:
violations = chain.from_iterable(
cursor.execute('PRAGMA foreign_key_check(%s)' % table_name).fetchall()
for table_name in table_names
)
e_name, rowid, referenced_table_name, foreign_key_index in violations:
foreign_key = cursor.execute(
'PRAGMA foreign_key_list(%s)' % table_name
).fetchall()[foreign_key_index]
column_name, referenced_column_name = foreign_key[3:5]
primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
primary_key_value, bad_value = cursor.execute(
'SELECT %s, %s FROM %s WHERE rowid = %%s' % (
primary_key_column_name, column_name, table_name
),
(rowid,),
).fetchone()
raise utils.IntegrityError(
"The row in table '%s' with primary key '%s' has an "
"invalid foreign key: %s.%s contains a value '%s' that "
"does not have a corresponding value in %s.%s." % (
table_name, primary_key_value, table_name, column_name,
bad_value, referenced_table_name, referenced_column_name
)
)
else:
with self.cursor() as cursor:
if table_names is None:
table_names = self.introspection.table_names(cursor)
for table_name in table_names:
primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
if not primary_key_column_name:
continue
key_columns = self.introspection.get_key_columns(cursor, table_name)
for column_name, referenced_table_name, referenced_column_name in key_columns:
cursor.execute(
"""
SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
LEFT JOIN `%s` as REFERRED
ON (REFERRING.`%s` = REFERRED.`%s`)
WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL
"""
% (
primary_key_column_name, column_name, table_name,
referenced_table_name, column_name, referenced_column_name,
column_name, referenced_column_name,
)
)
for bad_row in cursor.fetchall():
raise utils.IntegrityError(
"The row in table '%s' with primary key '%s' has an "
"invalid foreign key: %s.%s contains a value '%s' that "
"does not have a corresponding value in %s.%s." % (
table_name, bad_row[0], table_name, column_name,
bad_row[1], referenced_table_name, referenced_column_name,
)
)
def is_usable(self):
return True
def _start_transaction_under_autocommit(self):
self.cursor().execute("BEGIN")
def is_in_memory_db(self):
return self.creation.is_in_memory_db(self.settings_dict['NAME'])
FORMAT_QMARK_REGEX = re.compile(r'(?<!%)%s')
class SQLiteCursorWrapper(Database.Cursor):
def execute(self, query, params=None):
if params is None:
return Database.Cursor.execute(self, query)
query = self.convert_query(query)
return Database.Cursor.execute(self, query, params)
def executemany(self, query, param_list):
query = self.convert_query(query)
return Database.Cursor.executemany(self, query, param_list)
def convert_query(self, query):
return FORMAT_QMARK_REGEX.sub('?', query).replace('%%', '%')
def _sqlite_datetime_parse(dt, tzname=None):
if dt is None:
return None
try:
dt = backend_utils.typecast_timestamp(dt)
except (TypeError, ValueError):
return None
if tzname is not None:
dt = timezone.localtime(dt, pytz.timezone(tzname))
return dt
def _sqlite_date_trunc(lookup_type, dt):
dt = _sqlite_datetime_parse(dt)
if dt is None:
return None
if lookup_type == 'year':
return "%i-01-01" % dt.year
elif lookup_type == 'quarter':
month_in_quarter = dt.month - (dt.month - 1) % 3
return '%i-%02i-01' % (dt.year, month_in_quarter)
elif lookup_type == 'month':
return "%i-%02i-01" % (dt.year, dt.month)
elif lookup_type == 'week':
dt = dt - datetime.timedelta(days=dt.weekday())
return "%i-%02i-%02i" % (dt.year, dt.month, dt.day)
elif lookup_type == 'day':
return "%i-%02i-%02i" % (dt.year, dt.month, dt.day)
def _sqlite_time_trunc(lookup_type, dt):
if dt is None:
return None
try:
dt = backend_utils.typecast_time(dt)
except (ValueError, TypeError):
return None
if lookup_type == 'hour':
return "%02i:00:00" % dt.hour
elif lookup_type == 'minute':
return "%02i:%02i:00" % (dt.hour, dt.minute)
elif lookup_type == 'second':
return "%02i:%02i:%02i" % (dt.hour, dt.minute, dt.second)
def _sqlite_datetime_cast_date(dt, tzname):
dt = _sqlite_datetime_parse(dt, tzname)
if dt is None:
return None
return dt.date().isoformat()
def _sqlite_datetime_cast_time(dt, tzname):
dt = _sqlite_datetime_parse(dt, tzname)
if dt is None:
return None
return dt.time().isoformat()
def _sqlite_datetime_extract(lookup_type, dt, tzname=None):
dt = _sqlite_datetime_parse(dt, tzname)
if dt is None:
return None
if lookup_type == 'week_day':
return (dt.isoweekday() % 7) + 1
elif lookup_type == 'week':
return dt.isocalendar()[1]
elif lookup_type == 'quarter':
return math.ceil(dt.month / 3)
elif lookup_type == 'iso_year':
return dt.isocalendar()[0]
else:
return getattr(dt, lookup_type)
def _sqlite_datetime_trunc(lookup_type, dt, tzname):
dt = _sqlite_datetime_parse(dt, tzname)
if dt is None:
return None
if lookup_type == 'year':
return "%i-01-01 00:00:00" % dt.year
elif lookup_type == 'quarter':
month_in_quarter = dt.month - (dt.month - 1) % 3
return '%i-%02i-01 00:00:00' % (dt.year, month_in_quarter)
elif lookup_type == 'month':
return "%i-%02i-01 00:00:00" % (dt.year, dt.month)
elif lookup_type == 'week':
dt = dt - datetime.timedelta(days=dt.weekday())
return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day)
elif lookup_type == 'day':
return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day)
elif lookup_type == 'hour':
return "%i-%02i-%02i %02i:00:00" % (dt.year, dt.month, dt.day, dt.hour)
elif lookup_type == 'minute':
return "%i-%02i-%02i %02i:%02i:00" % (dt.year, dt.month, dt.day, dt.hour, dt.minute)
elif lookup_type == 'second':
return "%i-%02i-%02i %02i:%02i:%02i" % (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
def _sqlite_time_extract(lookup_type, dt):
if dt is None:
return None
try:
dt = backend_utils.typecast_time(dt)
except (ValueError, TypeError):
return None
return getattr(dt, lookup_type)
@none_guard
def _sqlite_format_dtdelta(conn, lhs, rhs):
try:
real_lhs = datetime.timedelta(0, 0, lhs) if isinstance(lhs, int) else backend_utils.typecast_timestamp(lhs)
real_rhs = datetime.timedelta(0, 0, rhs) if isinstance(rhs, int) else backend_utils.typecast_timestamp(rhs)
if conn.strip() == '+':
out = real_lhs + real_rhs
else:
out = real_lhs - real_rhs
except (ValueError, TypeError):
return None
return str(out)
@none_guard
def _sqlite_time_diff(lhs, rhs):
left = backend_utils.typecast_time(lhs)
right = backend_utils.typecast_time(rhs)
return (
(left.hour * 60 * 60 * 1000000) +
(left.minute * 60 * 1000000) +
(left.second * 1000000) +
(left.microsecond) -
(right.hour * 60 * 60 * 1000000) -
(right.minute * 60 * 1000000) -
(right.second * 1000000) -
(right.microsecond)
)
@none_guard
def _sqlite_timestamp_diff(lhs, rhs):
left = backend_utils.typecast_timestamp(lhs)
right = backend_utils.typecast_timestamp(rhs)
return duration_microseconds(left - right)
@none_guard
def _sqlite_regexp(re_pattern, re_string):
return bool(re.search(re_pattern, str(re_string)))
@none_guard
def _sqlite_lpad(text, length, fill_text):
if len(text) >= length:
return text[:length]
return (fill_text * length)[:length - len(text)] + text
@none_guard
def _sqlite_rpad(text, length, fill_text):
return (text + fill_text * length)[:length]
| true | true |
f7207bb8de11f352cd6af90099062c5d0ac72db3 | 2,345 | py | Python | tests/ccapi/test__attr__.py | achillesrasquinha/CCPy | 7a5b4fc008a9a0c90caee5d2d6a8c67393dcb822 | [
"MIT"
] | 9 | 2020-05-12T08:16:35.000Z | 2022-01-06T03:22:18.000Z | tests/ccapi/test__attr__.py | achillesrasquinha/CCPy | 7a5b4fc008a9a0c90caee5d2d6a8c67393dcb822 | [
"MIT"
] | 3 | 2020-10-14T16:29:24.000Z | 2021-10-04T07:24:34.000Z | tests/ccapi/test__attr__.py | achillesrasquinha/CCPy | 7a5b4fc008a9a0c90caee5d2d6a8c67393dcb822 | [
"MIT"
] | 1 | 2019-12-30T23:13:46.000Z | 2019-12-30T23:13:46.000Z | # imports - standard imports
import os.path as osp
import subprocess
# imports - test imports
import pytest
# imports - module imports
from ccapi.__attr__ import (
read,
pardir,
strip,
safe_decode,
sequence_filter,
get_revision
)
def call(*args, **kwargs):
    """Run *args* as a single subprocess command, forwarding **kwargs**
    (e.g. ``cwd``) to ``subprocess.call``. The exit status is ignored."""
    command = list(args)
    subprocess.call(command, **kwargs)
def test_read(tmpdir):
    """read() must return file contents verbatim, including blank lines."""
    directory = tmpdir.mkdir("tmp")

    single_line = directory.join("foobar.txt")
    single_line.write("foobar")
    assert single_line.read() == read(str(single_line))

    multi_line = directory.join("barfoo.txt")
    multi_line.write(
        """
        foobar
        \n
        barfoo
        """
    )
    assert multi_line.read() == read(str(multi_line))
def test_pardir():
    """pardir() should climb the requested number of directory levels."""
    here = __file__
    assert pardir(here) == osp.dirname(here)
    assert pardir(here, 2) == osp.dirname(osp.dirname(here))
def test_strip():
    """strip() trims surrounding whitespace but keeps interior newlines."""
    assert strip("foobar") == "foobar"
    assert strip("\n foobar\nfoobar \n ") == "foobar\nfoobar"
    assert strip("\n\n\n") == ""
    assert strip("\r\nfoobar\nfoobar\n") == "foobar\nfoobar"
def test_safe_decode():
    """safe_decode() decodes bytes and passes other values through untouched."""
    assert safe_decode(b"foobar") == "foobar"
    assert safe_decode("foobar") == "foobar"
    assert safe_decode(123456789) == 123456789
def test_sequence_filter():
    """sequence_filter() keeps matching items and honours the output type."""
    numbers = [0, 1, 2, 3, 4, 5]
    evens = sequence_filter(numbers, filter_=lambda n: n % 2 == 0)
    assert evens == [0, 2, 4]
    odds = sequence_filter(numbers, filter_=lambda n: n % 2 != 0, type_=tuple)
    assert odds == (1, 3, 5)
def test_get_revision(tmpdir):
    """get_revision() returns the 40-character HEAD SHA once a commit exists."""
    directory = tmpdir.mkdir("tmp")
    path = str(directory)

    # No repository yet: must raise unless errors are suppressed.
    with pytest.raises(subprocess.CalledProcessError):
        get_revision(path)
    assert get_revision(path, raise_err=False) is None

    # Initialize the git repository with a committer identity.
    call("git", "init", path)
    call("git", "config", "user.email", "foobar@foobar.com", cwd=path)
    call("git", "config", "user.name", "Foo Bar", cwd=path)

    # Repository exists but has no commits yet: still an error.
    with pytest.raises(subprocess.CalledProcessError):
        get_revision(path)

    tracked = directory.join("foobar.txt")
    tracked.write("foobar")
    call("git", "add", ".", cwd=path)
    call("git", "commit", "-m", "'Test Commit'", cwd=path)

    assert len(get_revision(path)) == 40
assert len(get_revision(path, short = True)) == 7 | 26.055556 | 99 | 0.620469 |
import os.path as osp
import subprocess
import pytest
from ccapi.__attr__ import (
read,
pardir,
strip,
safe_decode,
sequence_filter,
get_revision
)
def call(*args, **kwargs):
subprocess.call(args, **kwargs)
def test_read(tmpdir):
directory = tmpdir.mkdir("tmp")
tempfile = directory.join("foobar.txt")
tempfile.write("foobar")
assert tempfile.read() == read(str(tempfile))
tempfile = directory.join("barfoo.txt")
tempfile.write(\
"""
foobar
\n
barfoo
"""
)
assert tempfile.read() == read(str(tempfile))
def test_pardir():
assert pardir(__file__) == osp.dirname(__file__)
assert pardir(__file__, 2) == osp.dirname(osp.dirname(__file__))
def test_strip():
string = "foobar"
assert strip(string) == string
string = "\n foobar\nfoobar \n "
assert strip(string) == "foobar\nfoobar"
string = "\n\n\n"
assert strip(string) == ""
string = "\r\nfoobar\nfoobar\n"
assert strip(string) == "foobar\nfoobar"
def test_safe_decode():
assert safe_decode(b"foobar") == "foobar"
assert safe_decode( "foobar") == "foobar"
assert safe_decode(123456789) == 123456789
def test_sequence_filter():
assert sequence_filter([0,1,2,3,4,5], filter_ = lambda x: x % 2 == 0) == [0,2,4]
assert sequence_filter([0,1,2,3,4,5], filter_ = lambda x: x % 2 != 0, type_ = tuple) == (1,3,5)
def test_get_revision(tmpdir):
directory = tmpdir.mkdir("tmp")
path = str(directory)
with pytest.raises(subprocess.CalledProcessError):
get_revision(path)
assert get_revision(path, raise_err = False) == None
call("git","init",path)
call("git","config","user.email","foobar@foobar.com", cwd = path)
call("git","config","user.name" ,"Foo Bar", cwd = path)
with pytest.raises(subprocess.CalledProcessError):
get_revision(path)
tempfile = directory.join("foobar.txt")
tempfile.write("foobar")
call("git","add",".", cwd = path)
call("git","commit","-m","'Test Commit'", cwd = path)
assert len(get_revision(path)) == 40
assert len(get_revision(path, short = True)) == 7 | true | true |
f7207db9c00de82d099854acdd08b0a2728247b5 | 1,679 | py | Python | genderPredictScript.py | ganesh2583/Python-Data_Science | 233586491d3863176a008b938b0946c472940a6d | [
"MIT"
] | null | null | null | genderPredictScript.py | ganesh2583/Python-Data_Science | 233586491d3863176a008b938b0946c472940a6d | [
"MIT"
] | null | null | null | genderPredictScript.py | ganesh2583/Python-Data_Science | 233586491d3863176a008b938b0946c472940a6d | [
"MIT"
] | null | null | null | from sklearn import tree
from sklearn import neighbors
from sklearn import gaussian_process
# Training data: one row per person as [height, weight, shoe size]
# (units are not stated in the script — presumably cm/kg/US size; confirm).
X = [[181,80,10],[161,70,6],[171,66,7],[176,88,7],[189,100,8],[141,80,5],[156,78,6],[161,50,6],[171,60,7],[151,78,7],[171,40,7]]
# Gender label for each row of X.
Y = ['male','male','male','male','male','female','female','female','female','female','female']
# Decision tree: fit on the training data above.
decisionTreeclassifier = tree.DecisionTreeClassifier()
decisionTreeclassifier = decisionTreeclassifier.fit(X,Y)
# Classify one unseen sample [161, 60, 9].
decisionTreeclassifierPrediction = decisionTreeclassifier.predict([[161,60,9]])
# Show the fitted model and its prediction.
print(decisionTreeclassifier)
print(decisionTreeclassifierPrediction)
# k-nearest neighbours: same data, same query sample.
kNeighborsClassifier = neighbors.KNeighborsClassifier()
kNeighborsClassifier = kNeighborsClassifier.fit(X,Y)
kNeighborsClassifierPrediction = kNeighborsClassifier.predict([[161,60,9]])
# Show the fitted model and its prediction.
print(kNeighborsClassifier)
print(kNeighborsClassifierPrediction)
# Gaussian process classifier: same data, same query sample.
gaussianProcessClassifier = gaussian_process.GaussianProcessClassifier()
gaussianProcessClassifier = gaussianProcessClassifier.fit(X,Y)
gaussianProcessClassifierPrediction = gaussianProcessClassifier.predict([[161,60,9]])
# Show the fitted model; its prediction is printed on the next line.
print(gaussianProcessClassifier)
# Show the predicted gender for the query sample.
print(gaussianProcessClassifierPrediction) | 29.45614 | 128 | 0.805837 | from sklearn import tree
from sklearn import neighbors
from sklearn import gaussian_process
X = [[181,80,10],[161,70,6],[171,66,7],[176,88,7],[189,100,8],[141,80,5],[156,78,6],[161,50,6],[171,60,7],[151,78,7],[171,40,7]]
Y = ['male','male','male','male','male','female','female','female','female','female','female']
decisionTreeclassifier = tree.DecisionTreeClassifier()
decisionTreeclassifier = decisionTreeclassifier.fit(X,Y)
decisionTreeclassifierPrediction = decisionTreeclassifier.predict([[161,60,9]])
print(decisionTreeclassifier)
print(decisionTreeclassifierPrediction)
kNeighborsClassifier = neighbors.KNeighborsClassifier()
kNeighborsClassifier = kNeighborsClassifier.fit(X,Y)
kNeighborsClassifierPrediction = kNeighborsClassifier.predict([[161,60,9]])
print(kNeighborsClassifier)
print(kNeighborsClassifierPrediction)
gaussianProcessClassifier = gaussian_process.GaussianProcessClassifier()
gaussianProcessClassifier = gaussianProcessClassifier.fit(X,Y)
gaussianProcessClassifierPrediction = gaussianProcessClassifier.predict([[161,60,9]])
print(gaussianProcessClassifier)
print(gaussianProcessClassifierPrediction) | true | true |
f7207ef7a8d650d8bd9ead304e0f30c6f37038f4 | 366 | py | Python | main.py | zruss11/Nike-InboxCheck | 5e3be689797a8d861e4894b1df4bd6ed8f1066e8 | [
"MIT"
] | 11 | 2017-09-09T03:47:18.000Z | 2019-11-15T14:12:51.000Z | main.py | zruss11/Nike-InboxCheck | 5e3be689797a8d861e4894b1df4bd6ed8f1066e8 | [
"MIT"
] | null | null | null | main.py | zruss11/Nike-InboxCheck | 5e3be689797a8d861e4894b1df4bd6ed8f1066e8 | [
"MIT"
] | 5 | 2017-09-09T03:48:07.000Z | 2020-07-04T00:59:01.000Z | import requests
from classes.login import Login
from classes.logger import logger
log = logger().log
with open('config/accounts.txt') as accounts_file:
accounts = accounts_file.read().splitlines()
def run(x):
    """Attempt a login for one account entry (a "user:pass"-style string)."""
    session = requests.Session()
    account_name = x.split(':')[0]
    log("{} Attempting Login".format(account_name))
    Login(session).login(x)
for x in accounts:
run(x) | 19.263158 | 54 | 0.674863 | import requests
from classes.login import Login
from classes.logger import logger
log = logger().log
with open('config/accounts.txt') as accounts_file:
accounts = accounts_file.read().splitlines()
def run(x):
req = requests.Session()
log("{} Attempting Login".format(x.split(':')[0]))
l = Login(req)
l.login(x)
for x in accounts:
run(x) | true | true |
f7207f32e150c7bc0ac42fe89ad3f6575a482a24 | 2,502 | py | Python | google/cloud/bigquery/_http.py | msuozzo/python-bigquery | dcb8728c12f5ab0d7809a1b6cf72755dff973772 | [
"Apache-2.0"
] | 384 | 2020-02-07T06:39:26.000Z | 2022-03-30T18:25:50.000Z | google/cloud/bigquery/_http.py | msuozzo/python-bigquery | dcb8728c12f5ab0d7809a1b6cf72755dff973772 | [
"Apache-2.0"
] | 770 | 2020-02-04T10:46:40.000Z | 2022-03-31T15:12:19.000Z | google/cloud/bigquery/_http.py | msuozzo/python-bigquery | dcb8728c12f5ab0d7809a1b6cf72755dff973772 | [
"Apache-2.0"
] | 177 | 2020-02-06T05:24:31.000Z | 2022-03-25T18:51:36.000Z | # Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create / interact with Google BigQuery connections."""
import os
import pkg_resources
from google.cloud import _http # type: ignore # pytype: disable=import-error
from google.cloud.bigquery import __version__
# TODO: Increase the minimum version of google-cloud-core to 1.6.0
# and remove this logic. See:
# https://github.com/googleapis/python-bigquery/issues/509
# mTLS (client certificate) support needs google-cloud-core >= 1.6.0; fail
# fast at import time with a clear message rather than obscurely at runtime.
if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE") == "true":  # pragma: NO COVER
    release = pkg_resources.get_distribution("google-cloud-core").parsed_version
    if release < pkg_resources.parse_version("1.6.0"):
        raise ImportError("google-cloud-core >= 1.6.0 is required to use mTLS feature")
class Connection(_http.JSONConnection):
    """A connection to Google BigQuery via the JSON REST API.

    Args:
        client (google.cloud.bigquery.client.Client): The client that owns the current connection.
        client_info (Optional[google.api_core.client_info.ClientInfo]): Instance used to generate user agent.
        api_endpoint (str): The api_endpoint to use. If None, the library will decide what endpoint to use.
    """

    DEFAULT_API_ENDPOINT = "https://bigquery.googleapis.com"
    DEFAULT_API_MTLS_ENDPOINT = "https://bigquery.mtls.googleapis.com"

    API_VERSION = "v2"
    """The version of the API, used in building the API call's URL."""

    API_URL_TEMPLATE = "{api_base_url}/bigquery/{api_version}{path}"
    """A template for the URL of a particular API call."""

    def __init__(self, client, client_info=None, api_endpoint=None):
        super().__init__(client, client_info)
        # Auto-switching to the mTLS endpoint is only allowed when the caller
        # did not pin an explicit endpoint.
        self.API_BASE_URL = api_endpoint or self.DEFAULT_API_ENDPOINT
        self.API_BASE_MTLS_URL = self.DEFAULT_API_MTLS_ENDPOINT
        self.ALLOW_AUTO_SWITCH_TO_MTLS_URL = api_endpoint is None
        self._client_info.gapic_version = __version__
        self._client_info.client_library_version = __version__
| 41.7 | 109 | 0.742606 |
import os
import pkg_resources
from google.cloud import _http port __version__
if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE") == "true":
release = pkg_resources.get_distribution("google-cloud-core").parsed_version
if release < pkg_resources.parse_version("1.6.0"):
raise ImportError("google-cloud-core >= 1.6.0 is required to use mTLS feature")
class Connection(_http.JSONConnection):
DEFAULT_API_ENDPOINT = "https://bigquery.googleapis.com"
DEFAULT_API_MTLS_ENDPOINT = "https://bigquery.mtls.googleapis.com"
def __init__(self, client, client_info=None, api_endpoint=None):
super(Connection, self).__init__(client, client_info)
self.API_BASE_URL = api_endpoint or self.DEFAULT_API_ENDPOINT
self.API_BASE_MTLS_URL = self.DEFAULT_API_MTLS_ENDPOINT
self.ALLOW_AUTO_SWITCH_TO_MTLS_URL = api_endpoint is None
self._client_info.gapic_version = __version__
self._client_info.client_library_version = __version__
API_VERSION = "v2"
API_URL_TEMPLATE = "{api_base_url}/bigquery/{api_version}{path}"
| true | true |
f720804843b0cf052334ea5fd31c198aa77f5bcf | 51,935 | py | Python | tests/test_catalina_10_15_7.py | RhetTbull/osxphotos | 0e9b9d625190b94c1dd68276e3b0e5367002d87c | [
"MIT"
] | 656 | 2019-08-14T14:10:44.000Z | 2022-03-28T15:25:42.000Z | tests/test_catalina_10_15_7.py | RhetTbull/osxphotos | 0e9b9d625190b94c1dd68276e3b0e5367002d87c | [
"MIT"
] | 557 | 2019-10-14T19:00:02.000Z | 2022-03-28T00:48:30.000Z | tests/test_catalina_10_15_7.py | RhetTbull/osxphotos | 0e9b9d625190b94c1dd68276e3b0e5367002d87c | [
"MIT"
] | 58 | 2019-12-27T01:39:33.000Z | 2022-02-26T22:18:49.000Z | """ Basic tests for Photos 5 on MacOS 10.15.7 """
import datetime
import os
import os.path
import pathlib
import sqlite3
import tempfile
import time
from collections import Counter, namedtuple
import pytest
import osxphotos
from osxphotos._constants import _UNKNOWN_PERSON
from osxphotos.utils import _get_os_version
# --- Environment / library-location constants -------------------------------
OS_VERSION = _get_os_version()
# Author-machine-only tests are skipped unless this env var is set on 10.15
SKIP_TEST = "OSXPHOTOS_TEST_EXPORT" not in os.environ or OS_VERSION[1] != "15"
PHOTOS_DB_LOCAL = os.path.expanduser("~/Pictures/Photos Library.photoslibrary")
PHOTOS_DB = "tests/Test-10.15.7.photoslibrary/database/photos.db"
PHOTOS_DB_PATH = "/Test-10.15.7.photoslibrary/database/photos.db"
PHOTOS_LIBRARY_PATH = "/Test-10.15.7.photoslibrary"

# --- Expected counts in the bundled test library ----------------------------
PHOTOS_DB_LEN = 25
PHOTOS_NOT_IN_TRASH_LEN = 23
PHOTOS_IN_TRASH_LEN = 2
PHOTOS_DB_IMPORT_SESSIONS = 17

# --- Expected keyword / person / album inventories --------------------------
KEYWORDS = [
    "Kids",
    "wedding",
    "flowers",
    "England",
    "London",
    "London 2018",
    "St. James's Park",
    "UK",
    "United Kingdom",
    "foo/bar",
    "Travel",
    "Maria",
    "Drink",
    "Val d'Isère",
    "Wine",
    "Wine Bottle",
    "Food",
    "Furniture",
    "Pizza",
    "Table",
    "Cloudy",
    "Cord",
    "Outdoor",
    "Sky",
    "Sunset Sunrise",
]
# Photos 5 includes blank person for detected face
PERSONS = ["Katie", "Suzy", "Maria", _UNKNOWN_PERSON]
ALBUMS = [
    "2018-10 - Sponsion, Museum, Frühstück, Römermuseum",
    "2019-10/11 Paris Clermont",
    "AlbumInFolder",
    "EmptyAlbum",
    "I have a deleted twin",  # there's an empty album with same name that has been deleted
    "Multi Keyword",
    "Pumpkin Farm",
    "Raw",
    "Sorted Manual",
    "Sorted Newest First",
    "Sorted Oldest First",
    "Sorted Title",
    "Test Album",  # there are 2 albums named "Test Album" for testing duplicate album names
]
# keyword -> number of photos carrying it
KEYWORDS_DICT = {
    "Drink": 2,
    "England": 1,
    "Kids": 4,
    "London 2018": 1,
    "London": 1,
    "Maria": 1,
    "St. James's Park": 1,
    "Travel": 2,
    "UK": 1,
    "United Kingdom": 1,
    "Val d'Isère": 2,
    "Wine Bottle": 2,
    "Wine": 2,
    "flowers": 1,
    "foo/bar": 1,
    "wedding": 3,
    "Food": 2,
    "Furniture": 2,
    "Pizza": 2,
    "Table": 2,
    "Cloudy": 2,
    "Cord": 2,
    "Outdoor": 2,
    "Sky": 2,
    "Sunset Sunrise": 2,
}
# person -> number of photos they appear in
PERSONS_DICT = {"Katie": 3, "Suzy": 2, "Maria": 2, _UNKNOWN_PERSON: 1}
# album -> number of photos it contains
ALBUM_DICT = {
    "2018-10 - Sponsion, Museum, Frühstück, Römermuseum": 1,
    "2019-10/11 Paris Clermont": 1,
    "AlbumInFolder": 2,
    "EmptyAlbum": 0,
    "I have a deleted twin": 1,
    "Multi Keyword": 2,
    "Pumpkin Farm": 3,
    "Raw": 4,
    "Sorted Manual": 3,
    "Sorted Newest First": 3,
    "Sorted Oldest First": 3,
    "Sorted Title": 3,
    "Test Album": 2,
}  # Note: there are 2 albums named "Test Album" for testing duplicate album names

# --- UUIDs of specific photos with known properties -------------------------
UUID_DICT = {
    "missing": "A1DD1F98-2ECD-431F-9AC9-5AFEFE2D3A5C",
    "favorite": "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51",
    "not_favorite": "A1DD1F98-2ECD-431F-9AC9-5AFEFE2D3A5C",
    "hidden": "A1DD1F98-2ECD-431F-9AC9-5AFEFE2D3A5C",
    "not_hidden": "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51",
    "has_adjustments": "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51",
    "adjustments_info": "7783E8E6-9CAC-40F3-BE22-81FB7051C266",
    "no_adjustments": "D05A5FE3-15FB-49A1-A15D-AB3DA6F8B068",
    "location": "DC99FBDD-7A52-4100-A5BB-344131646C30",
    "no_location": "6191423D-8DB8-4D4C-92BE-9BBBA308AAC4",
    "external_edit": "DC99FBDD-7A52-4100-A5BB-344131646C30",
    "no_external_edit": "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51",
    "export": "D79B8D77-BFFC-460B-9312-034F2877D35B",  # "Pumkins2.jpg"
    "export_tif": "8846E3E6-8AC8-4857-8448-E3D025784410",
    "in_album": "D79B8D77-BFFC-460B-9312-034F2877D35B",  # "Pumkins2.jpg"
    "date_invalid": "8846E3E6-8AC8-4857-8448-E3D025784410",
    "intrash": "71E3E212-00EB-430D-8A63-5E294B268554",
    "not_intrash": "DC99FBDD-7A52-4100-A5BB-344131646C30",
    "intrash_person_keywords": "6FD38366-3BF2-407D-81FE-7153EB6125B6",
    "import_session": "8846E3E6-8AC8-4857-8448-E3D025784410",
    "movie": "D1359D09-1373-4F3B-B0E3-1A4DE573E4A3",
    "description_newlines": "7F74DD34-5920-4DA3-B284-479887A34F66",
    "no_duplicates": "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51",
    "multi_query_1": "D79B8D77-BFFC-460B-9312-034F2877D35B",
    "multi_query_2": "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51",
}
# UUIDs only valid in the author's local library (see SKIP_TEST)
UUID_DICT_LOCAL = {
    "not_visible": "4A836160-51B2-4E32-907D-ECDDB2CEC657",  # IMG_9815.JPG
    "burst": "9A5B4CE6-6A9F-4917-95D4-1C98D14FCE4F",  # IMG_9812.JPG
    "burst_key": "9A5B4CE6-6A9F-4917-95D4-1C98D14FCE4F",  # IMG_9812.JPG
    "burst_not_key": "4A836160-51B2-4E32-907D-ECDDB2CEC657",  # IMG_9815.JPG
    "burst_selected": "75154738-83AA-4DCD-A913-632D5D1C0FEE",  # IMG_9814.JPG
    "burst_not_selected": "89E235DD-B9AC-4E8D-BDA2-986981CA7582",  # IMG_9813.JPG
    "burst_default": "F5E6BD24-B493-44E9-BDA2-7AD9D2CC8C9D",  # IMG_9816.JPG
    "burst_not_default": "75154738-83AA-4DCD-A913-632D5D1C0FEE",  # IMG_9814.JPG
    "live_edited": "54A01B04-16D7-4FDE-8860-19F2A641E433",  # IMG_3203.HEIC
    "live": "8EC216A2-0032-4934-BD3F-04C6259B3304",  # IMG_3259.HEIC
}
# Members of the "Pumpkin Farm" album and their expected manual sort order
UUID_PUMPKIN_FARM = [
    "F12384F6-CD17-4151-ACBA-AE0E3688539E",
    "D79B8D77-BFFC-460B-9312-034F2877D35B",
    "1EB2B765-0765-43BA-A90C-0D0580E6172C",
]
ALBUM_SORT_ORDER = [
    "1EB2B765-0765-43BA-A90C-0D0580E6172C",
    "F12384F6-CD17-4151-ACBA-AE0E3688539E",
    "D79B8D77-BFFC-460B-9312-034F2877D35B",
]
ALBUM_KEY_PHOTO = "D79B8D77-BFFC-460B-9312-034F2877D35B"

# --- Expected UTIs (current vs. original) -----------------------------------
UTI_DICT = {
    "8846E3E6-8AC8-4857-8448-E3D025784410": "public.tiff",
    "7783E8E6-9CAC-40F3-BE22-81FB7051C266": "public.jpeg",
    "1EB2B765-0765-43BA-A90C-0D0580E6172C": "public.jpeg",
}
UTI_ORIGINAL_DICT = {
    "8846E3E6-8AC8-4857-8448-E3D025784410": "public.tiff",
    "7783E8E6-9CAC-40F3-BE22-81FB7051C266": "public.heic",
    "1EB2B765-0765-43BA-A90C-0D0580E6172C": "public.jpeg",
}

# --- Expected RAW-related properties per photo ------------------------------
RawInfo = namedtuple(
    "RawInfo",
    [
        "comment",
        "original_filename",
        "has_raw",
        "israw",
        "raw_original",
        "uti",
        "uti_original",
        "uti_raw",
    ],
)
RAW_DICT = {
    "D05A5FE3-15FB-49A1-A15D-AB3DA6F8B068": RawInfo(
        "raw image, no jpeg pair",
        "DSC03584.dng",
        False,
        True,
        False,
        "com.adobe.raw-image",
        "com.adobe.raw-image",
        None,
    ),
    "A92D9C26-3A50-4197-9388-CB5F7DB9FA91": RawInfo(
        "raw+jpeg, jpeg original",
        "IMG_1994.JPG",
        True,
        False,
        False,
        "public.jpeg",
        "public.jpeg",
        "com.canon.cr2-raw-image",
    ),
    "4D521201-92AC-43E5-8F7C-59BC41C37A96": RawInfo(
        "raw+jpeg, raw original",
        "IMG_1997.JPG",
        True,
        False,
        True,
        "public.jpeg",
        "public.jpeg",
        "com.canon.cr2-raw-image",
    ),
    "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51": RawInfo(
        "jpeg, no raw",
        "wedding.jpg",
        False,
        False,
        False,
        "public.jpeg",
        "public.jpeg",
        None,
    ),
}

# --- Miscellaneous expected values ------------------------------------------
ORIGINAL_FILENAME_DICT = {
    "uuid": "D79B8D77-BFFC-460B-9312-034F2877D35B",
    "filename": "D79B8D77-BFFC-460B-9312-034F2877D35B.jpeg",
    "original_filename": "Pumkins2.jpg",
}
UUID_IS_REFERENCE = "A1DD1F98-2ECD-431F-9AC9-5AFEFE2D3A5C"
UUID_NOT_REFERENCE = "F12384F6-CD17-4151-ACBA-AE0E3688539E"
UUID_DUPLICATE = ""
# uuid -> expected detected text (None if no text expected)
UUID_DETECTED_TEXT = {
    "E2078879-A29C-4D6F-BACB-E3BBE6C3EB91": "osxphotos",
    "A92D9C26-3A50-4197-9388-CB5F7DB9FA91": None,
}
@pytest.fixture(scope="module")
def photosdb():
    """Module-scoped PhotosDB opened on the bundled 10.15.7 test library."""
    return osxphotos.PhotosDB(dbfile=PHOTOS_DB)


@pytest.fixture(scope="module")
def photosdb_local():
    """Module-scoped PhotosDB opened on the author's local system library."""
    return osxphotos.PhotosDB(dbfile=PHOTOS_DB_LOCAL)
def test_init1():
    """Open PhotosDB with the database passed as a named argument."""
    # test named argument
    photosdb = osxphotos.PhotosDB(dbfile=PHOTOS_DB)
    assert isinstance(photosdb, osxphotos.PhotosDB)


def test_init2():
    """Open PhotosDB with the database passed as a positional argument."""
    # test positional argument
    photosdb = osxphotos.PhotosDB(PHOTOS_DB)
    assert isinstance(photosdb, osxphotos.PhotosDB)


def test_init3():
    """Passing both positional and named database arguments raises."""
    # test positional and named argument (raises exception)
    with pytest.raises(Exception):
        assert osxphotos.PhotosDB(PHOTOS_DB, dbfile=PHOTOS_DB)
def test_init4():
    """PhotosDB raises when given a file that is not a Photos database."""
    # test invalid db
    (bad_db, bad_db_name) = tempfile.mkstemp(suffix=".db", prefix="osxphotos-")
    os.close(bad_db)
    with pytest.raises(Exception):
        assert osxphotos.PhotosDB(bad_db_name)
    with pytest.raises(Exception):
        assert osxphotos.PhotosDB(dbfile=bad_db_name)
    # best-effort cleanup of the temp file; only swallow filesystem errors
    # (the original bare "except:" would also hide e.g. KeyboardInterrupt)
    try:
        os.remove(bad_db_name)
    except OSError:
        pass
def test_init5(mocker):
    """PhotosDB() raises when the last-used library path cannot be found."""
    # test failed get_last_library_path
    def bad_library():
        return None

    # get_last_library actually in utils but need to patch it in photosdb because it's imported into photosdb
    # because of the layout of photosdb/ need to patch it this way...don't really understand why, but it works
    mocker.patch("osxphotos.photosdb.photosdb.get_last_library_path", new=bad_library)
    with pytest.raises(Exception):
        assert osxphotos.PhotosDB()
def test_db_len(photosdb):
    """Library contains the expected total photo count (including trash)."""
    # assert photosdb.db_version in osxphotos._TESTED_DB_VERSIONS
    assert len(photosdb) == PHOTOS_DB_LEN


def test_db_version(photosdb):
    """Database reports the Photos 5 schema version."""
    # assert photosdb.db_version in osxphotos._TESTED_DB_VERSIONS
    assert photosdb.db_version == "6000"


def test_persons(photosdb):
    """All expected persons are present."""
    assert "Katie" in photosdb.persons
    assert Counter(PERSONS) == Counter(photosdb.persons)


def test_keywords(photosdb):
    """All expected keywords are present."""
    assert "wedding" in photosdb.keywords
    assert Counter(KEYWORDS) == Counter(photosdb.keywords)


def test_album_names(photosdb):
    """All expected album names are present."""
    assert "Pumpkin Farm" in photosdb.albums
    assert Counter(ALBUMS) == Counter(photosdb.albums)


def test_keywords_dict(photosdb):
    """keywords_as_dict maps keyword -> photo count."""
    keywords = photosdb.keywords_as_dict
    assert keywords["wedding"] == 3
    assert keywords == KEYWORDS_DICT


def test_persons_as_dict(photosdb):
    """persons_as_dict maps person -> photo count."""
    persons = photosdb.persons_as_dict
    assert persons["Maria"] == 2
    assert persons == PERSONS_DICT


def test_albums_as_dict(photosdb):
    """albums_as_dict maps album -> photo count."""
    albums = photosdb.albums_as_dict
    assert albums["Pumpkin Farm"] == 3
    assert albums == ALBUM_DICT


def test_album_sort_order(photosdb):
    """Photos in an album are returned in the album's sort order."""
    album = [a for a in photosdb.album_info if a.title == "Pumpkin Farm"][0]
    photos = album.photos
    uuids = [p.uuid for p in photos]
    assert uuids == ALBUM_SORT_ORDER


def test_album_empty_album(photosdb):
    """An empty album yields an empty photo list."""
    album = [a for a in photosdb.album_info if a.title == "EmptyAlbum"][0]
    photos = album.photos
    assert photos == []
def test_attributes(photosdb):
    """Basic metadata attributes of a known photo (Pumkins2.jpg)."""
    photos = photosdb.photos(uuid=["D79B8D77-BFFC-460B-9312-034F2877D35B"])
    assert len(photos) == 1
    p = photos[0]
    assert p.keywords == ["Kids"]
    assert p.original_filename == "Pumkins2.jpg"
    assert p.filename == "D79B8D77-BFFC-460B-9312-034F2877D35B.jpeg"
    assert p.date == datetime.datetime(
        2018, 9, 28, 16, 7, 7, 0, datetime.timezone(datetime.timedelta(seconds=-14400))
    )
    assert p.date_added == datetime.datetime(
        2019,
        7,
        27,
        9,
        16,
        49,
        778432,
        tzinfo=datetime.timezone(datetime.timedelta(days=-1, seconds=72000)),
    )
    assert p.description == "Girl holding pumpkin"
    assert p.title == "I found one!"
    assert sorted(p.albums) == ["Multi Keyword", "Pumpkin Farm", "Test Album"]
    assert p.persons == ["Katie"]
    assert p.path.endswith(
        "tests/Test-10.15.7.photoslibrary/originals/D/D79B8D77-BFFC-460B-9312-034F2877D35B.jpeg"
    )
    assert p.ismissing == False


def test_attributes_2(photosdb):
    """Test attributes including height, width, etc"""
    photos = photosdb.photos(uuid=[UUID_DICT["has_adjustments"]])
    assert len(photos) == 1
    p = photos[0]
    assert sorted(p.keywords) == ["Maria", "wedding"]
    assert p.original_filename == "wedding.jpg"
    assert p.filename == "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51.jpeg"
    assert p.date == datetime.datetime(
        2019,
        4,
        15,
        14,
        40,
        24,
        86000,
        datetime.timezone(datetime.timedelta(seconds=-14400)),
    )
    assert p.description == "Bride Wedding day"
    assert p.title is None
    assert sorted(p.albums) == [
        "AlbumInFolder",
        "I have a deleted twin",
        "Multi Keyword",
    ]
    assert p.persons == ["Maria"]
    assert p.path.endswith(
        "tests/Test-10.15.7.photoslibrary/originals/E/E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51.jpeg"
    )
    assert not p.ismissing
    assert p.hasadjustments
    assert p.height == 1325
    assert p.width == 1526
    assert p.original_height == 1367
    assert p.original_width == 2048
    assert p.orientation == 1
    assert p.original_orientation == 1
    assert p.original_filesize == 460483
def test_missing(photosdb):
    """A missing photo has no path and ismissing set."""
    photos = photosdb.photos(uuid=[UUID_DICT["missing"]])
    assert len(photos) == 1
    p = photos[0]
    assert p.path is None
    assert p.ismissing == True


def test_favorite(photosdb):
    """favorite is True for a favorited photo."""
    photos = photosdb.photos(uuid=[UUID_DICT["favorite"]])
    assert len(photos) == 1
    p = photos[0]
    assert p.favorite == True


def test_not_favorite(photosdb):
    """favorite is False for a non-favorited photo."""
    photos = photosdb.photos(uuid=[UUID_DICT["not_favorite"]])
    assert len(photos) == 1
    p = photos[0]
    assert p.favorite == False


def test_hidden(photosdb):
    """hidden is True for a hidden photo."""
    photos = photosdb.photos(uuid=[UUID_DICT["hidden"]])
    assert len(photos) == 1
    p = photos[0]
    assert p.hidden == True


def test_not_hidden(photosdb):
    """hidden is False for a non-hidden photo."""
    photos = photosdb.photos(uuid=[UUID_DICT["not_hidden"]])
    assert len(photos) == 1
    p = photos[0]
    assert p.hidden == False


def test_visible(photosdb):
    """test visible"""
    photos = photosdb.photos(uuid=[UUID_DICT["not_hidden"]])
    assert len(photos) == 1
    p = photos[0]
    assert p.visible


def test_not_burst(photosdb):
    """test not burst"""
    photos = photosdb.photos(uuid=[UUID_DICT["not_hidden"]])
    assert len(photos) == 1
    p = photos[0]
    assert not p.burst


def test_location_1(photosdb):
    """A photo with location data returns the expected lat/lon pair."""
    # test photo with lat/lon info
    photos = photosdb.photos(uuid=[UUID_DICT["location"]])
    assert len(photos) == 1
    p = photos[0]
    lat, lon = p.location
    assert lat == pytest.approx(51.50357167)
    assert lon == pytest.approx(-0.1318055)


def test_location_2(photosdb):
    """A photo without location data returns (None, None)."""
    # test photo with no location info
    photos = photosdb.photos(uuid=[UUID_DICT["no_location"]])
    assert len(photos) == 1
    p = photos[0]
    lat, lon = p.location
    assert lat is None
    assert lon is None


def test_hasadjustments1(photosdb):
    """hasadjustments is True for an edited photo."""
    # test hasadjustments == True
    photos = photosdb.photos(uuid=[UUID_DICT["has_adjustments"]])
    assert len(photos) == 1
    p = photos[0]
    assert p.hasadjustments == True


def test_hasadjustments2(photosdb):
    """hasadjustments is False for an unedited photo."""
    # test hasadjustments == False
    photos = photosdb.photos(uuid=[UUID_DICT["no_adjustments"]])
    assert len(photos) == 1
    p = photos[0]
    assert p.hasadjustments == False
def test_external_edit1(photosdb):
    """external_edit is True for an image edited in an external editor."""
    # test image has been edited in external editor
    photos = photosdb.photos(uuid=[UUID_DICT["external_edit"]])
    assert len(photos) == 1
    p = photos[0]
    assert p.external_edit == True


def test_external_edit2(photosdb):
    """external_edit is False for an image not edited externally."""
    # test image has not been edited in external editor
    photos = photosdb.photos(uuid=[UUID_DICT["no_external_edit"]])
    assert len(photos) == 1
    p = photos[0]
    assert p.external_edit == False


def test_path_edited1(photosdb):
    """path_edited points at an existing rendered file for an edited photo."""
    # test a valid edited path
    photos = photosdb.photos(uuid=["E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51"])
    assert len(photos) == 1
    p = photos[0]
    path = p.path_edited
    assert path.endswith(
        "resources/renders/E/E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51_1_201_a.jpeg"
    )
    assert os.path.exists(path)


def test_path_edited2(photosdb):
    """path_edited is None for an unedited photo."""
    # test an invalid edited path
    photos = photosdb.photos(uuid=[UUID_DICT["no_adjustments"]])
    assert len(photos) == 1
    p = photos[0]
    path = p.path_edited
    assert path is None


def test_path_derivatives(photosdb):
    """path_derivatives returns the expected derivative filenames in order."""
    # test an path_derivatives
    photos = photosdb.photos(uuid=[UUID_DICT["no_adjustments"]])
    assert len(photos) == 1
    p = photos[0]
    path = p.path_derivatives
    derivs = [
        "D05A5FE3-15FB-49A1-A15D-AB3DA6F8B068_1_100_o.jpeg",
        "D05A5FE3-15FB-49A1-A15D-AB3DA6F8B068_1_105_c.jpeg",
    ]
    for i, p in enumerate(path):
        assert p.endswith(derivs[i])


def test_ismovie(photosdb):
    """ismovie is True for a movie asset."""
    # test ismovie == True
    photos = photosdb.photos(uuid=[UUID_DICT["movie"]])
    p = photos[0]
    assert p.ismovie


def test_not_ismovie(photosdb):
    """ismovie is False for a still image."""
    # test ismovie == False
    photos = photosdb.photos(uuid=[UUID_DICT["no_adjustments"]])
    p = photos[0]
    assert not p.ismovie
def test_count(photosdb):
    """Default photos() call excludes photos in the trash."""
    photos = photosdb.photos()
    assert len(photos) == PHOTOS_NOT_IN_TRASH_LEN


def test_photos_intrash_1(photosdb):
    """test PhotosDB.photos(intrash=True)"""
    photos = photosdb.photos(intrash=True)
    assert len(photos) == PHOTOS_IN_TRASH_LEN


def test_photos_intrash_2(photosdb):
    """test PhotosDB.photos(intrash=True)"""
    photos = photosdb.photos(intrash=True)
    for p in photos:
        assert p.intrash


def test_photos_intrash_3(photosdb):
    """test PhotosDB.photos(intrash=False)"""
    photos = photosdb.photos(intrash=False)
    for p in photos:
        assert not p.intrash


def test_photoinfo_intrash_1(photosdb):
    """Test PhotoInfo.intrash"""
    p = photosdb.photos(uuid=[UUID_DICT["intrash"]], intrash=True)[0]
    assert p.intrash
    assert p.date_trashed.isoformat() == "2120-06-10T11:24:47.685857-05:00"


def test_photoinfo_intrash_2(photosdb):
    """Test PhotoInfo.intrash and intrash=default"""
    p = photosdb.photos(uuid=[UUID_DICT["intrash"]])
    assert not p


def test_photoinfo_intrash_3(photosdb):
    """Test PhotoInfo.intrash and photo has keyword and person"""
    p = photosdb.photos(uuid=[UUID_DICT["intrash_person_keywords"]], intrash=True)[0]
    assert p.intrash
    assert "Maria" in p.persons
    assert "wedding" in p.keywords


def test_photoinfo_intrash_4(photosdb):
    """Test PhotoInfo.intrash and photo has keyword and person"""
    p = photosdb.photos(persons=["Maria"], intrash=True)[0]
    assert p.intrash
    assert "Maria" in p.persons
    assert "wedding" in p.keywords


def test_photoinfo_intrash_5(photosdb):
    """Test PhotoInfo.intrash and photo has keyword and person"""
    p = photosdb.photos(keywords=["wedding"], intrash=True)[0]
    assert p.intrash
    assert "Maria" in p.persons
    assert "wedding" in p.keywords


def test_photoinfo_not_intrash(photosdb):
    """Test PhotoInfo.intrash"""
    p = photosdb.photos(uuid=[UUID_DICT["not_intrash"]])[0]
    assert not p.intrash
    assert p.date_trashed is None
def test_keyword_2(photosdb):
    """Keyword query excludes photos in the trash."""
    photos = photosdb.photos(keywords=["wedding"])
    assert len(photos) == 2  # won't show the one in the trash


def test_keyword_not_in_album(photosdb):
    """Photos can be filtered by keyword minus album membership."""
    # find all photos with keyword "Kids" not in the album "Pumpkin Farm"
    photos1 = photosdb.photos(albums=["Pumpkin Farm"])
    photos2 = photosdb.photos(keywords=["Kids"])
    photos3 = [p for p in photos2 if p not in photos1]
    assert len(photos3) == 1
    assert photos3[0].uuid == "A1DD1F98-2ECD-431F-9AC9-5AFEFE2D3A5C"


def test_album_folder_name(photosdb):
    """Test query with album name same as a folder name"""
    photos = photosdb.photos(albums=["Pumpkin Farm"])
    assert sorted(p.uuid for p in photos) == sorted(UUID_PUMPKIN_FARM)


def test_multi_person(photosdb):
    """Querying multiple persons returns the union of their photos."""
    photos = photosdb.photos(persons=["Katie", "Suzy"])
    assert len(photos) == 3


def test_get_db_path(photosdb):
    """db_path ends with the known database path."""
    db_path = photosdb.db_path
    assert db_path.endswith(PHOTOS_DB_PATH)


def test_get_library_path(photosdb):
    """library_path ends with the known library path."""
    lib_path = photosdb.library_path
    assert lib_path.endswith(PHOTOS_LIBRARY_PATH)


def test_get_db_connection(photosdb):
    """Test PhotosDB.get_db_connection"""
    conn, cursor = photosdb.get_db_connection()
    assert isinstance(conn, sqlite3.Connection)
    assert isinstance(cursor, sqlite3.Cursor)
    results = conn.execute(
        "SELECT ZUUID FROM ZGENERICASSET WHERE ZFAVORITE = 1;"
    ).fetchall()
    assert len(results) == 1
    assert results[0][0] == "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51"  # uuid
    conn.close()
def test_export_1(photosdb):
    """Export an unedited image using the default filename."""
    # test basic export
    # get an unedited image and export it using default filename
    tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
    dest = tempdir.name
    photos = photosdb.photos(uuid=[UUID_DICT["export"]])
    filename = photos[0].original_filename
    expected_dest = os.path.join(dest, filename)
    got_dest = photos[0].export(dest)[0]
    assert got_dest == expected_dest
    assert os.path.isfile(got_dest)


def test_export_2(photosdb):
    """Export with a caller-supplied filename."""
    # test export with user provided filename
    tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
    dest = tempdir.name
    photos = photosdb.photos(uuid=[UUID_DICT["export"]])
    timestamp = time.time()
    filename = f"osxphotos-export-2-test-{timestamp}.jpg"
    expected_dest = os.path.join(dest, filename)
    got_dest = photos[0].export(dest, filename)[0]
    assert got_dest == expected_dest
    assert os.path.isfile(got_dest)


def test_export_3(photosdb):
    """A second export of the same photo increments the filename."""
    # test file already exists and test increment=True (default)
    tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
    dest = tempdir.name
    photos = photosdb.photos(uuid=[UUID_DICT["export"]])
    filename = photos[0].original_filename
    filename2 = pathlib.Path(filename)
    filename2 = f"{filename2.stem} (1){filename2.suffix}"
    expected_dest_2 = os.path.join(dest, filename2)
    got_dest = photos[0].export(dest)[0]
    got_dest_2 = photos[0].export(dest)[0]
    assert got_dest_2 == expected_dest_2
    assert os.path.isfile(got_dest_2)


def test_export_4(photosdb):
    """A second export with a user filename increments the filename."""
    # test user supplied file already exists and test increment=True (default)
    tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
    dest = tempdir.name
    photos = photosdb.photos(uuid=[UUID_DICT["export"]])
    timestamp = time.time()
    filename = f"osxphotos-export-2-test-{timestamp}.jpg"
    filename2 = f"osxphotos-export-2-test-{timestamp} (1).jpg"
    expected_dest_2 = os.path.join(dest, filename2)
    got_dest = photos[0].export(dest, filename)[0]
    got_dest_2 = photos[0].export(dest, filename)[0]
    assert got_dest_2 == expected_dest_2
    assert os.path.isfile(got_dest_2)


def test_export_5(photosdb):
    """overwrite=True re-exports to the same destination path."""
    # test file already exists and test increment=True (default)
    # and overwrite = True
    tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
    dest = tempdir.name
    photos = photosdb.photos(uuid=[UUID_DICT["export"]])
    filename = photos[0].original_filename
    expected_dest = os.path.join(dest, filename)
    got_dest = photos[0].export(dest)[0]
    got_dest_2 = photos[0].export(dest, overwrite=True)[0]
    assert got_dest_2 == got_dest
    assert got_dest_2 == expected_dest
    assert os.path.isfile(got_dest_2)


def test_export_6(photosdb):
    """overwrite=True with a user filename re-exports to the same path."""
    # test user supplied file already exists and test increment=True (default)
    # and overwrite = True
    tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
    dest = tempdir.name
    photos = photosdb.photos(uuid=[UUID_DICT["export"]])
    timestamp = time.time()
    filename = f"osxphotos-export-test-{timestamp}.jpg"
    expected_dest = os.path.join(dest, filename)
    got_dest = photos[0].export(dest, filename)[0]
    got_dest_2 = photos[0].export(dest, filename, overwrite=True)[0]
    assert got_dest_2 == got_dest
    assert got_dest_2 == expected_dest
    assert os.path.isfile(got_dest_2)
def test_export_7(photosdb):
    """Exporting over an existing file with increment=False and the default
    overwrite=False raises FileExistsError."""
    tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
    dest = tempdir.name
    photos = photosdb.photos(uuid=[UUID_DICT["export"]])
    got_dest = photos[0].export(dest)[0]
    assert os.path.isfile(got_dest)
    # assert the specific exception instead of the old
    # `e.type == type(FileExistsError())` indirection; also dropped
    # the unused local `filename`
    with pytest.raises(FileExistsError):
        # try to export again with increment = False
        photos[0].export(dest, increment=False)
def test_export_8(photosdb):
    """Exporting a missing photo returns an empty list."""
    # try to export missing file
    tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
    dest = tempdir.name
    photos = photosdb.photos(uuid=[UUID_DICT["missing"]])
    assert photos[0].export(dest) == []
def test_export_9(photosdb):
    """Exporting the edited version of an unedited photo raises ValueError."""
    tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
    dest = tempdir.name
    photos = photosdb.photos(uuid=[UUID_DICT["no_adjustments"]])
    # assert the specific exception directly (was pytest.raises(Exception)
    # plus a separate e.type check); also dropped the unused local `filename`
    with pytest.raises(ValueError):
        photos[0].export(dest, edited=True)
def test_export_10(photosdb):
    """Exporting the edited version of an unedited photo (with a name) raises."""
    # try to export edited file that's not edited and name provided
    # should raise exception
    tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
    dest = tempdir.name
    photos = photosdb.photos(uuid=[UUID_DICT["no_adjustments"]])
    timestamp = time.time()
    filename = f"osxphotos-export-test-{timestamp}.jpg"
    with pytest.raises(Exception) as e:
        assert photos[0].export(dest, filename, edited=True)
    assert e.type == ValueError


def test_export_11(photosdb):
    """Export the edited file with a caller-supplied name."""
    # export edited file with name provided
    tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
    dest = tempdir.name
    photos = photosdb.photos(uuid=[UUID_DICT["has_adjustments"]])
    timestamp = time.time()
    filename = f"osxphotos-export-test-{timestamp}.jpg"
    expected_dest = os.path.join(dest, filename)
    got_dest = photos[0].export(dest, filename, edited=True)[0]
    assert got_dest == expected_dest


def test_export_12(photosdb):
    """Export the edited file with the default "<stem>_edited" name."""
    # export edited file with default name
    tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
    dest = tempdir.name
    photos = photosdb.photos(uuid=[UUID_DICT["has_adjustments"]])
    edited_name = pathlib.Path(photos[0].path_edited).name
    edited_suffix = pathlib.Path(edited_name).suffix
    filename = (
        pathlib.Path(photos[0].original_filename).stem + "_edited" + edited_suffix
    )
    expected_dest = os.path.join(dest, filename)
    got_dest = photos[0].export(dest, edited=True)[0]
    assert got_dest == expected_dest
def test_export_13(photosdb):
    """Exporting to a nonexistent destination directory raises FileNotFoundError."""
    tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
    dest = tempdir.name
    # build a path to a folder that doesn't exist
    i = 0
    while os.path.isdir(dest):
        dest = os.path.join(dest, str(i))
        i += 1
    photos = photosdb.photos(uuid=[UUID_DICT["export"]])
    # assert the specific exception instead of the old
    # `e.type == type(FileNotFoundError())` indirection; also dropped
    # the unused local `filename`
    with pytest.raises(FileNotFoundError):
        photos[0].export(dest)
def test_export_14(photosdb, caplog):
    """Export with a different but valid extension does not log a warning."""
    # test export with user provided filename with different (but valid) extension than source
    tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
    dest = tempdir.name
    photos = photosdb.photos(uuid=[UUID_DICT["export_tif"]])
    timestamp = time.time()
    filename = f"osxphotos-export-2-test-{timestamp}.tif"
    expected_dest = os.path.join(dest, filename)
    got_dest = photos[0].export(dest, filename)[0]
    assert got_dest == expected_dest
    assert os.path.isfile(got_dest)
    assert "Invalid destination suffix" not in caplog.text


def test_export_no_original_filename(photosdb):
    """Export falls back to "<uuid>.jpeg" when the original filename is null."""
    # test export OK if original filename is null
    # issue #267
    tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
    dest = tempdir.name
    photos = photosdb.photos(uuid=[UUID_DICT["export"]])

    # monkey patch original_filename for testing
    original_filename = photos[0]._info["originalFilename"]
    photos[0]._info["originalFilename"] = None
    filename = f"{photos[0].uuid}.jpeg"
    expected_dest = os.path.join(dest, filename)
    got_dest = photos[0].export(dest)[0]
    assert got_dest == expected_dest
    assert os.path.isfile(got_dest)
    photos[0]._info["originalFilename"] = original_filename
def test_eq():
    """Test equality of two PhotoInfo objects"""
    photosdb1 = osxphotos.PhotosDB(dbfile=PHOTOS_DB)
    photosdb2 = osxphotos.PhotosDB(dbfile=PHOTOS_DB)
    photos1 = photosdb1.photos(uuid=[UUID_DICT["export"]])
    photos2 = photosdb2.photos(uuid=[UUID_DICT["export"]])
    assert photos1[0] == photos2[0]


def test_eq_2():
    """Test equality of two PhotoInfo objects when one has memoized property"""
    photosdb1 = osxphotos.PhotosDB(dbfile=PHOTOS_DB)
    photosdb2 = osxphotos.PhotosDB(dbfile=PHOTOS_DB)
    photos1 = photosdb1.photos(uuid=[UUID_DICT["in_album"]])
    photos2 = photosdb2.photos(uuid=[UUID_DICT["in_album"]])

    # memoize a value
    albums = photos1[0].albums
    assert albums
    assert photos1[0] == photos2[0]


def test_not_eq(photosdb):
    """Different photos compare unequal."""
    photos1 = photosdb.photos(uuid=[UUID_DICT["export"]])
    photos2 = photosdb.photos(uuid=[UUID_DICT["missing"]])
    assert photos1[0] != photos2[0]


def test_photosdb_repr():
    """repr(PhotosDB) round-trips via eval (ignoring transient state)."""
    photosdb = osxphotos.PhotosDB(dbfile=PHOTOS_DB)
    photosdb2 = eval(repr(photosdb))
    ignore_keys = ["_tmp_db", "_tempdir", "_tempdir_name", "_db_connection"]
    assert {k: v for k, v in photosdb.__dict__.items() if k not in ignore_keys} == {
        k: v for k, v in photosdb2.__dict__.items() if k not in ignore_keys
    }


def test_photosinfo_repr(photosdb):
    """repr(PhotoInfo) round-trips via eval."""
    photos = photosdb.photos(uuid=[UUID_DICT["favorite"]])
    photo = photos[0]
    photo2 = eval(repr(photo))
    assert {k: str(v).encode("utf-8") for k, v in photo.__dict__.items()} == {
        k: str(v).encode("utf-8") for k, v in photo2.__dict__.items()
    }
def test_from_to_date(photosdb):
    """test from_date / to_date"""
    # force a known local timezone so the expected counts are stable
    os.environ["TZ"] = "US/Pacific"
    time.tzset()

    photos = photosdb.photos(from_date=datetime.datetime(2018, 10, 28))
    assert len(photos) == 16

    photos = photosdb.photos(to_date=datetime.datetime(2018, 10, 28))
    assert len(photos) == 7

    photos = photosdb.photos(
        from_date=datetime.datetime(2018, 9, 28), to_date=datetime.datetime(2018, 9, 29)
    )
    assert len(photos) == 4


def test_from_to_date_tz(photosdb):
    """Test from_date / to_date with and without timezone"""
    os.environ["TZ"] = "US/Pacific"
    time.tzset()

    # naive datetimes are interpreted in the local (TZ env) timezone
    photos = photosdb.photos(
        from_date=datetime.datetime(2018, 9, 28, 13, 7, 0),
        to_date=datetime.datetime(2018, 9, 28, 13, 9, 0),
    )
    assert len(photos) == 1
    assert photos[0].uuid == "D79B8D77-BFFC-460B-9312-034F2877D35B"

    # aware datetimes use their own timezone
    photos = photosdb.photos(
        from_date=datetime.datetime(
            2018,
            9,
            28,
            16,
            7,
            0,
            tzinfo=datetime.timezone(datetime.timedelta(days=-1, seconds=72000)),
        ),
        to_date=datetime.datetime(
            2018,
            9,
            28,
            16,
            9,
            0,
            tzinfo=datetime.timezone(datetime.timedelta(days=-1, seconds=72000)),
        ),
    )
    assert len(photos) == 1
    assert photos[0].uuid == "D79B8D77-BFFC-460B-9312-034F2877D35B"


def test_date_invalid():
    """Test date is invalid"""
    # doesn't run correctly with the module-level fixture
    from datetime import datetime, timedelta, timezone

    import osxphotos

    photosdb = osxphotos.PhotosDB(dbfile=PHOTOS_DB)
    photos = photosdb.photos(uuid=[UUID_DICT["date_invalid"]])
    assert len(photos) == 1
    p = photos[0]
    delta = timedelta(seconds=p.tzoffset)
    tz = timezone(delta)
    assert p.date == datetime(1970, 1, 1).astimezone(tz=tz)


def test_date_modified_invalid(photosdb):
    """Test date modified is invalid"""
    photos = photosdb.photos(uuid=[UUID_DICT["date_invalid"]])
    assert len(photos) == 1
    p = photos[0]
    assert p.date_modified is None
def test_import_session_count(photosdb):
"""Test PhotosDB.import_session"""
import_sessions = photosdb.import_info
assert len(import_sessions) == PHOTOS_DB_IMPORT_SESSIONS
def test_import_session_photo(photosdb):
"""Test photo.import_session"""
photo = photosdb.get_photo(UUID_DICT["import_session"])
import_session = photo.import_info
assert import_session.creation_date == datetime.datetime(
2020,
6,
6,
7,
15,
24,
729811,
tzinfo=datetime.timezone(datetime.timedelta(days=-1, seconds=61200), "PDT"),
)
assert import_session.start_date == datetime.datetime(
2020,
6,
6,
7,
15,
24,
725564,
tzinfo=datetime.timezone(datetime.timedelta(days=-1, seconds=61200), "PDT"),
)
assert import_session.end_date == datetime.datetime(
2020,
6,
6,
7,
15,
24,
725564,
tzinfo=datetime.timezone(datetime.timedelta(days=-1, seconds=61200), "PDT"),
)
assert len(import_session.photos) == 1
def test_uti(photosdb):
    """uti and uti_original should match the expected values for each test photo."""
    for uuid, expected_uti in UTI_DICT.items():
        photo = photosdb.get_photo(uuid)
        assert photo.uti == expected_uti
        assert photo.uti_original == UTI_ORIGINAL_DICT[uuid]


def test_raw(photosdb):
    """Raw-related photo properties should match the RawInfo reference data."""
    for uuid, expected in RAW_DICT.items():
        photo = photosdb.get_photo(uuid)
        assert photo.original_filename == expected.original_filename
        assert photo.has_raw == expected.has_raw
        assert photo.israw == expected.israw
        assert photo.uti == expected.uti
        assert photo.uti_original == expected.uti_original
        assert photo.uti_raw == expected.uti_raw
def test_verbose(capsys):
    """PhotosDB(verbose=print) should emit progress output to stdout."""
    osxphotos.PhotosDB(dbfile=PHOTOS_DB, verbose=print)
    captured = capsys.readouterr()
    assert "Processing database" in captured.out


def test_original_filename(photosdb):
    """original_filename should come from the library record, falling back to filename."""
    photo = photosdb.get_photo(ORIGINAL_FILENAME_DICT["uuid"])
    assert photo.original_filename == ORIGINAL_FILENAME_DICT["original_filename"]
    assert photo.filename == ORIGINAL_FILENAME_DICT["filename"]
    # monkey patch the underlying record to exercise the fallback path
    saved = photo._info["originalFilename"]
    photo._info["originalFilename"] = None
    assert photo.original_filename == ORIGINAL_FILENAME_DICT["filename"]
    photo._info["originalFilename"] = saved
# The following tests only run on the author's personal library
# They test things difficult to test in the test libraries
@pytest.mark.skipif(SKIP_TEST, reason="Skip if not running on author's local machine.")
def test_not_visible_burst(photosdb_local):
    """A non-visible burst member should report burst=True but visible=False."""
    photo = photosdb_local.get_photo(UUID_DICT_LOCAL["not_visible"])
    assert not photo.visible
    assert photo.burst


@pytest.mark.skipif(SKIP_TEST, reason="Skip if not running on author's local machine.")
def test_visible_burst(photosdb_local):
    """A visible burst photo should report visible, burst, and 4 burst siblings."""
    photo = photosdb_local.get_photo(UUID_DICT_LOCAL["burst"])
    assert photo.visible
    assert photo.burst
    assert len(photo.burst_photos) == 4


@pytest.mark.skipif(SKIP_TEST, reason="Skip if not running on author's local machine.")
def test_burst_key(photosdb_local):
    """burst_key should be True only for the key photo of a burst."""
    assert photosdb_local.get_photo(UUID_DICT_LOCAL["burst_key"]).burst_key
    assert not photosdb_local.get_photo(UUID_DICT_LOCAL["burst_not_key"]).burst_key
@pytest.mark.skipif(SKIP_TEST, reason="Skip if not running on author's local machine.")
def test_burst_selected(photosdb_local):
    """burst_selected should reflect whether the user kept the burst member."""
    assert photosdb_local.get_photo(UUID_DICT_LOCAL["burst_selected"]).burst_selected
    assert not photosdb_local.get_photo(
        UUID_DICT_LOCAL["burst_not_selected"]
    ).burst_selected


@pytest.mark.skipif(SKIP_TEST, reason="Skip if not running on author's local machine.")
def test_burst_default_pic(photosdb_local):
    """burst_default_pick should be True only for Photos' default pick."""
    assert photosdb_local.get_photo(UUID_DICT_LOCAL["burst_default"]).burst_default_pick
    assert not photosdb_local.get_photo(
        UUID_DICT_LOCAL["burst_not_default"]
    ).burst_default_pick


@pytest.mark.skipif(SKIP_TEST, reason="Skip if not running on author's local machine.")
def test_path_edited_live_photo(photosdb_local):
    """An edited live photo should expose a path_edited_live_photo."""
    photo = photosdb_local.get_photo(UUID_DICT_LOCAL["live_edited"])
    assert photo.path_edited_live_photo is not None


@pytest.mark.skipif(SKIP_TEST, reason="Skip if not running on author's local machine.")
def test_path_edited_live_photo_not_edited(photosdb_local):
    """An unedited live photo should have no path_edited_live_photo."""
    photo = photosdb_local.get_photo(UUID_DICT_LOCAL["live"])
    assert photo.path_edited_live_photo is None
def test_is_reference(photosdb):
    """isreference should be True for referenced files and False otherwise."""
    assert photosdb.get_photo(UUID_IS_REFERENCE).isreference
    assert not photosdb.get_photo(UUID_NOT_REFERENCE).isreference
def test_adjustments(photosdb):
"""test adjustments/AdjustmentsInfo"""
from osxphotos.adjustmentsinfo import AdjustmentsInfo
photo = photosdb.get_photo(UUID_DICT["adjustments_info"])
adjustments = photo.adjustments
assert isinstance(adjustments, AdjustmentsInfo)
assert adjustments.asdict() == {
"data": b"mW[\xb7\xa2:\xb3\xfd/\xbe\xda\xa3\x17((\xf4\x18\xdf\x03H\xc2E\xb9%\\\xc4\xb3\xce\x03\x02\x12.\x82\n\x1at\x8f\xfd\xdf\xbf\xb8\xba\xfb\xec\xdec\x1c\xde\x92\xaa\xcc\x9aU\t\xa9\x99\xbff\x8f\xe26T}gv\xa7~\xf6\xe3\xaf\xd9\xf1^\xb5\xb9s?\x1f\x8b\xdb\xec\xc7\x8c\x97\xf5\xf5r\xf6m\x96^.\xd1O\xbf\xf7\xe4\x8a\xff\xce}\xe7\x17\x1c3\x0c\x19)\xce)*\x1e\xd5O#\xffmvi\xd3\xf1\xd4\xdf\xce\xcc\xd3\xc5\xfb\xd9\xdf\xdff\xe7bL\xf3tL\xdf\xf8\xe7t\x18\x8b[\\\xe5#\x99\xfdXr\x0b\x81-\xa8.E[u\xc5?\x11\xd8\xba\xef\x02C\xff\xe9l\x14UI\xc6\xd9\x0f\x81[.\xbe\xcd\xfa[Utc:\xfe\x0c\xc7\xd0\xdf\xb1\xd2\xf1\xff\x163\x06i^\xdf\x87\xf1\xcc\xdc\x86\xd9\x8f\xff\xf9\xeb\xff\xf1(\xba\xf4\xd8\x16\xf9\xec\xc7x\xbb\x17,\x8bb\x1c\xab\xae\x1c\xde\x04\xfb\xd3\x89\rw}\x96\xb6\xbb\x9fq\xb9o\xbf&\r6n\xdfs\xc3\xd7d\xd5]\xee\xe3o\x9f\xefKn\xbd\x14\xc4\xe5\x8a\x93\x16\xc2ZX,\xe4\xdf\xab\xc0t\xe9\x87\xfb\xad\xf8\x03Hm\xd3\xac\xf1\xfa\xaa\xfb\x13]\xbd\xbd\xa1\xbab\xf8\x89>\xbcs\x1c\xc6*\xfbbu\xe1\x16\xef \x1c\xb7\x96\x84%\xbf\\/DA\xe6xy\xc5\xadY\xfdD\xee\xcb&K\xdcR^\xf0\xe2JZ-\xd6\x82\xc8I\xac\x12\xf7\xb1\x8f\xd2\xf6\xfe\x0e\xfe}!\x89+\xee\x8f\x8f\x15\xf3\xf8'\x11\x86\xbe\xe4\xe5\xf5J\xe4Y\xa5EYZ\xf0k\xf1\xdbl\xec\xbb\xb4EiW\x16\xbf\x82\x08\xe2j\xcd\t\xb2\xb4\\\x8bk\xf1\xbd}\x0b\xf1\xcb\xb2\x14\x17\xb2\xc0\xf3\xeb\x95\xb0\xe6DIZ,\x99I\x96\xde&Q\xfe\xf7\xc7\x88}\x95\xd1N/l\xb3at\xd9\xe6\xdc\xe5\x88\xa3\xc6\x8f\x15q\x8f\xf8\xc6\x89U'\x860\xb9\xda\x1b\xf7b\xc1\xf2\x18\xab\xe7;\xe4\x13Ro\x82\xb5%\x83\xaa\xe1\x0e\xc4\x8c-\xd8\xf2\x9e\x19\xe9m\x9c\xf2\xf9\x18\xc7r\x9a\xb5\xfcb\xbfl\xb5\xcf\x0fbQ\xad\r\xbd\xa8\xc9\x13\x0bf^\x84\x94\t\xaa\x073\x06$\xd1#\x07\xc4\xaa\xb5\x07m\x92\xc4\x1b\xdd\xb4\xd2\xd6I\xa6G\t\x97Jy\x0co4\xcc\xc5\x88\x8f\x0eC\xb4\xe0\x0fG\xfe2\xed\x8d\xe8T\xa8gM\xc3\x8d\x13Q1fD\xa2H\x831\xe2s#\xe2\xc8\x1e\xc3\x9c\xe1\xb6\x0c\xb7\t\xe2\xe6fz\xe9\xf0\xf8\xfc\x08\xd7\xa2\xc6\x0f\xdeAEcx>\x84)\x8c\xae\xd1\x83\x1b\x86Mm\xc5\xa7)k[Q\x80Op\xc0\xaa\xca\x80\x92c\xa46\x19\x08\x84\xd0\x00\xf9\x1eG\xc4b\x80\x07\xdc
\xb6\xdb\x98\x1b\xb3\x00\xf2\xf6\xbe\x8aJt\x02\xce\xa6\x94[\xb7C\xf8\x14\xa1>\xd2/Q\xf3,??\xb6\\\x98!\xd2p\xa1\xd7\xbb\xa6j\x9d\xd0\x9c1\xa3\x9c\xa3\xbd\xec\xd4P\xe5\x04\xc3\xdf\x80\x97m\xdc\x8c\xc7/\xc0F,\x83\x05\xf4\x92\x92\xd3\xb5\xd8\xe7\x1fZ\xf4\xf9\x11\x19\xf6\xa2\xdc\xc0!\x12\xac\r?\xc5%L\xa5\x90\x12\x13C\xd5\x0c\xa3\t\xed\xdd\xb8\xc7\x11\xaa\xb6x\xab\x9aI\xf3\x8ba\xc3\xf6\x8e\x9f\x18 \x7f\xfa\x02$\xacV~\xe8\xc4\xad\xb5rt;\xcc\x91\xca;\xb2\xb2\xa7\x93\xdb\x81\xa7\x1f\x00b#\xad\xc9\xf6\x08e!\x8c\xca\x18?\xbd\xc2J\xb3\xea\x10^\xaa/\x82\xdc\x9b \xc3\x0b\x7f\xe1\xb5\xb0\xd1\xe2\xc4QK\xf1\x1ey\x02r\xc9\xd6\x02HA\x00\x99\x18t~\x98\xf3\xa2\x94$!\x8a&'\x82\x93\xbf\xe7P\xbe\x87\xe7\xb2\xfd\xfch\x96\x9f\x1f\xf8!\xff\xc30\xe4\x8b\xdf\x88\xe1\xdevsU\x1c\xbdk\xc96\x8b\xce\xe5mB\xaf=l\xb9\xb8s\x8e7^\\\xb2cD\xae\xefc\xd9\xf6\xfb\x18E7k\xa4\x97X\x9b\x9f\xf0]Y\xed\xc1\xa5\xfb\xaa!\xf7\xab\x86<l\xbde\xdf\x1fp\x1e\x9a\xb1\x99\x14jG\xf4s\x9f\x132\xef\x8d.\xa9m\x1c\x1fL\xbd\xd9?T\xb0\xc3\x9f\x1f\xd6\x96\x01\x1c\xf5\xa6\x8coj\xb1E)\xb1W\xcd\xeb\x10\xe4\xb2\xcbq\x9f\x1fy0w|\x9e7\x82p'\x04\xe5\xa4\x10\xedI\x91\x8b@\x0c\xe2\x81\xac'\xbf5_\xc3\x0b\x05H\xb79\xfb\xee\xa1q\x05\xfa\x88\xa56\x15\x10R\x0f(\x92\xab\xbd|\x84\xc8\x0e\x82\x81\xe2;\xd9J\xc6\xc5?f\x13}\xc0'\xf5\xfcR8i1\x87_\xca<\xd5(\xf5\x81\x1a>\xb5)\xb9x5\xef\xfaP\x91\x02\xed\x00\x1c\xa7\xbf6\xe1\x93B\xc8!\x8d2<\x02|\x80\x8c\x1e\xc4\nN\xc8Xou\xfb\xe2W\xc9\xc2|\xf9\xc7\xb4\x94oo\x1c\x9d\nX#\xbd\xa3Q\x0eCl\x16\xce\xb3a\xd9\xc8\x9b0\x18\xed\xddR\xb4\x1f\xaf+\x82j\x883\x04\xcf\xf0\x98\xc5t\xf2}\xfd\xe4xm\xab\xd6a\x1c\xde\x0e\xf8\xd0\x99\xe7KtT\xa31\xea\x14'\xf3\xb9\x9d\x86\xedt\x8b\xc1`\xe2\xbe\xb6kE\xb2_bV@Q4\xba\xa6|Vk\xdf\x16{O#\xd3\x11l\xa8g\xa2tm\xb8M\xb8\xa6\x82\xa9\xf9\x99WD\x8el\xb8y\x9c\xc1v\x02\x9d\xe2\xea>54\xc4\x9d\xed']\xee\xb4\xecfW\r\xb55n(\xf4\x8d\x9d\xec\xe9\xe3\xa4\xae6\xd66\xaa\x16j\x04\xe1\xa8`\xaa|~\x9c\xb4K\xef\x18>\x97\xb3\x04=\xb1\\\x9c4?q6H\xe6\xad\x8b\xe9\xe5\x94_j\x88\x01\xe3Ar\xb8\x90\xf3kG\xd9\xd5\xc3\x
dd\xc5D\xda\xdf\x9d\xbal\nEOh\xd9U\xaf\xb3\xc1\x9b\x87\x0b\xe9pp:\xf7s\xfa\xf9!k~co\xc9\xee\xbc=\xd9\xaeD\x17\x08t\t\xceU\x93U\x88\xc3\xa6B\x91\xa5\r\x12\xae\xc7\xad\x0b\x92\x97\xaf\xeb\xca\xc1TV\xb5\x9en\"\xc1\xce\xab\xca\x9ao\xe5vs\xf3\xe5\xd1\x08\xedC\x80^km\x0e\x1c\x80\xfc\x00\x9at\x7fUwW\xb0\xf5#\x1d5\xa5\xb1\xf1s\x0bq\x9d\x86\x04g\xfbl\xc16,/h\xe3K\x9a\x00\xcf\x04^\xdd\x83\xec\xd4\x15\xfb[\xf5CHe\xd8yZ*\xf9W\xb5s\\;C\x13\xa2\x9d^\xdby\x82\xe8IG}\xa8W`\xb0j\xe5\xe6\xe0\x86\xb74\xff\xb4+\xb9-$\xb4\xddm\x86\xa7\xf6R<XJN\xd8\xb7\xe7J\xbf\xdb\xbb\x8bTw\x9bMnm\xedC\xab\x82\x01\xa8\x12\xf6\xc8\xba6p\xc6\x9aj\xf2\xb04\xb3\xde=\xc1k\xfb\xa2/\xa49\xd0\x0e\xfd\t\xa9\xe0\xc5\xae\x86\xbdNh\xb7\x05\x19\x06\x08\xc8 \xc8p\xcd\xeb^jEq3U\xae\xd1\xd3\xa2\x9f\x9a\x0b\xab\x93\xab\x95,\xaf\xa7];XX\xdb5\xf7\xf4jen\x06!\xf1\x83\x8b\xebE@\xc4\x94\xdf\x00\x9f\xdb\x9b\x1b\xfbaa\xe1\x9a\x92\xc8\xb1Z*\xe4H>oa\xd6\x1c\x9e\x88\xd7\x0f\\\xe0=]b\xc0\xc4\x06T:\x00\xd5\xce-l\x9e\x8d\xba'^\xe5(\xb6&\r\xdef\xe0vA\xd38%w\xd4\xd4\xcc\x86\xa8<\x1b\xb8\x19\xdc\xe7+\xb7l\xa5H7\x9f\x1f\x9e)\x84\xdd\x15G\x9e\xb1\x14B\xa2:\x1bm\x11z\x16\x95\xaf`\x1a\x12\xf3iwf\x15\x12\x0b\xfbw\xebE\x9f\xbe\x16iv\xc0\xdd]FL#\x99m\x12?d'\xa9\xf3\x02K\xd8\tM\xfd\xa8\xf2\x87\xed\xf4\xf7\xb6zB\xeb<\x90+\x19\x1f\xe0U\x1e\xdb\xa9-\xad\x8e\xbb\xd4\x15\xb8\x9aUYoqx\xb3\x96\xc3<\xa8y\xc7i\xc2\x97_\x8d\x0b\xad51+\x8c\x03\xf7\x8a\xbd\xa1R\xae\x83\xe1\xd4\xd4\x05\xeb\x10FY\x9dqT\xeen\xef\x8bw\x15\x80[\xe6e\xd3\xb8\x84:%5Y,\xe1\xb6\xef\xec*\xa7\x10daG\xa5\x07\xd8J\xfe\x86\xa8\x9e\x9e\xf5\x8e:\xd9Xk@\x98*B\xc8\xda\\\xecM25Rp~ME\x0ey\xe5\x18\xa1\xf6\xa2\x9f\x95\xb4F\xb06\xac&\xca\xa6'6;.\xa8H\xfe\x04\xad\x8dw\xea\x1e[n\x92\xac\x91\x12\x03\x7f@\x83\xcf\x19\x10%\xaeG\xec\x03\x14\xc2C\xa9\xa6\x8a\xde\xd2r\xc2\x81\x06\xd3&&\x9b\xb8\x85\x87d\x9f\x93C\xa3\t\xa6\xb3\xf7\xe5J[\x8c\xf9\x92\x8a\xaca\xf6N\xe4\x7f~\xa0\x9d\x9c\xe1\xfbt2!l\xfcM)\xed\xd9\x11\x0fu\x94\xabz$\x9c\x86\x89\xdca\x96\x8cu\xa5%\x86I\x8f\x15\xa9\x00\x10}tDQ\x0b\r\x13\x87>\x1f
\x00Xz\xa9\xb2\xc84A\xc1\x13\x95\x1b\xd8\xd3KG\x9e;C\xe7\xc8\xb1\x94\x13\x8d\x96\xac\xd7r\x9e\x1e\xf5\xa4\xc4\xee\x1a\x8a\xc2\xbe$\x0f\x15\xf6\xe1\xfeL\x12Y7)k\xe3\x0e\x01K\xc1\xb3\xd1\x96\x80\xa2q'*\xde\xb5'\x13\t\x04\xae\xa04\xdc\xb8MLv\x17\x9f\xff\xfcx\xee\xe6\xc6\xb5t7\ngh\xe1p\x1d\xab\xfb\xd3b=kD\x16\x81\xfb>H'\xa7\xd78\x01\x17\xaa\xab\x02\xd1\x0e\x11\x02s\x80\x05\x8f\xdd\xa6;v\xabF\x90\xca>\xb8\x98~J\x9e\x0bm! \x7f\x82\x0b\xe0\x0c~\xad\x08\xecW\x0c]\xaf2\xac\xad\xe9G)\x95\xae\xe0\x9c\xb0}\x96(\xe8B/\xa4\xbc\x08\xf6\xe10 H@\x04\xfc\x145Gv\xd7\xd8\x9a2?\x82\xbd\x106\xc8\xe2uI\xc9\xee\xbe|\xd2T!H\xe9<c\xb7\xa7\xa3\"G\xd5G;{a\xd70\x85$\x08\x118\x81\xa8\xd97\xea$\x81\xde\x0f:\xe4\xdc\xb5\xaew\xacR\xa0\xa0\x1d\x9c\x04\xc55\x90l\x9c<\xbd (\xa0uW\x16\xa5\xa6\x84N\xed\xcfc\xed98*\xe5,\xa3m\x10xv\x08\xae\x92\x82\xado\xc0A\xf1v\xbe\xbc\xd5\xf7\xc0c\xdd\x12k\xcb\xd2;\x95\\\xa9-\xfb\xff0\xe9\xdf\xbe\x05\xb8\xf2\xa7|]\xfeK\xbcr\x1c\x93\x9e\x94Tc\xf1K\xbe\xf2o\xf9\xfa\x87\xfc}\xbfD\xf8\x9f\xc2\xf8\x1fI\xfcK\"\x7f\x9b\x11\xa6?\xb7\xc5\xf3m\x96\xb8\xd5R`\xb2\x9d\xe9vQ^I\xd2\xfa\xef\xdf\x8a|\xd3w\xe3\x8d=A\xfe\x10\xe9\x98\xa4yO\xdf\n\x9dyU9{bT\xa7\xea\xeb\xa9\x84\xcf\xe9m\x0c\xfa\xae\x98\xfd\xfd\xbf\x7f\xff\x17",
"editor": "com.apple.Photos",
"format_id": "com.apple.photo",
"base_version": 0,
"format_version": "1.5",
"adjustments": [
{
"formatVersion": 1,
"enabled": True,
"settings": {
"offsetLocalLight": 0,
"offsetHighlights": 0,
"inputLight": 0.3073453608247423,
"offsetExposure": 0,
"offsetBlackPoint": 0,
"offsetBrightness": 0,
"statistics": {
"p02": 0.00784313725490196,
"p50": 0.09803921568627451,
"autoValue": 0.2856,
"blackPoint": 0.0031976514035982175,
"tonalRange": 0.09845670498375754,
"p25": 0.03529411764705882,
"p98": 0.6,
"lightMap": "FVpKd0pbSVkQWA5XR1kNWBNWFFYqMCOpJFgbWBmuF1YhjCT7J9Eik0ZhIWJFl1PIVGlWa1dtWW9acl12X3lD/hJwDlUPVkdYJFcPVRAxFBZIWEhYGVNEWBJXElYYWCGIJalNYxvgF3AgbUrwUd5V1VZsV21Zb1pxXHVfeBmDDSkNVw5WF1YVVDFWR1dHV0hXSFdIWElYGVkTWkrIPasv/U75D1sPZBRtUmZUaFVqVv0ssCjJWfxcll54FyEZSBBWR1YbVBkcET4UHEdXSVhJWElZSllKW0tcTF1MXiVgRfENCg9lOnRSfVRoVGpVkyg/K0UcRhk0UPoOIBJfR+dHVw0NDzMaHB9YSFhJWElZSlpKWktbTF1MXk5gT2FPYg0GDWQ1vDV/VHM2gCFsV4JC1xWgFa8UwhISFBIUVxRXOWoSVRiKSKBIYklZSllKWkpbS1xMXk1fT2FPYhBmDQUNWlJ6NGMUdRB1N9AXwxOnEyQTEhMRDkcXRRcUFVgWSyPeJaciZUpiSlpKW0tbTFxMXU1fT2FPYlFkDWYNBg5uVP4ROhKJERARERISEnQUd158YYURVxNVFxQX0RdXFmgl/k3/Sv9KWkpbS1xMXU1eT2FPYlFkUXMOdB5tPqgv/w+9KYwqoFl0WnNbr153X3lhq0pbSloXWRVrJtwpWD+fSuA6XEpnTF1MX05gT2FPY1FlP3ooZSdUIWIYeBnhGmodhh+oHnYjMSWZIGkXvBELS/JKXEpbGkgWrBeKRahM6kzZTd9O00/dT+NQ11HTUL4TgxAhDywROREWEWsh7xQlIzszRTRGM0MuPRt6EoVMXUxeFFwPEA8ODzQRRhLFEswSuhK8HpQbcxwvFywPQg4fDW0SzA+aDwwQEBUyDxYpPj1OQFA8TDZENNoqkUywFF0RDw8ODhEQERHoEWASYhtjKGMpQiY2IzQbag9rDwwQGw4SDhoNDw0SFSIeNyk9O09CUTtML35MvzqRFBUScRFmFbcWwxQQGfNPllBjUWUrZSZnImpVbBVtVnANcQ0LDSMaKSEsISojMjA8Mz5ceF55Hnkgyi7QM5oPDhxbECwPIRa7HOkU7w4IDQcPeVN9HOdWcFlxEnAOGQwHDR0mMyw3LDcrMikwMD0seGCMYXwvfB6CJKVi2BVFFtASwA/fDpoNHQ0dDwwP5g2fDQYNCR91JpIPdw13DRAOGSs8N0U0QjNALjsuOSh8XuNjgkeAGYwgnizmH1IZphnSTfmo+w/9DQkMKhLmKfMO8w2REnYSdBIRFB0SIAwRJDs8SjtKOEYzQGGAZIA6jGaCV4MdiiJ+K9lCrQ9tHUMZTRz7D+ENERQTFIwXqBLqEKQVGRkgHCQdJR0nDR4NKylEKUgpRCQ8D4FmhFqOZ9NjiBmDGZUw9FnPDa8QqBnNOMcRxRwnGjMdYRwfGRoUGiEsJjArNSk1JDQfLg0KFhwlLCsyDzAPFg8NUolmiGuMLp8jnCCdJKMZlBEsEB8SPh7jHSclLiYvJDIjLyEzKzwzRDNFMUQxRBEzEhMXGhwnKEcSERE9ETcSj1GPaJVWkxiOHoweoxkpFB0ODg0nDyMjNS47Mj0yPjA+ITUhQTpOPVE5Sw1CEQ0XICMvJS4qahVNJlw4dR9mKFckZyR1GZ0TPyOhHFYMEw8ZEBMdJSImHjohPiNAMD8sPCs0LTkkNg0bDBcMFRgmHSksOyzdJMAeaC/PI4UnqSVPH34UhBNCD2UPJw9qExsYIyMnIiUhJSQuJzwyQDVDMT0uOCMvDhcMIhQUDRAnPTJ4L4kjvidvMNouliyFJmshqhtvEzgblxlgJn0pjiEqIigjKSUrJ3s+Tj1NNkUzQit2DlISDg0NFXAMCw8dGEsfkje/KHgimSVgLrcXRR6TErcPcxt3FGwhjh23FKonMidwFEcUnw8vEK8QChBPGcoNBxMSDkEUaA4UElYWPx9wHaEmzxedF1AbVRlpGmAajRFjHJk
VcxySIn0TihdyElMSLBXSJOYY7RAWEQsRsQ0HFRYOPhMZF4UZgBaAGlwgxSTDFakWhCWlFZYXdhZkD4INXQ9iD2td3w5yEZoNVQ/RL9cSuxfIFFkQCg8XDR4UGRdBGV4fsxhuFcYtjiDYHIwbihiEE5QRbRVlFHISUQ1TEFgPaA2cD4ASxw9kFowpnhyLHG0hbg9YDwgNCg0PGVohgSO7F54XghvBFoUXmhY9GIwWfxNhE34PMRKhEekOxw5uDykNVhF6F8sr0CWhLpQ1/yL+HqgOCA0HDUsqtiuyJYYUtRJhFXoTaxNoD04SeBOBE5MURRE+ES4PDw0LDhoVFw9QEpIQahy2D24RQxF2ENsQjA4JDQUOPiHJKIQVaw8qEmYSVg8wEnUPUw15EXUssRFhEVEQaRkbEnYMDA+bEX4UkRJ1G8AcuQ9fDB4Taw+cDQcNBRNBGtMczSOHI4YTUREfEVkXkBx8EoQTnRNuDnoNJg4wElsNYRWjE8MSYyPTTeFJuA2gDAUNjQ+WDysNBw0JHlkREynRF6YenRNkEZAPLQ9KGXEPnhGSD3gPfg0gD3o=",
"localAutoValue": 0.36000000000000004,
"whitePoint": 1.003921568627451,
"p10": 0.01568627450980392,
"highKey": 0.8063460882459689,
},
"offsetContrast": 0,
"offsetShadows": 0,
},
"identifier": "SmartTone",
}
],
"metadata": {
"masterWidth": 3024,
"pipelineVersion": "OSX.4",
"masterHeight": 4032,
"orientation": 1,
},
"orientation": 1,
"adjustment_format_version": 1,
"version_info": {
"buildNumber": "19G73",
"appVersion": "161.0.120",
"schemaRevision": 1,
"platform": "OSX",
},
"timestamp": "2020-10-03T22:54:20+00:00",
}
def test_no_adjustments(photosdb):
    """adjustments should be None for a photo with no edits."""
    photo = photosdb.get_photo(UUID_DICT["no_adjustments"])
    assert photo.adjustments is None


def test_exiftool_newlines_in_description(photosdb):
    """The exiftool dict should preserve newlines embedded in description, issue #393."""
    photo = photosdb.get_photo(UUID_DICT["description_newlines"])
    exif = photo._exiftool_dict()
    assert photo.description.find("\n") > 0
    assert exif["EXIF:ImageDescription"].find("\n") > 0
# BUG FIX: pytest.mark.skip() only accepts `reason`; passing SKIP_TEST as an
# extra positional argument is ignored at best and a collection-time error in
# some pytest versions. The intent was an unconditional skip (UUID_DICT has no
# "duplicates" key and UUID_DUPLICATE is empty), so drop SKIP_TEST.
@pytest.mark.skip(reason="Not yet implemented")
def test_duplicates_1(photosdb):
    """A photo with a duplicate twin should list it in photo.duplicates."""
    photo = photosdb.get_photo(uuid=UUID_DICT["duplicates"])
    assert len(photo.duplicates) == 1
    assert photo.duplicates[0].uuid == UUID_DUPLICATE


def test_duplicates_2(photosdb):
    """A photo with no duplicates should return an empty duplicates list."""
    photo = photosdb.get_photo(uuid=UUID_DICT["no_duplicates"])
    assert not photo.duplicates
def test_compound_query(photosdb):
    """photos() should combine multiple query terms."""
    results = photosdb.photos(persons=["Katie", "Maria"], albums=["Multi Keyword"])
    assert len(results) == 2
    uuids = [photo.uuid for photo in results]
    assert UUID_DICT["multi_query_1"] in uuids
    assert UUID_DICT["multi_query_2"] in uuids


def test_multi_keyword(photosdb):
    """photos() should match any of several keywords."""
    assert len(photosdb.photos(keywords=["Kids", "wedding"])) == 6


def test_multi_album(photosdb):
    """photos() should match any of several albums."""
    assert len(photosdb.photos(albums=["Pumpkin Farm", "Test Album"])) == 3


def test_multi_uuid(photosdb):
    """photos() should match any of several uuids."""
    results = photosdb.photos(uuid=[UUID_DICT["favorite"], UUID_DICT["not_favorite"]])
    assert len(results) == 2
def test_detected_text(photosdb):
    """detected_text() should contain the expected text (or nothing) per photo."""
    for uuid, expected_text in UUID_DETECTED_TEXT.items():
        photo = photosdb.get_photo(uuid=uuid)
        joined = " ".join(text for text, _ in photo.detected_text())
        if expected_text is None:
            assert not joined
        else:
            assert expected_text in joined
| 35.596299 | 7,228 | 0.694772 |
import datetime
import os
import os.path
import pathlib
import sqlite3
import tempfile
import time
from collections import Counter, namedtuple
import pytest
import osxphotos
from osxphotos._constants import _UNKNOWN_PERSON
from osxphotos.utils import _get_os_version
# ---- test environment -------------------------------------------------------
OS_VERSION = _get_os_version()
# Some tests require the author's personal library on macOS 10.15; they are
# skipped unless OSXPHOTOS_TEST_EXPORT is set AND the minor OS version is "15".
SKIP_TEST = "OSXPHOTOS_TEST_EXPORT" not in os.environ or OS_VERSION[1] != "15"
PHOTOS_DB_LOCAL = os.path.expanduser("~/Pictures/Photos Library.photoslibrary")
PHOTOS_DB = "tests/Test-10.15.7.photoslibrary/database/photos.db"
PHOTOS_DB_PATH = "/Test-10.15.7.photoslibrary/database/photos.db"
PHOTOS_LIBRARY_PATH = "/Test-10.15.7.photoslibrary"
# ---- expected counts in the bundled test library ----------------------------
PHOTOS_DB_LEN = 25  # total photos; 23 not in trash + 2 in trash
PHOTOS_NOT_IN_TRASH_LEN = 23
PHOTOS_IN_TRASH_LEN = 2
PHOTOS_DB_IMPORT_SESSIONS = 17
# ---- expected keywords, persons, and albums ---------------------------------
KEYWORDS = [
    "Kids",
    "wedding",
    "flowers",
    "England",
    "London",
    "London 2018",
    "St. James's Park",
    "UK",
    "United Kingdom",
    "foo/bar",
    "Travel",
    "Maria",
    "Drink",
    "Val d'Isère",
    "Wine",
    "Wine Bottle",
    "Food",
    "Furniture",
    "Pizza",
    "Table",
    "Cloudy",
    "Cord",
    "Outdoor",
    "Sky",
    "Sunset Sunrise",
]
PERSONS = ["Katie", "Suzy", "Maria", _UNKNOWN_PERSON]
ALBUMS = [
    "2018-10 - Sponsion, Museum, Frühstück, Römermuseum",
    "2019-10/11 Paris Clermont",
    "AlbumInFolder",
    "EmptyAlbum",
    "I have a deleted twin",
    "Multi Keyword",
    "Pumpkin Farm",
    "Raw",
    "Sorted Manual",
    "Sorted Newest First",
    "Sorted Oldest First",
    "Sorted Title",
    "Test Album",  # there are 2 albums named "Test Album" for testing duplicate album names
]
# keyword -> number of (non-trashed) photos carrying it
KEYWORDS_DICT = {
    "Drink": 2,
    "England": 1,
    "Kids": 4,
    "London 2018": 1,
    "London": 1,
    "Maria": 1,
    "St. James's Park": 1,
    "Travel": 2,
    "UK": 1,
    "United Kingdom": 1,
    "Val d'Isère": 2,
    "Wine Bottle": 2,
    "Wine": 2,
    "flowers": 1,
    "foo/bar": 1,
    "wedding": 3,
    "Food": 2,
    "Furniture": 2,
    "Pizza": 2,
    "Table": 2,
    "Cloudy": 2,
    "Cord": 2,
    "Outdoor": 2,
    "Sky": 2,
    "Sunset Sunrise": 2,
}
# person -> number of photos they appear in
PERSONS_DICT = {"Katie": 3, "Suzy": 2, "Maria": 2, _UNKNOWN_PERSON: 1}
# album title -> number of photos in the album
ALBUM_DICT = {
    "2018-10 - Sponsion, Museum, Frühstück, Römermuseum": 1,
    "2019-10/11 Paris Clermont": 1,
    "AlbumInFolder": 2,
    "EmptyAlbum": 0,
    "I have a deleted twin": 1,
    "Multi Keyword": 2,
    "Pumpkin Farm": 3,
    "Raw": 4,
    "Sorted Manual": 3,
    "Sorted Newest First": 3,
    "Sorted Oldest First": 3,
    "Sorted Title": 3,
    "Test Album": 2,
}  # Note: there are 2 albums named "Test Album" for testing duplicate album names
# test-case name -> photo UUID in the bundled library
UUID_DICT = {
    "missing": "A1DD1F98-2ECD-431F-9AC9-5AFEFE2D3A5C",
    "favorite": "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51",
    "not_favorite": "A1DD1F98-2ECD-431F-9AC9-5AFEFE2D3A5C",
    "hidden": "A1DD1F98-2ECD-431F-9AC9-5AFEFE2D3A5C",
    "not_hidden": "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51",
    "has_adjustments": "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51",
    "adjustments_info": "7783E8E6-9CAC-40F3-BE22-81FB7051C266",
    "no_adjustments": "D05A5FE3-15FB-49A1-A15D-AB3DA6F8B068",
    "location": "DC99FBDD-7A52-4100-A5BB-344131646C30",
    "no_location": "6191423D-8DB8-4D4C-92BE-9BBBA308AAC4",
    "external_edit": "DC99FBDD-7A52-4100-A5BB-344131646C30",
    "no_external_edit": "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51",
    "export": "D79B8D77-BFFC-460B-9312-034F2877D35B",  # "Pumkins2.jpg"
    "export_tif": "8846E3E6-8AC8-4857-8448-E3D025784410",
    "in_album": "D79B8D77-BFFC-460B-9312-034F2877D35B",  # "Pumkins2.jpg"
    "date_invalid": "8846E3E6-8AC8-4857-8448-E3D025784410",
    "intrash": "71E3E212-00EB-430D-8A63-5E294B268554",
    "not_intrash": "DC99FBDD-7A52-4100-A5BB-344131646C30",
    "intrash_person_keywords": "6FD38366-3BF2-407D-81FE-7153EB6125B6",
    "import_session": "8846E3E6-8AC8-4857-8448-E3D025784410",
    "movie": "D1359D09-1373-4F3B-B0E3-1A4DE573E4A3",
    "description_newlines": "7F74DD34-5920-4DA3-B284-479887A34F66",
    "no_duplicates": "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51",
    "multi_query_1": "D79B8D77-BFFC-460B-9312-034F2877D35B",
    "multi_query_2": "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51",
}
# test-case name -> photo UUID in the author's personal library (SKIP_TEST-gated)
UUID_DICT_LOCAL = {
    "not_visible": "4A836160-51B2-4E32-907D-ECDDB2CEC657",  # IMG_9815.JPG
    "burst": "9A5B4CE6-6A9F-4917-95D4-1C98D14FCE4F",  # IMG_9812.JPG
    "burst_key": "9A5B4CE6-6A9F-4917-95D4-1C98D14FCE4F",  # IMG_9812.JPG
    "burst_not_key": "4A836160-51B2-4E32-907D-ECDDB2CEC657",  # IMG_9815.JPG
    "burst_selected": "75154738-83AA-4DCD-A913-632D5D1C0FEE",  # IMG_9814.JPG
    "burst_not_selected": "89E235DD-B9AC-4E8D-BDA2-986981CA7582",  # IMG_9813.JPG
    "burst_default": "F5E6BD24-B493-44E9-BDA2-7AD9D2CC8C9D",  # IMG_9816.JPG
    "burst_not_default": "75154738-83AA-4DCD-A913-632D5D1C0FEE",  # IMG_9814.JPG
    "live_edited": "54A01B04-16D7-4FDE-8860-19F2A641E433",  # IMG_3203.HEIC
    "live": "8EC216A2-0032-4934-BD3F-04C6259B3304",  # IMG_3259.HEIC
}
# members of the "Pumpkin Farm" album (unordered)
UUID_PUMPKIN_FARM = [
    "F12384F6-CD17-4151-ACBA-AE0E3688539E",
    "D79B8D77-BFFC-460B-9312-034F2877D35B",
    "1EB2B765-0765-43BA-A90C-0D0580E6172C",
]
# expected album-order of "Pumpkin Farm" photos
ALBUM_SORT_ORDER = [
    "1EB2B765-0765-43BA-A90C-0D0580E6172C",
    "F12384F6-CD17-4151-ACBA-AE0E3688539E",
    "D79B8D77-BFFC-460B-9312-034F2877D35B",
]
ALBUM_KEY_PHOTO = "D79B8D77-BFFC-460B-9312-034F2877D35B"
# uuid -> expected current UTI
UTI_DICT = {
    "8846E3E6-8AC8-4857-8448-E3D025784410": "public.tiff",
    "7783E8E6-9CAC-40F3-BE22-81FB7051C266": "public.jpeg",
    "1EB2B765-0765-43BA-A90C-0D0580E6172C": "public.jpeg",
}
# uuid -> expected original UTI
UTI_ORIGINAL_DICT = {
    "8846E3E6-8AC8-4857-8448-E3D025784410": "public.tiff",
    "7783E8E6-9CAC-40F3-BE22-81FB7051C266": "public.heic",
    "1EB2B765-0765-43BA-A90C-0D0580E6172C": "public.jpeg",
}
# expected values for the raw-handling properties of PhotoInfo
RawInfo = namedtuple(
    "RawInfo",
    [
        "comment",
        "original_filename",
        "has_raw",
        "israw",
        "raw_original",
        "uti",
        "uti_original",
        "uti_raw",
    ],
)
# uuid -> RawInfo reference data (see test_raw)
RAW_DICT = {
    "D05A5FE3-15FB-49A1-A15D-AB3DA6F8B068": RawInfo(
        "raw image, no jpeg pair",
        "DSC03584.dng",
        False,
        True,
        False,
        "com.adobe.raw-image",
        "com.adobe.raw-image",
        None,
    ),
    "A92D9C26-3A50-4197-9388-CB5F7DB9FA91": RawInfo(
        "raw+jpeg, jpeg original",
        "IMG_1994.JPG",
        True,
        False,
        False,
        "public.jpeg",
        "public.jpeg",
        "com.canon.cr2-raw-image",
    ),
    "4D521201-92AC-43E5-8F7C-59BC41C37A96": RawInfo(
        "raw+jpeg, raw original",
        "IMG_1997.JPG",
        True,
        False,
        True,
        "public.jpeg",
        "public.jpeg",
        "com.canon.cr2-raw-image",
    ),
    "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51": RawInfo(
        "jpeg, no raw",
        "wedding.jpg",
        False,
        False,
        False,
        "public.jpeg",
        "public.jpeg",
        None,
    ),
}
# reference data for test_original_filename
ORIGINAL_FILENAME_DICT = {
    "uuid": "D79B8D77-BFFC-460B-9312-034F2877D35B",
    "filename": "D79B8D77-BFFC-460B-9312-034F2877D35B.jpeg",
    "original_filename": "Pumkins2.jpg",
}
UUID_IS_REFERENCE = "A1DD1F98-2ECD-431F-9AC9-5AFEFE2D3A5C"
UUID_NOT_REFERENCE = "F12384F6-CD17-4151-ACBA-AE0E3688539E"
# placeholder until duplicate detection tests are implemented (test_duplicates_1 is skipped)
UUID_DUPLICATE = ""
# uuid -> text expected from detected_text(), or None when no text should be found
UUID_DETECTED_TEXT = {
    "E2078879-A29C-4D6F-BACB-E3BBE6C3EB91": "osxphotos",
    "A92D9C26-3A50-4197-9388-CB5F7DB9FA91": None,
}
@pytest.fixture(scope="module")
def photosdb():
    """Open the bundled 10.15.7 test library once per test module."""
    return osxphotos.PhotosDB(dbfile=PHOTOS_DB)
@pytest.fixture(scope="module")
def photosdb_local():
    """Open the author's local Photos library once per test module (SKIP_TEST-gated tests)."""
    return osxphotos.PhotosDB(dbfile=PHOTOS_DB_LOCAL)
def test_init1():
    """PhotosDB should accept the library path as a named argument."""
    db = osxphotos.PhotosDB(dbfile=PHOTOS_DB)
    assert isinstance(db, osxphotos.PhotosDB)


def test_init2():
    """PhotosDB should accept the library path as a positional argument."""
    db = osxphotos.PhotosDB(PHOTOS_DB)
    assert isinstance(db, osxphotos.PhotosDB)


def test_init3():
    """Passing both a positional path and dbfile= should raise."""
    with pytest.raises(Exception):
        assert osxphotos.PhotosDB(PHOTOS_DB, dbfile=PHOTOS_DB)
def test_init4():
    """Opening a file that is not a valid Photos database should raise."""
    (bad_db, bad_db_name) = tempfile.mkstemp(suffix=".db", prefix="osxphotos-")
    os.close(bad_db)
    try:
        with pytest.raises(Exception):
            assert osxphotos.PhotosDB(bad_db_name)
        with pytest.raises(Exception):
            assert osxphotos.PhotosDB(dbfile=bad_db_name)
    finally:
        # BUG FIX: was a bare `except: pass` (swallows everything, including
        # KeyboardInterrupt) and cleanup was not in `finally`, so the temp file
        # leaked whenever an assertion above failed. Only OS-level removal
        # errors (e.g. file already gone) are ignored now.
        try:
            os.remove(bad_db_name)
        except OSError:
            pass
def test_init5(mocker):
    """PhotosDB() with no path should raise when the last library can't be found."""
    # get_last_library_path lives in utils but must be patched where photosdb imports it
    mocker.patch(
        "osxphotos.photosdb.photosdb.get_last_library_path", new=lambda: None
    )
    with pytest.raises(Exception):
        assert osxphotos.PhotosDB()
def test_db_len(photosdb):
    """len(photosdb) should count every photo, including the trash."""
    # assert photosdb.db_version in osxphotos._TESTED_DB_VERSIONS
    assert len(photosdb) == PHOTOS_DB_LEN


def test_db_version(photosdb):
    """The test library should be a Photos 5 (version 6000) database."""
    # assert photosdb.db_version in osxphotos._TESTED_DB_VERSIONS
    assert photosdb.db_version == "6000"


def test_persons(photosdb):
    """persons should contain exactly the expected names."""
    assert "Katie" in photosdb.persons
    assert Counter(photosdb.persons) == Counter(PERSONS)


def test_keywords(photosdb):
    """keywords should contain exactly the expected keywords."""
    assert "wedding" in photosdb.keywords
    assert Counter(photosdb.keywords) == Counter(KEYWORDS)


def test_album_names(photosdb):
    """albums should contain exactly the expected album titles."""
    assert "Pumpkin Farm" in photosdb.albums
    assert Counter(photosdb.albums) == Counter(ALBUMS)
def test_keywords_dict(photosdb):
    """keywords_as_dict should map keyword -> photo count."""
    counts = photosdb.keywords_as_dict
    assert counts["wedding"] == 3
    assert counts == KEYWORDS_DICT


def test_persons_as_dict(photosdb):
    """persons_as_dict should map person -> photo count."""
    counts = photosdb.persons_as_dict
    assert counts["Maria"] == 2
    assert counts == PERSONS_DICT


def test_albums_as_dict(photosdb):
    """albums_as_dict should map album title -> photo count."""
    counts = photosdb.albums_as_dict
    assert counts["Pumpkin Farm"] == 3
    assert counts == ALBUM_DICT


def test_album_sort_order(photosdb):
    """Photos in an album should come back in the album's sort order."""
    farm = next(a for a in photosdb.album_info if a.title == "Pumpkin Farm")
    assert [photo.uuid for photo in farm.photos] == ALBUM_SORT_ORDER


def test_album_empty_album(photosdb):
    """An empty album should yield an empty photo list."""
    empty = next(a for a in photosdb.album_info if a.title == "EmptyAlbum")
    assert empty.photos == []
def test_attributes(photosdb):
    """Spot-check the main PhotoInfo attributes for Pumkins2.jpg."""
    results = photosdb.photos(uuid=["D79B8D77-BFFC-460B-9312-034F2877D35B"])
    assert len(results) == 1
    photo = results[0]
    assert photo.keywords == ["Kids"]
    assert photo.original_filename == "Pumkins2.jpg"
    assert photo.filename == "D79B8D77-BFFC-460B-9312-034F2877D35B.jpeg"
    assert photo.date == datetime.datetime(
        2018, 9, 28, 16, 7, 7, 0, datetime.timezone(datetime.timedelta(seconds=-14400))
    )
    assert photo.date_added == datetime.datetime(
        2019,
        7,
        27,
        9,
        16,
        49,
        778432,
        tzinfo=datetime.timezone(datetime.timedelta(days=-1, seconds=72000)),
    )
    assert photo.description == "Girl holding pumpkin"
    assert photo.title == "I found one!"
    assert sorted(photo.albums) == ["Multi Keyword", "Pumpkin Farm", "Test Album"]
    assert photo.persons == ["Katie"]
    assert photo.path.endswith(
        "tests/Test-10.15.7.photoslibrary/originals/D/D79B8D77-BFFC-460B-9312-034F2877D35B.jpeg"
    )
    assert photo.ismissing == False
def test_attributes_2(photosdb):
    """Verify attributes, including pixel dimensions, for the edited wedding photo."""
    results = photosdb.photos(uuid=[UUID_DICT["has_adjustments"]])
    assert len(results) == 1
    photo = results[0]
    assert sorted(photo.keywords) == ["Maria", "wedding"]
    assert photo.original_filename == "wedding.jpg"
    assert photo.filename == "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51.jpeg"
    assert photo.date == datetime.datetime(
        2019,
        4,
        15,
        14,
        40,
        24,
        86000,
        datetime.timezone(datetime.timedelta(seconds=-14400)),
    )
    assert photo.description == "Bride Wedding day"
    assert photo.title is None
    assert sorted(photo.albums) == [
        "AlbumInFolder",
        "I have a deleted twin",
        "Multi Keyword",
    ]
    assert photo.persons == ["Maria"]
    assert photo.path.endswith(
        "tests/Test-10.15.7.photoslibrary/originals/E/E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51.jpeg"
    )
    assert not photo.ismissing
    assert photo.hasadjustments
    # edited dimensions differ from the original's
    assert (photo.width, photo.height) == (1526, 1325)
    assert (photo.original_width, photo.original_height) == (2048, 1367)
    assert photo.orientation == 1
    assert photo.original_orientation == 1
    assert photo.original_filesize == 460483
def test_missing(photosdb):
    """A missing photo has no resolvable path and ismissing True."""
    results = photosdb.photos(uuid=[UUID_DICT["missing"]])
    assert len(results) == 1
    assert results[0].path is None
    assert results[0].ismissing == True


def test_favorite(photosdb):
    """favorite should be True for the favorited photo."""
    results = photosdb.photos(uuid=[UUID_DICT["favorite"]])
    assert len(results) == 1
    assert results[0].favorite == True


def test_not_favorite(photosdb):
    """favorite should be False for a non-favorited photo."""
    results = photosdb.photos(uuid=[UUID_DICT["not_favorite"]])
    assert len(results) == 1
    assert results[0].favorite == False


def test_hidden(photosdb):
    """hidden should be True for the hidden photo."""
    results = photosdb.photos(uuid=[UUID_DICT["hidden"]])
    assert len(results) == 1
    assert results[0].hidden == True


def test_not_hidden(photosdb):
    """hidden should be False for a visible photo."""
    results = photosdb.photos(uuid=[UUID_DICT["not_hidden"]])
    assert len(results) == 1
    assert results[0].hidden == False


def test_visible(photosdb):
    """visible should be truthy for a non-hidden photo."""
    results = photosdb.photos(uuid=[UUID_DICT["not_hidden"]])
    assert len(results) == 1
    assert results[0].visible


def test_not_burst(photosdb):
    """burst should be falsy for a non-burst photo."""
    results = photosdb.photos(uuid=[UUID_DICT["not_hidden"]])
    assert len(results) == 1
    assert not results[0].burst
def test_location_1(photosdb):
    """A photo with GPS info should return its lat/lon."""
    results = photosdb.photos(uuid=[UUID_DICT["location"]])
    assert len(results) == 1
    lat, lon = results[0].location
    assert lat == pytest.approx(51.50357167)
    assert lon == pytest.approx(-0.1318055)


def test_location_2(photosdb):
    """A photo without GPS info should return (None, None)."""
    results = photosdb.photos(uuid=[UUID_DICT["no_location"]])
    assert len(results) == 1
    lat, lon = results[0].location
    assert lat is None
    assert lon is None
def test_hasadjustments1(photosdb):
    """hasadjustments should be True for an edited photo."""
    results = photosdb.photos(uuid=[UUID_DICT["has_adjustments"]])
    assert len(results) == 1
    assert results[0].hasadjustments == True


def test_hasadjustments2(photosdb):
    """hasadjustments should be False for an unedited photo."""
    results = photosdb.photos(uuid=[UUID_DICT["no_adjustments"]])
    assert len(results) == 1
    assert results[0].hasadjustments == False


def test_external_edit1(photosdb):
    """external_edit should be True for a photo edited in an external editor."""
    results = photosdb.photos(uuid=[UUID_DICT["external_edit"]])
    assert len(results) == 1
    assert results[0].external_edit == True


def test_external_edit2(photosdb):
    """external_edit should be False for a photo not externally edited."""
    results = photosdb.photos(uuid=[UUID_DICT["no_external_edit"]])
    assert len(results) == 1
    assert results[0].external_edit == False
def test_path_edited1(photosdb):
    """path_edited should point at an existing render for an edited photo."""
    results = photosdb.photos(uuid=["E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51"])
    assert len(results) == 1
    edited_path = results[0].path_edited
    assert edited_path.endswith(
        "resources/renders/E/E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51_1_201_a.jpeg"
    )
    assert os.path.exists(edited_path)


def test_path_edited2(photosdb):
    """path_edited should be None for a photo with no adjustments."""
    results = photosdb.photos(uuid=[UUID_DICT["no_adjustments"]])
    assert len(results) == 1
    assert results[0].path_edited is None


def test_path_derivatives(photosdb):
    """path_derivatives should list the expected derivative files in order."""
    results = photosdb.photos(uuid=[UUID_DICT["no_adjustments"]])
    assert len(results) == 1
    expected_suffixes = [
        "D05A5FE3-15FB-49A1-A15D-AB3DA6F8B068_1_100_o.jpeg",
        "D05A5FE3-15FB-49A1-A15D-AB3DA6F8B068_1_105_c.jpeg",
    ]
    for i, deriv_path in enumerate(results[0].path_derivatives):
        assert deriv_path.endswith(expected_suffixes[i])
def test_ismovie(photosdb):
    """ismovie should be True for a movie."""
    assert photosdb.photos(uuid=[UUID_DICT["movie"]])[0].ismovie


def test_not_ismovie(photosdb):
    """ismovie should be False for a still photo."""
    assert not photosdb.photos(uuid=[UUID_DICT["no_adjustments"]])[0].ismovie


def test_count(photosdb):
    """photos() should return every photo not in the trash."""
    assert len(photosdb.photos()) == PHOTOS_NOT_IN_TRASH_LEN
def test_photos_intrash_1(photosdb):
    """photos(intrash=True) should return only the trashed photos."""
    assert len(photosdb.photos(intrash=True)) == PHOTOS_IN_TRASH_LEN


def test_photos_intrash_2(photosdb):
    """Every photo returned with intrash=True should report intrash."""
    for photo in photosdb.photos(intrash=True):
        assert photo.intrash


def test_photos_intrash_3(photosdb):
    """No photo returned with intrash=False should report intrash."""
    for photo in photosdb.photos(intrash=False):
        assert not photo.intrash


def test_photoinfo_intrash_1(photosdb):
    """A trashed photo should report intrash and its trashed date."""
    photo = photosdb.photos(uuid=[UUID_DICT["intrash"]], intrash=True)[0]
    assert photo.intrash
    assert photo.date_trashed.isoformat() == "2120-06-10T11:24:47.685857-05:00"


def test_photoinfo_intrash_2(photosdb):
    """A trashed photo should not be returned by a default query."""
    assert not photosdb.photos(uuid=[UUID_DICT["intrash"]])


def test_photoinfo_intrash_3(photosdb):
    """Persons and keywords should still be populated for trashed photos."""
    photo = photosdb.photos(uuid=[UUID_DICT["intrash_person_keywords"]], intrash=True)[0]
    assert photo.intrash
    assert "Maria" in photo.persons
    assert "wedding" in photo.keywords


def test_photoinfo_intrash_4(photosdb):
    """Person queries should find photos in the trash when intrash=True."""
    photo = photosdb.photos(persons=["Maria"], intrash=True)[0]
    assert photo.intrash
    assert "Maria" in photo.persons
    assert "wedding" in photo.keywords


def test_photoinfo_intrash_5(photosdb):
    """Keyword queries should find photos in the trash when intrash=True."""
    photo = photosdb.photos(keywords=["wedding"], intrash=True)[0]
    assert photo.intrash
    assert "Maria" in photo.persons
    assert "wedding" in photo.keywords


def test_photoinfo_not_intrash(photosdb):
    """A photo not in the trash should report intrash False and no trashed date."""
    photo = photosdb.photos(uuid=[UUID_DICT["not_intrash"]])[0]
    assert not photo.intrash
    assert photo.date_trashed is None
def test_keyword_2(photosdb):
photos = photosdb.photos(keywords=["wedding"])
assert len(photos) == 2 # won't show the one in the trash
def test_keyword_not_in_album(photosdb):
photos1 = photosdb.photos(albums=["Pumpkin Farm"])
photos2 = photosdb.photos(keywords=["Kids"])
photos3 = [p for p in photos2 if p not in photos1]
assert len(photos3) == 1
assert photos3[0].uuid == "A1DD1F98-2ECD-431F-9AC9-5AFEFE2D3A5C"
def test_album_folder_name(photosdb):
photos = photosdb.photos(albums=["Pumpkin Farm"])
assert sorted(p.uuid for p in photos) == sorted(UUID_PUMPKIN_FARM)
def test_multi_person(photosdb):
photos = photosdb.photos(persons=["Katie", "Suzy"])
assert len(photos) == 3
def test_get_db_path(photosdb):
db_path = photosdb.db_path
assert db_path.endswith(PHOTOS_DB_PATH)
def test_get_library_path(photosdb):
lib_path = photosdb.library_path
assert lib_path.endswith(PHOTOS_LIBRARY_PATH)
def test_get_db_connection(photosdb):
conn, cursor = photosdb.get_db_connection()
assert isinstance(conn, sqlite3.Connection)
assert isinstance(cursor, sqlite3.Cursor)
results = conn.execute(
"SELECT ZUUID FROM ZGENERICASSET WHERE ZFAVORITE = 1;"
).fetchall()
assert len(results) == 1
assert results[0][0] == "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51"
conn.close()
def test_export_1(photosdb):
tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
dest = tempdir.name
photos = photosdb.photos(uuid=[UUID_DICT["export"]])
filename = photos[0].original_filename
expected_dest = os.path.join(dest, filename)
got_dest = photos[0].export(dest)[0]
assert got_dest == expected_dest
assert os.path.isfile(got_dest)
def test_export_2(photosdb):
tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
dest = tempdir.name
photos = photosdb.photos(uuid=[UUID_DICT["export"]])
timestamp = time.time()
filename = f"osxphotos-export-2-test-{timestamp}.jpg"
expected_dest = os.path.join(dest, filename)
got_dest = photos[0].export(dest, filename)[0]
assert got_dest == expected_dest
assert os.path.isfile(got_dest)
def test_export_3(photosdb):
tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
dest = tempdir.name
photos = photosdb.photos(uuid=[UUID_DICT["export"]])
filename = photos[0].original_filename
filename2 = pathlib.Path(filename)
filename2 = f"{filename2.stem} (1){filename2.suffix}"
expected_dest_2 = os.path.join(dest, filename2)
got_dest = photos[0].export(dest)[0]
got_dest_2 = photos[0].export(dest)[0]
assert got_dest_2 == expected_dest_2
assert os.path.isfile(got_dest_2)
def test_export_4(photosdb):
tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
dest = tempdir.name
photos = photosdb.photos(uuid=[UUID_DICT["export"]])
timestamp = time.time()
filename = f"osxphotos-export-2-test-{timestamp}.jpg"
filename2 = f"osxphotos-export-2-test-{timestamp} (1).jpg"
expected_dest_2 = os.path.join(dest, filename2)
got_dest = photos[0].export(dest, filename)[0]
got_dest_2 = photos[0].export(dest, filename)[0]
assert got_dest_2 == expected_dest_2
assert os.path.isfile(got_dest_2)
def test_export_5(photosdb):
tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
dest = tempdir.name
photos = photosdb.photos(uuid=[UUID_DICT["export"]])
filename = photos[0].original_filename
expected_dest = os.path.join(dest, filename)
got_dest = photos[0].export(dest)[0]
got_dest_2 = photos[0].export(dest, overwrite=True)[0]
assert got_dest_2 == got_dest
assert got_dest_2 == expected_dest
assert os.path.isfile(got_dest_2)
def test_export_6(photosdb):
tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
dest = tempdir.name
photos = photosdb.photos(uuid=[UUID_DICT["export"]])
timestamp = time.time()
filename = f"osxphotos-export-test-{timestamp}.jpg"
expected_dest = os.path.join(dest, filename)
got_dest = photos[0].export(dest, filename)[0]
got_dest_2 = photos[0].export(dest, filename, overwrite=True)[0]
assert got_dest_2 == got_dest
assert got_dest_2 == expected_dest
assert os.path.isfile(got_dest_2)
def test_export_7(photosdb):
tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
dest = tempdir.name
photos = photosdb.photos(uuid=[UUID_DICT["export"]])
filename = photos[0].filename
got_dest = photos[0].export(dest)[0]
with pytest.raises(Exception) as e:
assert photos[0].export(dest, increment=False)
assert e.type == type(FileExistsError())
def test_export_8(photosdb):
tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
dest = tempdir.name
photos = photosdb.photos(uuid=[UUID_DICT["missing"]])
assert photos[0].export(dest) == []
def test_export_9(photosdb):
# should raise exception
tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
dest = tempdir.name
photos = photosdb.photos(uuid=[UUID_DICT["no_adjustments"]])
filename = photos[0].filename
with pytest.raises(Exception) as e:
assert photos[0].export(dest, edited=True)
assert e.type == ValueError
def test_export_10(photosdb):
# try to export edited file that's not edited and name provided
tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
dest = tempdir.name
photos = photosdb.photos(uuid=[UUID_DICT["no_adjustments"]])
timestamp = time.time()
filename = f"osxphotos-export-test-{timestamp}.jpg"
with pytest.raises(Exception) as e:
assert photos[0].export(dest, filename, edited=True)
assert e.type == ValueError
def test_export_11(photosdb):
tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
dest = tempdir.name
photos = photosdb.photos(uuid=[UUID_DICT["has_adjustments"]])
timestamp = time.time()
filename = f"osxphotos-export-test-{timestamp}.jpg"
expected_dest = os.path.join(dest, filename)
got_dest = photos[0].export(dest, filename, edited=True)[0]
assert got_dest == expected_dest
def test_export_12(photosdb):
tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
dest = tempdir.name
photos = photosdb.photos(uuid=[UUID_DICT["has_adjustments"]])
edited_name = pathlib.Path(photos[0].path_edited).name
edited_suffix = pathlib.Path(edited_name).suffix
filename = (
pathlib.Path(photos[0].original_filename).stem + "_edited" + edited_suffix
)
expected_dest = os.path.join(dest, filename)
got_dest = photos[0].export(dest, edited=True)[0]
assert got_dest == expected_dest
def test_export_13(photosdb):
tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
dest = tempdir.name
i = 0
while os.path.isdir(dest):
dest = os.path.join(dest, str(i))
i += 1
photos = photosdb.photos(uuid=[UUID_DICT["export"]])
filename = photos[0].filename
with pytest.raises(Exception) as e:
assert photos[0].export(dest)
assert e.type == type(FileNotFoundError())
def test_export_14(photosdb, caplog):
# test export with user provided filename with different (but valid) extension than source
tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
dest = tempdir.name
photos = photosdb.photos(uuid=[UUID_DICT["export_tif"]])
timestamp = time.time()
filename = f"osxphotos-export-2-test-{timestamp}.tif"
expected_dest = os.path.join(dest, filename)
got_dest = photos[0].export(dest, filename)[0]
assert got_dest == expected_dest
assert os.path.isfile(got_dest)
assert "Invalid destination suffix" not in caplog.text
def test_export_no_original_filename(photosdb):
# test export OK if original filename is null
# issue #267
tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
dest = tempdir.name
photos = photosdb.photos(uuid=[UUID_DICT["export"]])
# monkey patch original_filename for testing
original_filename = photos[0]._info["originalFilename"]
photos[0]._info["originalFilename"] = None
filename = f"{photos[0].uuid}.jpeg"
expected_dest = os.path.join(dest, filename)
got_dest = photos[0].export(dest)[0]
assert got_dest == expected_dest
assert os.path.isfile(got_dest)
photos[0]._info["originalFilename"] = original_filename
def test_eq():
photosdb1 = osxphotos.PhotosDB(dbfile=PHOTOS_DB)
photosdb2 = osxphotos.PhotosDB(dbfile=PHOTOS_DB)
photos1 = photosdb1.photos(uuid=[UUID_DICT["export"]])
photos2 = photosdb2.photos(uuid=[UUID_DICT["export"]])
assert photos1[0] == photos2[0]
def test_eq_2():
photosdb1 = osxphotos.PhotosDB(dbfile=PHOTOS_DB)
photosdb2 = osxphotos.PhotosDB(dbfile=PHOTOS_DB)
photos1 = photosdb1.photos(uuid=[UUID_DICT["in_album"]])
photos2 = photosdb2.photos(uuid=[UUID_DICT["in_album"]])
# memoize a value
albums = photos1[0].albums
assert albums
assert photos1[0] == photos2[0]
def test_not_eq(photosdb):
photos1 = photosdb.photos(uuid=[UUID_DICT["export"]])
photos2 = photosdb.photos(uuid=[UUID_DICT["missing"]])
assert photos1[0] != photos2[0]
def test_photosdb_repr():
photosdb = osxphotos.PhotosDB(dbfile=PHOTOS_DB)
photosdb2 = eval(repr(photosdb))
ignore_keys = ["_tmp_db", "_tempdir", "_tempdir_name", "_db_connection"]
assert {k: v for k, v in photosdb.__dict__.items() if k not in ignore_keys} == {
k: v for k, v in photosdb2.__dict__.items() if k not in ignore_keys
}
def test_photosinfo_repr(photosdb):
photos = photosdb.photos(uuid=[UUID_DICT["favorite"]])
photo = photos[0]
photo2 = eval(repr(photo))
assert {k: str(v).encode("utf-8") for k, v in photo.__dict__.items()} == {
k: str(v).encode("utf-8") for k, v in photo2.__dict__.items()
}
def test_from_to_date(photosdb):
os.environ["TZ"] = "US/Pacific"
time.tzset()
photos = photosdb.photos(from_date=datetime.datetime(2018, 10, 28))
assert len(photos) == 16
photos = photosdb.photos(to_date=datetime.datetime(2018, 10, 28))
assert len(photos) == 7
photos = photosdb.photos(
from_date=datetime.datetime(2018, 9, 28), to_date=datetime.datetime(2018, 9, 29)
)
assert len(photos) == 4
def test_from_to_date_tz(photosdb):
os.environ["TZ"] = "US/Pacific"
time.tzset()
photos = photosdb.photos(
from_date=datetime.datetime(2018, 9, 28, 13, 7, 0),
to_date=datetime.datetime(2018, 9, 28, 13, 9, 0),
)
assert len(photos) == 1
assert photos[0].uuid == "D79B8D77-BFFC-460B-9312-034F2877D35B"
photos = photosdb.photos(
from_date=datetime.datetime(
2018,
9,
28,
16,
7,
0,
tzinfo=datetime.timezone(datetime.timedelta(days=-1, seconds=72000)),
),
to_date=datetime.datetime(
2018,
9,
28,
16,
9,
0,
tzinfo=datetime.timezone(datetime.timedelta(days=-1, seconds=72000)),
),
)
assert len(photos) == 1
assert photos[0].uuid == "D79B8D77-BFFC-460B-9312-034F2877D35B"
def test_date_invalid():
# doesn't run correctly with the module-level fixture
from datetime import datetime, timedelta, timezone
import osxphotos
photosdb = osxphotos.PhotosDB(dbfile=PHOTOS_DB)
photos = photosdb.photos(uuid=[UUID_DICT["date_invalid"]])
assert len(photos) == 1
p = photos[0]
delta = timedelta(seconds=p.tzoffset)
tz = timezone(delta)
assert p.date == datetime(1970, 1, 1).astimezone(tz=tz)
def test_date_modified_invalid(photosdb):
photos = photosdb.photos(uuid=[UUID_DICT["date_invalid"]])
assert len(photos) == 1
p = photos[0]
assert p.date_modified is None
def test_import_session_count(photosdb):
import_sessions = photosdb.import_info
assert len(import_sessions) == PHOTOS_DB_IMPORT_SESSIONS
def test_import_session_photo(photosdb):
photo = photosdb.get_photo(UUID_DICT["import_session"])
import_session = photo.import_info
assert import_session.creation_date == datetime.datetime(
2020,
6,
6,
7,
15,
24,
729811,
tzinfo=datetime.timezone(datetime.timedelta(days=-1, seconds=61200), "PDT"),
)
assert import_session.start_date == datetime.datetime(
2020,
6,
6,
7,
15,
24,
725564,
tzinfo=datetime.timezone(datetime.timedelta(days=-1, seconds=61200), "PDT"),
)
assert import_session.end_date == datetime.datetime(
2020,
6,
6,
7,
15,
24,
725564,
tzinfo=datetime.timezone(datetime.timedelta(days=-1, seconds=61200), "PDT"),
)
assert len(import_session.photos) == 1
def test_uti(photosdb):
for uuid, uti in UTI_DICT.items():
photo = photosdb.get_photo(uuid)
assert photo.uti == uti
assert photo.uti_original == UTI_ORIGINAL_DICT[uuid]
def test_raw(photosdb):
for uuid, rawinfo in RAW_DICT.items():
photo = photosdb.get_photo(uuid)
assert photo.original_filename == rawinfo.original_filename
assert photo.has_raw == rawinfo.has_raw
assert photo.israw == rawinfo.israw
assert photo.uti == rawinfo.uti
assert photo.uti_original == rawinfo.uti_original
assert photo.uti_raw == rawinfo.uti_raw
def test_verbose(capsys):
photosdb = osxphotos.PhotosDB(dbfile=PHOTOS_DB, verbose=print)
captured = capsys.readouterr()
assert "Processing database" in captured.out
def test_original_filename(photosdb):
uuid = ORIGINAL_FILENAME_DICT["uuid"]
photo = photosdb.get_photo(uuid)
assert photo.original_filename == ORIGINAL_FILENAME_DICT["original_filename"]
assert photo.filename == ORIGINAL_FILENAME_DICT["filename"]
original_filename = photo._info["originalFilename"]
photo._info["originalFilename"] = None
assert photo.original_filename == ORIGINAL_FILENAME_DICT["filename"]
photo._info["originalFilename"] = original_filename
# They test things difficult to test in the test libraries
@pytest.mark.skipif(SKIP_TEST, reason="Skip if not running on author's local machine.")
def test_not_visible_burst(photosdb_local):
photo = photosdb_local.get_photo(UUID_DICT_LOCAL["not_visible"])
assert not photo.visible
assert photo.burst
@pytest.mark.skipif(SKIP_TEST, reason="Skip if not running on author's local machine.")
def test_visible_burst(photosdb_local):
photo = photosdb_local.get_photo(UUID_DICT_LOCAL["burst"])
assert photo.visible
assert photo.burst
assert len(photo.burst_photos) == 4
@pytest.mark.skipif(SKIP_TEST, reason="Skip if not running on author's local machine.")
def test_burst_key(photosdb_local):
photo = photosdb_local.get_photo(UUID_DICT_LOCAL["burst_key"])
assert photo.burst_key
photo = photosdb_local.get_photo(UUID_DICT_LOCAL["burst_not_key"])
assert not photo.burst_key
@pytest.mark.skipif(SKIP_TEST, reason="Skip if not running on author's local machine.")
def test_burst_selected(photosdb_local):
photo = photosdb_local.get_photo(UUID_DICT_LOCAL["burst_selected"])
assert photo.burst_selected
photo = photosdb_local.get_photo(UUID_DICT_LOCAL["burst_not_selected"])
assert not photo.burst_selected
@pytest.mark.skipif(SKIP_TEST, reason="Skip if not running on author's local machine.")
def test_burst_default_pic(photosdb_local):
photo = photosdb_local.get_photo(UUID_DICT_LOCAL["burst_default"])
assert photo.burst_default_pick
photo = photosdb_local.get_photo(UUID_DICT_LOCAL["burst_not_default"])
assert not photo.burst_default_pick
@pytest.mark.skipif(SKIP_TEST, reason="Skip if not running on author's local machine.")
def test_path_edited_live_photo(photosdb_local):
photo = photosdb_local.get_photo(UUID_DICT_LOCAL["live_edited"])
assert photo.path_edited_live_photo is not None
@pytest.mark.skipif(SKIP_TEST, reason="Skip if not running on author's local machine.")
def test_path_edited_live_photo_not_edited(photosdb_local):
photo = photosdb_local.get_photo(UUID_DICT_LOCAL["live"])
assert photo.path_edited_live_photo is None
def test_is_reference(photosdb):
photo = photosdb.get_photo(UUID_IS_REFERENCE)
assert photo.isreference
photo = photosdb.get_photo(UUID_NOT_REFERENCE)
assert not photo.isreference
def test_adjustments(photosdb):
from osxphotos.adjustmentsinfo import AdjustmentsInfo
photo = photosdb.get_photo(UUID_DICT["adjustments_info"])
adjustments = photo.adjustments
assert isinstance(adjustments, AdjustmentsInfo)
assert adjustments.asdict() == {
"data": b"mW[\xb7\xa2:\xb3\xfd/\xbe\xda\xa3\x17((\xf4\x18\xdf\x03H\xc2E\xb9%\\\xc4\xb3\xce\x03\x02\x12.\x82\n\x1at\x8f\xfd\xdf\xbf\xb8\xba\xfb\xec\xdec\x1c\xde\x92\xaa\xcc\x9aU\t\xa9\x99\xbff\x8f\xe26T}gv\xa7~\xf6\xe3\xaf\xd9\xf1^\xb5\xb9s?\x1f\x8b\xdb\xec\xc7\x8c\x97\xf5\xf5r\xf6m\x96^.\xd1O\xbf\xf7\xe4\x8a\xff\xce}\xe7\x17\x1c3\x0c\x19)\xce)*\x1e\xd5O#\xffmvi\xd3\xf1\xd4\xdf\xce\xcc\xd3\xc5\xfb\xd9\xdf\xdff\xe7bL\xf3tL\xdf\xf8\xe7t\x18\x8b[\\\xe5#\x99\xfdXr\x0b\x81-\xa8.E[u\xc5?\x11\xd8\xba\xef\x02C\xff\xe9l\x14UI\xc6\xd9\x0f\x81[.\xbe\xcd\xfa[Utc:\xfe\x0c\xc7\xd0\xdf\xb1\xd2\xf1\xff\x163\x06i^\xdf\x87\xf1\xcc\xdc\x86\xd9\x8f\xff\xf9\xeb\xff\xf1(\xba\xf4\xd8\x16\xf9\xec\xc7x\xbb\x17,\x8bb\x1c\xab\xae\x1c\xde\x04\xfb\xd3\x89\rw}\x96\xb6\xbb\x9fq\xb9o\xbf&\r6n\xdfs\xc3\xd7d\xd5]\xee\xe3o\x9f\xefKn\xbd\x14\xc4\xe5\x8a\x93\x16\xc2ZX,\xe4\xdf\xab\xc0t\xe9\x87\xfb\xad\xf8\x03Hm\xd3\xac\xf1\xfa\xaa\xfb\x13]\xbd\xbd\xa1\xbab\xf8\x89>\xbcs\x1c\xc6*\xfbbu\xe1\x16\xef \x1c\xb7\x96\x84%\xbf\\/DA\xe6xy\xc5\xadY\xfdD\xee\xcb&K\xdcR^\xf0\xe2JZ-\xd6\x82\xc8I\xac\x12\xf7\xb1\x8f\xd2\xf6\xfe\x0e\xfe}!\x89+\xee\x8f\x8f\x15\xf3\xf8'\x11\x86\xbe\xe4\xe5\xf5J\xe4Y\xa5EYZ\xf0k\xf1\xdbl\xec\xbb\xb4EiW\x16\xbf\x82\x08\xe2j\xcd\t\xb2\xb4\\\x8bk\xf1\xbd}\x0b\xf1\xcb\xb2\x14\x17\xb2\xc0\xf3\xeb\x95\xb0\xe6DIZ,\x99I\x96\xde&Q\xfe\xf7\xc7\x88}\x95\xd1N/l\xb3at\xd9\xe6\xdc\xe5\x88\xa3\xc6\x8f\x15q\x8f\xf8\xc6\x89U'\x860\xb9\xda\x1b\xf7b\xc1\xf2\x18\xab\xe7;\xe4\x13Ro\x82\xb5%\x83\xaa\xe1\x0e\xc4\x8c-\xd8\xf2\x9e\x19\xe9m\x9c\xf2\xf9\x18\xc7r\x9a\xb5\xfcb\xbfl\xb5\xcf\x0fbQ\xad\r\xbd\xa8\xc9\x13\x0bf^\x84\x94\t\xaa\x073\x06$\xd1#\x07\xc4\xaa\xb5\x07m\x92\xc4\x1b\xdd\xb4\xd2\xd6I\xa6G\t\x97Jy\x0co4\xcc\xc5\x88\x8f\x0eC\xb4\xe0\x0fG\xfe2\xed\x8d\xe8T\xa8gM\xc3\x8d\x13Q1fD\xa2H\x831\xe2s#\xe2\xc8\x1e\xc3\x9c\xe1\xb6\x0c\xb7\t\xe2\xe6fz\xe9\xf0\xf8\xfc\x08\xd7\xa2\xc6\x0f\xdeAEcx>\x84)\x8c\xae\xd1\x83\x1b\x86Mm\xc5\xa7)k[Q\x80Op\xc0\xaa\xca\x80\x92c\xa46\x19\x08\x84\xd0\x00\xf9\x1eG\xc4b\x80\x07\xdc
\xb6\xdb\x98\x1b\xb3\x00\xf2\xf6\xbe\x8aJt\x02\xce\xa6\x94[\xb7C\xf8\x14\xa1>\xd2/Q\xf3,??\xb6\\\x98!\xd2p\xa1\xd7\xbb\xa6j\x9d\xd0\x9c1\xa3\x9c\xa3\xbd\xec\xd4P\xe5\x04\xc3\xdf\x80\x97m\xdc\x8c\xc7/\xc0F,\x83\x05\xf4\x92\x92\xd3\xb5\xd8\xe7\x1fZ\xf4\xf9\x11\x19\xf6\xa2\xdc\xc0!\x12\xac\r?\xc5%L\xa5\x90\x12\x13C\xd5\x0c\xa3\t\xed\xdd\xb8\xc7\x11\xaa\xb6x\xab\x9aI\xf3\x8ba\xc3\xf6\x8e\x9f\x18 \x7f\xfa\x02$\xacV~\xe8\xc4\xad\xb5rt;\xcc\x91\xca;\xb2\xb2\xa7\x93\xdb\x81\xa7\x1f\x00b#\xad\xc9\xf6\x08e!\x8c\xca\x18?\xbd\xc2J\xb3\xea\x10^\xaa/\x82\xdc\x9b \xc3\x0b\x7f\xe1\xb5\xb0\xd1\xe2\xc4QK\xf1\x1ey\x02r\xc9\xd6\x02HA\x00\x99\x18t~\x98\xf3\xa2\x94$!\x8a&'\x82\x93\xbf\xe7P\xbe\x87\xe7\xb2\xfd\xfch\x96\x9f\x1f\xf8!\xff\xc30\xe4\x8b\xdf\x88\xe1\xdevsU\x1c\xbdk\xc96\x8b\xce\xe5mB\xaf=l\xb9\xb8s\x8e7^\\\xb2cD\xae\xefc\xd9\xf6\xfb\x18E7k\xa4\x97X\x9b\x9f\xf0]Y\xed\xc1\xa5\xfb\xaa!\xf7\xab\x86<l\xbde\xdf\x1fp\x1e\x9a\xb1\x99\x14jG\xf4s\x9f\x132\xef\x8d.\xa9m\x1c\x1fL\xbd\xd9?T\xb0\xc3\x9f\x1f\xd6\x96\x01\x1c\xf5\xa6\x8coj\xb1E)\xb1W\xcd\xeb\x10\xe4\xb2\xcbq\x9f\x1fy0w|\x9e7\x82p'\x04\xe5\xa4\x10\xedI\x91\x8b@\x0c\xe2\x81\xac'\xbf5_\xc3\x0b\x05H\xb79\xfb\xee\xa1q\x05\xfa\x88\xa56\x15\x10R\x0f(\x92\xab\xbd|\x84\xc8\x0e\x82\x81\xe2;\xd9J\xc6\xc5?f\x13}\xc0'\xf5\xfcR8i1\x87_\xca<\xd5(\xf5\x81\x1a>\xb5)\xb9x5\xef\xfaP\x91\x02\xed\x00\x1c\xa7\xbf6\xe1\x93B\xc8!\x8d2<\x02|\x80\x8c\x1e\xc4\nN\xc8Xou\xfb\xe2W\xc9\xc2|\xf9\xc7\xb4\x94oo\x1c\x9d\nX#\xbd\xa3Q\x0eCl\x16\xce\xb3a\xd9\xc8\x9b0\x18\xed\xddR\xb4\x1f\xaf+\x82j\x883\x04\xcf\xf0\x98\xc5t\xf2}\xfd\xe4xm\xab\xd6a\x1c\xde\x0e\xf8\xd0\x99\xe7KtT\xa31\xea\x14'\xf3\xb9\x9d\x86\xedt\x8b\xc1`\xe2\xbe\xb6kE\xb2_bV@Q4\xba\xa6|Vk\xdf\x16{O#\xd3\x11l\xa8g\xa2tm\xb8M\xb8\xa6\x82\xa9\xf9\x99WD\x8el\xb8y\x9c\xc1v\x02\x9d\xe2\xea>54\xc4\x9d\xed']\xee\xb4\xecfW\r\xb55n(\xf4\x8d\x9d\xec\xe9\xe3\xa4\xae6\xd66\xaa\x16j\x04\xe1\xa8`\xaa|~\x9c\xb4K\xef\x18>\x97\xb3\x04=\xb1\\\x9c4?q6H\xe6\xad\x8b\xe9\xe5\x94_j\x88\x01\xe3Ar\xb8\x90\xf3kG\xd9\xd5\xc3\x
dd\xc5D\xda\xdf\x9d\xbal\nEOh\xd9U\xaf\xb3\xc1\x9b\x87\x0b\xe9pp:\xf7s\xfa\xf9!k~co\xc9\xee\xbc=\xd9\xaeD\x17\x08t\t\xceU\x93U\x88\xc3\xa6B\x91\xa5\r\x12\xae\xc7\xad\x0b\x92\x97\xaf\xeb\xca\xc1TV\xb5\x9en\"\xc1\xce\xab\xca\x9ao\xe5vs\xf3\xe5\xd1\x08\xedC\x80^km\x0e\x1c\x80\xfc\x00\x9at\x7fUwW\xb0\xf5
"editor": "com.apple.Photos",
"format_id": "com.apple.photo",
"base_version": 0,
"format_version": "1.5",
"adjustments": [
{
"formatVersion": 1,
"enabled": True,
"settings": {
"offsetLocalLight": 0,
"offsetHighlights": 0,
"inputLight": 0.3073453608247423,
"offsetExposure": 0,
"offsetBlackPoint": 0,
"offsetBrightness": 0,
"statistics": {
"p02": 0.00784313725490196,
"p50": 0.09803921568627451,
"autoValue": 0.2856,
"blackPoint": 0.0031976514035982175,
"tonalRange": 0.09845670498375754,
"p25": 0.03529411764705882,
"p98": 0.6,
"lightMap": "FVpKd0pbSVkQWA5XR1kNWBNWFFYqMCOpJFgbWBmuF1YhjCT7J9Eik0ZhIWJFl1PIVGlWa1dtWW9acl12X3lD/hJwDlUPVkdYJFcPVRAxFBZIWEhYGVNEWBJXElYYWCGIJalNYxvgF3AgbUrwUd5V1VZsV21Zb1pxXHVfeBmDDSkNVw5WF1YVVDFWR1dHV0hXSFdIWElYGVkTWkrIPasv/U75D1sPZBRtUmZUaFVqVv0ssCjJWfxcll54FyEZSBBWR1YbVBkcET4UHEdXSVhJWElZSllKW0tcTF1MXiVgRfENCg9lOnRSfVRoVGpVkyg/K0UcRhk0UPoOIBJfR+dHVw0NDzMaHB9YSFhJWElZSlpKWktbTF1MXk5gT2FPYg0GDWQ1vDV/VHM2gCFsV4JC1xWgFa8UwhISFBIUVxRXOWoSVRiKSKBIYklZSllKWkpbS1xMXk1fT2FPYhBmDQUNWlJ6NGMUdRB1N9AXwxOnEyQTEhMRDkcXRRcUFVgWSyPeJaciZUpiSlpKW0tbTFxMXU1fT2FPYlFkDWYNBg5uVP4ROhKJERARERISEnQUd158YYURVxNVFxQX0RdXFmgl/k3/Sv9KWkpbS1xMXU1eT2FPYlFkUXMOdB5tPqgv/w+9KYwqoFl0WnNbr153X3lhq0pbSloXWRVrJtwpWD+fSuA6XEpnTF1MX05gT2FPY1FlP3ooZSdUIWIYeBnhGmodhh+oHnYjMSWZIGkXvBELS/JKXEpbGkgWrBeKRahM6kzZTd9O00/dT+NQ11HTUL4TgxAhDywROREWEWsh7xQlIzszRTRGM0MuPRt6EoVMXUxeFFwPEA8ODzQRRhLFEswSuhK8HpQbcxwvFywPQg4fDW0SzA+aDwwQEBUyDxYpPj1OQFA8TDZENNoqkUywFF0RDw8ODhEQERHoEWASYhtjKGMpQiY2IzQbag9rDwwQGw4SDhoNDw0SFSIeNyk9O09CUTtML35MvzqRFBUScRFmFbcWwxQQGfNPllBjUWUrZSZnImpVbBVtVnANcQ0LDSMaKSEsISojMjA8Mz5ceF55Hnkgyi7QM5oPDhxbECwPIRa7HOkU7w4IDQcPeVN9HOdWcFlxEnAOGQwHDR0mMyw3LDcrMikwMD0seGCMYXwvfB6CJKVi2BVFFtASwA/fDpoNHQ0dDwwP5g2fDQYNCR91JpIPdw13DRAOGSs8N0U0QjNALjsuOSh8XuNjgkeAGYwgnizmH1IZphnSTfmo+w/9DQkMKhLmKfMO8w2REnYSdBIRFB0SIAwRJDs8SjtKOEYzQGGAZIA6jGaCV4MdiiJ+K9lCrQ9tHUMZTRz7D+ENERQTFIwXqBLqEKQVGRkgHCQdJR0nDR4NKylEKUgpRCQ8D4FmhFqOZ9NjiBmDGZUw9FnPDa8QqBnNOMcRxRwnGjMdYRwfGRoUGiEsJjArNSk1JDQfLg0KFhwlLCsyDzAPFg8NUolmiGuMLp8jnCCdJKMZlBEsEB8SPh7jHSclLiYvJDIjLyEzKzwzRDNFMUQxRBEzEhMXGhwnKEcSERE9ETcSj1GPaJVWkxiOHoweoxkpFB0ODg0nDyMjNS47Mj0yPjA+ITUhQTpOPVE5Sw1CEQ0XICMvJS4qahVNJlw4dR9mKFckZyR1GZ0TPyOhHFYMEw8ZEBMdJSImHjohPiNAMD8sPCs0LTkkNg0bDBcMFRgmHSksOyzdJMAeaC/PI4UnqSVPH34UhBNCD2UPJw9qExsYIyMnIiUhJSQuJzwyQDVDMT0uOCMvDhcMIhQUDRAnPTJ4L4kjvidvMNouliyFJmshqhtvEzgblxlgJn0pjiEqIigjKSUrJ3s+Tj1NNkUzQit2DlISDg0NFXAMCw8dGEsfkje/KHgimSVgLrcXRR6TErcPcxt3FGwhjh23FKonMidwFEcUnw8vEK8QChBPGcoNBxMSDkEUaA4UElYWPx9wHaEmzxedF1AbVRlpGmAajRFjHJk
VcxySIn0TihdyElMSLBXSJOYY7RAWEQsRsQ0HFRYOPhMZF4UZgBaAGlwgxSTDFakWhCWlFZYXdhZkD4INXQ9iD2td3w5yEZoNVQ/RL9cSuxfIFFkQCg8XDR4UGRdBGV4fsxhuFcYtjiDYHIwbihiEE5QRbRVlFHISUQ1TEFgPaA2cD4ASxw9kFowpnhyLHG0hbg9YDwgNCg0PGVohgSO7F54XghvBFoUXmhY9GIwWfxNhE34PMRKhEekOxw5uDykNVhF6F8sr0CWhLpQ1/yL+HqgOCA0HDUsqtiuyJYYUtRJhFXoTaxNoD04SeBOBE5MURRE+ES4PDw0LDhoVFw9QEpIQahy2D24RQxF2ENsQjA4JDQUOPiHJKIQVaw8qEmYSVg8wEnUPUw15EXUssRFhEVEQaRkbEnYMDA+bEX4UkRJ1G8AcuQ9fDB4Taw+cDQcNBRNBGtMczSOHI4YTUREfEVkXkBx8EoQTnRNuDnoNJg4wElsNYRWjE8MSYyPTTeFJuA2gDAUNjQ+WDysNBw0JHlkREynRF6YenRNkEZAPLQ9KGXEPnhGSD3gPfg0gD3o=",
"localAutoValue": 0.36000000000000004,
"whitePoint": 1.003921568627451,
"p10": 0.01568627450980392,
"highKey": 0.8063460882459689,
},
"offsetContrast": 0,
"offsetShadows": 0,
},
"identifier": "SmartTone",
}
],
"metadata": {
"masterWidth": 3024,
"pipelineVersion": "OSX.4",
"masterHeight": 4032,
"orientation": 1,
},
"orientation": 1,
"adjustment_format_version": 1,
"version_info": {
"buildNumber": "19G73",
"appVersion": "161.0.120",
"schemaRevision": 1,
"platform": "OSX",
},
"timestamp": "2020-10-03T22:54:20+00:00",
}
def test_no_adjustments(photosdb):
photo = photosdb.get_photo(UUID_DICT["no_adjustments"])
assert photo.adjustments is None
def test_exiftool_newlines_in_description(photosdb):
photo = photosdb.get_photo(UUID_DICT["description_newlines"])
exif = photo._exiftool_dict()
assert photo.description.find("\n") > 0
assert exif["EXIF:ImageDescription"].find("\n") > 0
@pytest.mark.skip(SKIP_TEST, reason="Not yet implemented")
def test_duplicates_1(photosdb):
# test photo has duplicates
photo = photosdb.get_photo(uuid=UUID_DICT["duplicates"])
assert len(photo.duplicates) == 1
assert photo.duplicates[0].uuid == UUID_DUPLICATE
def test_duplicates_2(photosdb):
# test photo does not have duplicates
photo = photosdb.get_photo(uuid=UUID_DICT["no_duplicates"])
assert not photo.duplicates
def test_compound_query(photosdb):
photos = photosdb.photos(persons=["Katie", "Maria"], albums=["Multi Keyword"])
assert len(photos) == 2
assert UUID_DICT["multi_query_1"] in [p.uuid for p in photos]
assert UUID_DICT["multi_query_2"] in [p.uuid for p in photos]
def test_multi_keyword(photosdb):
photos = photosdb.photos(keywords=["Kids", "wedding"])
assert len(photos) == 6
def test_multi_album(photosdb):
photos = photosdb.photos(albums=["Pumpkin Farm", "Test Album"])
assert len(photos) == 3
def test_multi_uuid(photosdb):
photos = photosdb.photos(uuid=[UUID_DICT["favorite"], UUID_DICT["not_favorite"]])
assert len(photos) == 2
def test_detected_text(photosdb):
for uuid, expected_text in UUID_DETECTED_TEXT.items():
photo = photosdb.get_photo(uuid=uuid)
detected_text = " ".join(text for text, conf in photo.detected_text())
if expected_text is not None:
assert expected_text in detected_text
else:
assert not detected_text
| true | true |
f720807430145448da0dda4234ceca5a1f6435e7 | 255 | py | Python | problems/013.py | JoshKarpel/Euler | 9c4a89cfe4b0114d84a82e2b2894c7b8af815e93 | [
"MIT"
] | 1 | 2017-09-20T22:26:24.000Z | 2017-09-20T22:26:24.000Z | problems/013.py | JoshKarpel/euler-python | 9c4a89cfe4b0114d84a82e2b2894c7b8af815e93 | [
"MIT"
] | null | null | null | problems/013.py | JoshKarpel/euler-python | 9c4a89cfe4b0114d84a82e2b2894c7b8af815e93 | [
"MIT"
] | null | null | null | import os
def solve():
filepath = os.path.join(os.path.dirname(__file__), '013_numbers.txt')
with open(filepath) as f:
numbers = [int(x) for x in f]
return int(str(sum(numbers))[:10])
if __name__ == '__main__':
print(solve())
| 18.214286 | 73 | 0.619608 | import os
def solve():
filepath = os.path.join(os.path.dirname(__file__), '013_numbers.txt')
with open(filepath) as f:
numbers = [int(x) for x in f]
return int(str(sum(numbers))[:10])
if __name__ == '__main__':
print(solve())
| true | true |
f7208121384b71d9a38bf011097a42030c385a61 | 798 | py | Python | app/ngrok.py | nnsnodnb/line-bot-django-handle | 27d0e29b674831eac8068124f6445d0698968f40 | [
"Apache-2.0"
] | null | null | null | app/ngrok.py | nnsnodnb/line-bot-django-handle | 27d0e29b674831eac8068124f6445d0698968f40 | [
"Apache-2.0"
] | 1 | 2020-06-05T18:48:36.000Z | 2020-06-05T18:48:36.000Z | app/ngrok.py | nnsnodnb/line-bot-django-handle | 27d0e29b674831eac8068124f6445d0698968f40 | [
"Apache-2.0"
] | null | null | null | from django.conf import settings
import requests
import socket
BASE_HOST = '127.0.0.1'
PORT = 4040
class Ngrok(object):
def __init__(self, port=PORT, *args, **kwargs):
super(Ngrok, self).__init__(*args, **kwargs)
self.port = port
self._check_launch_ngrok()
def _check_launch_ngrok(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((BASE_HOST, self.port))
# error handling: socket.error
s.close()
def get_public_url(self):
if not settings.USE_NGROK:
return None
response = requests.get(f'http://{BASE_HOST}:{self.port}/api/tunnels').json()
tunnels = response['tunnels']
tunnel = tunnels[1]
public_url = tunnel['public_url']
return public_url
| 24.9375 | 85 | 0.630326 | from django.conf import settings
import requests
import socket
BASE_HOST = '127.0.0.1'
PORT = 4040
class Ngrok(object):
def __init__(self, port=PORT, *args, **kwargs):
super(Ngrok, self).__init__(*args, **kwargs)
self.port = port
self._check_launch_ngrok()
def _check_launch_ngrok(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((BASE_HOST, self.port))
s.close()
def get_public_url(self):
if not settings.USE_NGROK:
return None
response = requests.get(f'http://{BASE_HOST}:{self.port}/api/tunnels').json()
tunnels = response['tunnels']
tunnel = tunnels[1]
public_url = tunnel['public_url']
return public_url
| true | true |
f720816e82bbc8f3addb15db8bbee82d4cadc5e1 | 811 | py | Python | scipy/sparse/data.py | lorentzenchr/scipy | 393a05ee927883ad6316b7092c851afea8f16816 | [
"BSD-3-Clause"
] | 9,095 | 2015-01-02T18:24:23.000Z | 2022-03-31T20:35:31.000Z | scipy/sparse/data.py | lorentzenchr/scipy | 393a05ee927883ad6316b7092c851afea8f16816 | [
"BSD-3-Clause"
] | 11,500 | 2015-01-01T01:15:30.000Z | 2022-03-31T23:07:35.000Z | scipy/sparse/data.py | lorentzenchr/scipy | 393a05ee927883ad6316b7092c851afea8f16816 | [
"BSD-3-Clause"
] | 5,838 | 2015-01-05T11:56:42.000Z | 2022-03-31T23:21:19.000Z | # This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.sparse` namespace for importing the functions
# included below.
import warnings
from . import _data
__all__ = [ # noqa: F822
'isscalarlike',
'matrix',
'name',
'npfunc',
'spmatrix',
'validateaxis',
]
def __dir__():
return __all__
def __getattr__(name):
if name not in __all__:
raise AttributeError(
"scipy.sparse.data is deprecated and has no attribute "
f"{name}. Try looking in scipy.sparse instead.")
warnings.warn(f"Please use `{name}` from the `scipy.sparse` namespace, "
"the `scipy.sparse.data` namespace is deprecated.",
category=DeprecationWarning, stacklevel=2)
return getattr(_data, name)
| 23.852941 | 76 | 0.641184 |
import warnings
from . import _data
__all__ = [
'isscalarlike',
'matrix',
'name',
'npfunc',
'spmatrix',
'validateaxis',
]
def __dir__():
return __all__
def __getattr__(name):
if name not in __all__:
raise AttributeError(
"scipy.sparse.data is deprecated and has no attribute "
f"{name}. Try looking in scipy.sparse instead.")
warnings.warn(f"Please use `{name}` from the `scipy.sparse` namespace, "
"the `scipy.sparse.data` namespace is deprecated.",
category=DeprecationWarning, stacklevel=2)
return getattr(_data, name)
| true | true |
f72081ae67b5d48042132e9f4744873649364661 | 5,831 | py | Python | idatasets/datasets/util.py | rahul1990gupta/indic-nlp-datasets | 4d0935b194263579b9653cf8c3d4ecdd17af687d | [
"MIT"
] | 1 | 2020-08-16T11:44:37.000Z | 2020-08-16T11:44:37.000Z | idatasets/datasets/util.py | rahul1990gupta/indic-nlp-datasets | 4d0935b194263579b9653cf8c3d4ecdd17af687d | [
"MIT"
] | 2 | 2020-08-16T05:23:43.000Z | 2020-10-21T06:59:15.000Z | idatasets/datasets/util.py | rahul1990gupta/indic-nlp-datasets | 4d0935b194263579b9653cf8c3d4ecdd17af687d | [
"MIT"
] | 2 | 2021-06-13T05:40:26.000Z | 2022-02-05T15:53:23.000Z | import os
from urllib.error import URLError, HTTPError
from urllib.request import urlretrieve
import tqdm
import tarfile
import zipfile
import shutil
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
def download_file(
origin: str,
cache_subdir: str = "datasets") -> str:
fname = origin.split("/")[-1]
datadir_base = os.path.expanduser(os.path.join("~", ".keras"))
if not os.access(datadir_base, os.W_OK):
datadir_base = os.path.join("/tmp", ".keras")
datadir = os.path.join(datadir_base, cache_subdir)
if not os.path.exists(datadir):
os.makedirs(datadir)
fpath = os.path.join(datadir, fname)
if os.path.exists(fpath):
return fpath
global progbar
progbar = None
def dl_progress(count: int, block_size: int, total_size: int) -> None:
global progbar
if progbar is None:
progbar = tqdm.tqdm(total=total_size)
else:
progbar.update(block_size)
error_msg = "URL fetch failure on {}: {} -- {}"
if not os.path.exists(fpath):
try:
try:
urlretrieve(origin, fpath, dl_progress)
except URLError as e:
raise Exception(error_msg.format(origin, e.errno, e.reason))
except HTTPError as e:
raise Exception(error_msg.format(origin, e.code, e.msg))
except (Exception, KeyboardInterrupt):
if os.path.exists(fpath):
os.remove(fpath)
raise
progbar = None
return fpath
def get_file(
origin: str,
untar: bool = False,
unzip: bool = False,
cache_subdir: str = "datasets") -> str:
"""Downloads a file from a URL if it not already in the cache."""
# https://raw.githubusercontent.com/fchollet/keras/master/keras/utils/data_utils.py
# Copyright Francois Chollet, Google, others (2015)
# Under MIT license
fname = origin.split("/")[-1].split(".")[0]
datadir_base = os.path.expanduser(os.path.join("~", ".keras"))
if not os.access(datadir_base, os.W_OK):
datadir_base = os.path.join("/tmp", ".keras")
datadir = os.path.join(datadir_base, cache_subdir)
if not os.path.exists(datadir):
os.makedirs(datadir)
if untar or unzip:
untar_fpath = os.path.join(datadir, fname)
if unzip:
fpath = untar_fpath + ".zip"
else:
fpath = untar_fpath + ".tar.gz"
else:
fpath = os.path.join(datadir, fname)
global progbar
progbar = None
def dl_progress(count, block_size, total_size):
global progbar
if progbar is None:
progbar = tqdm.tqdm(total=total_size)
else:
progbar.update(block_size)
error_msg = "URL fetch failure on {}: {} -- {}"
if not os.path.exists(fpath):
try:
try:
urlretrieve(origin, fpath, dl_progress)
except URLError as e:
raise Exception(error_msg.format(origin, e.errno, e.reason))
except HTTPError as e:
raise Exception(error_msg.format(origin, e.code, e.msg))
except (Exception, KeyboardInterrupt):
if os.path.exists(fpath):
os.remove(fpath)
raise
progbar = None
if untar:
if not os.path.exists(untar_fpath):
print("Untaring file...")
tfile = tarfile.open(fpath, "r:gz")
try:
tfile.extractall(path=datadir)
except (Exception, KeyboardInterrupt):
if os.path.exists(untar_fpath):
if os.path.isfile(untar_fpath):
os.remove(untar_fpath)
else:
shutil.rmtree(untar_fpath)
raise
tfile.close()
return untar_fpath
elif unzip:
if not os.path.exists(untar_fpath):
print("Unzipping file...")
with zipfile.ZipFile(fpath) as file_:
try:
file_.extractall(path=datadir)
except (Exception, KeyboardInterrupt):
if os.path.exists(untar_fpath):
if os.path.isfile(untar_fpath):
os.remove(untar_fpath)
else:
shutil.rmtree(untar_fpath)
raise
return untar_fpath
return fpath
class Bunch(dict):
"""Container object exposing keys as attributes
Bunch objects are sometimes used as an output for functions and methods.
They extend dictionaries by enabling values to be accessed by key,
`bunch["value_key"]`, or by an attribute, `bunch.value_key`.
Examples
--------
>>> b = Bunch(a=1, b=2)
>>> b['b']
2
>>> b.b
2
>>> b.a = 3
>>> b['a']
3
>>> b.c = 6
>>> b['c']
6
"""
def __init__(self, **kwargs):
super().__init__(kwargs)
def __setattr__(self, key: str, value: str):
self[key] = value
def __dir__(self):
return self.keys()
def __getattr__(self, key: str):
try:
return self[key]
except KeyError:
raise AttributeError(key)
def __setstate__(self, state):
# Bunch pickles generated with scikit-learn 0.16.* have an non
# empty __dict__. This causes a surprising behaviour when
# loading these pickles scikit-learn 0.17: reading bunch.key
# uses __dict__ but assigning to bunch.key use __setattr__ and
# only changes bunch['key']. More details can be found at:
# https://github.com/scikit-learn/scikit-learn/issues/6196.
# Overriding __setstate__ to be a noop has the effect of
# ignoring the pickled __dict__
pass
| 31.690217 | 87 | 0.575373 | import os
from urllib.error import URLError, HTTPError
from urllib.request import urlretrieve
import tqdm
import tarfile
import zipfile
import shutil
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
def download_file(
origin: str,
cache_subdir: str = "datasets") -> str:
fname = origin.split("/")[-1]
datadir_base = os.path.expanduser(os.path.join("~", ".keras"))
if not os.access(datadir_base, os.W_OK):
datadir_base = os.path.join("/tmp", ".keras")
datadir = os.path.join(datadir_base, cache_subdir)
if not os.path.exists(datadir):
os.makedirs(datadir)
fpath = os.path.join(datadir, fname)
if os.path.exists(fpath):
return fpath
global progbar
progbar = None
def dl_progress(count: int, block_size: int, total_size: int) -> None:
global progbar
if progbar is None:
progbar = tqdm.tqdm(total=total_size)
else:
progbar.update(block_size)
error_msg = "URL fetch failure on {}: {} -- {}"
if not os.path.exists(fpath):
try:
try:
urlretrieve(origin, fpath, dl_progress)
except URLError as e:
raise Exception(error_msg.format(origin, e.errno, e.reason))
except HTTPError as e:
raise Exception(error_msg.format(origin, e.code, e.msg))
except (Exception, KeyboardInterrupt):
if os.path.exists(fpath):
os.remove(fpath)
raise
progbar = None
return fpath
def get_file(
origin: str,
untar: bool = False,
unzip: bool = False,
cache_subdir: str = "datasets") -> str:
fname = origin.split("/")[-1].split(".")[0]
datadir_base = os.path.expanduser(os.path.join("~", ".keras"))
if not os.access(datadir_base, os.W_OK):
datadir_base = os.path.join("/tmp", ".keras")
datadir = os.path.join(datadir_base, cache_subdir)
if not os.path.exists(datadir):
os.makedirs(datadir)
if untar or unzip:
untar_fpath = os.path.join(datadir, fname)
if unzip:
fpath = untar_fpath + ".zip"
else:
fpath = untar_fpath + ".tar.gz"
else:
fpath = os.path.join(datadir, fname)
global progbar
progbar = None
def dl_progress(count, block_size, total_size):
global progbar
if progbar is None:
progbar = tqdm.tqdm(total=total_size)
else:
progbar.update(block_size)
error_msg = "URL fetch failure on {}: {} -- {}"
if not os.path.exists(fpath):
try:
try:
urlretrieve(origin, fpath, dl_progress)
except URLError as e:
raise Exception(error_msg.format(origin, e.errno, e.reason))
except HTTPError as e:
raise Exception(error_msg.format(origin, e.code, e.msg))
except (Exception, KeyboardInterrupt):
if os.path.exists(fpath):
os.remove(fpath)
raise
progbar = None
if untar:
if not os.path.exists(untar_fpath):
print("Untaring file...")
tfile = tarfile.open(fpath, "r:gz")
try:
tfile.extractall(path=datadir)
except (Exception, KeyboardInterrupt):
if os.path.exists(untar_fpath):
if os.path.isfile(untar_fpath):
os.remove(untar_fpath)
else:
shutil.rmtree(untar_fpath)
raise
tfile.close()
return untar_fpath
elif unzip:
if not os.path.exists(untar_fpath):
print("Unzipping file...")
with zipfile.ZipFile(fpath) as file_:
try:
file_.extractall(path=datadir)
except (Exception, KeyboardInterrupt):
if os.path.exists(untar_fpath):
if os.path.isfile(untar_fpath):
os.remove(untar_fpath)
else:
shutil.rmtree(untar_fpath)
raise
return untar_fpath
return fpath
class Bunch(dict):
def __init__(self, **kwargs):
super().__init__(kwargs)
def __setattr__(self, key: str, value: str):
self[key] = value
def __dir__(self):
return self.keys()
def __getattr__(self, key: str):
try:
return self[key]
except KeyError:
raise AttributeError(key)
def __setstate__(self, state):
pass
| true | true |
f72082ade0aa616c87208ff39d48373868d15c94 | 1,030 | py | Python | api/client/test/test_api_list_pipelines_response.py | krishnakumar27/mlx | dce67d58dffa24ca7a6a4d6b5fd8d4eb94e35215 | [
"Apache-2.0"
] | 98 | 2021-05-03T23:27:53.000Z | 2022-03-13T02:29:12.000Z | api/client/test/test_api_list_pipelines_response.py | krishnakumar27/mlx | dce67d58dffa24ca7a6a4d6b5fd8d4eb94e35215 | [
"Apache-2.0"
] | 296 | 2021-05-03T22:44:26.000Z | 2022-03-31T11:50:16.000Z | api/client/test/test_api_list_pipelines_response.py | krishnakumar27/mlx | dce67d58dffa24ca7a6a4d6b5fd8d4eb94e35215 | [
"Apache-2.0"
] | 38 | 2021-05-03T22:52:59.000Z | 2022-03-31T03:58:34.000Z | # Copyright 2021 The MLX Contributors
#
# SPDX-License-Identifier: Apache-2.0
# coding: utf-8
"""
MLX API
MLX API Extension for Kubeflow Pipelines # noqa: E501
OpenAPI spec version: 0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.api_list_pipelines_response import ApiListPipelinesResponse # noqa: E501
from swagger_client.rest import ApiException
class TestApiListPipelinesResponse(unittest.TestCase):
"""ApiListPipelinesResponse unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testApiListPipelinesResponse(self):
"""Test ApiListPipelinesResponse"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.api_list_pipelines_response.ApiListPipelinesResponse() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 23.409091 | 108 | 0.729126 |
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.api_list_pipelines_response import ApiListPipelinesResponse
from swagger_client.rest import ApiException
class TestApiListPipelinesResponse(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testApiListPipelinesResponse(self):
s
if __name__ == '__main__':
unittest.main()
| true | true |
f72082bda44b211831ca8f51067e919a7b88a005 | 29,302 | py | Python | stable_baselines/trpo_mpi/trpo_mpi.py | Ow-woo/stable-baselines | ece376f62b0eaa3b58e90593b7db5fb9de3d82c5 | [
"MIT"
] | 3 | 2020-04-14T15:28:02.000Z | 2020-09-23T00:55:48.000Z | stable_baselines/trpo_mpi/trpo_mpi.py | Ow-woo/stable-baselines | ece376f62b0eaa3b58e90593b7db5fb9de3d82c5 | [
"MIT"
] | 1 | 2019-10-30T07:31:52.000Z | 2019-10-30T07:31:52.000Z | stable_baselines/trpo_mpi/trpo_mpi.py | Ow-woo/stable-baselines | ece376f62b0eaa3b58e90593b7db5fb9de3d82c5 | [
"MIT"
] | 7 | 2019-10-01T05:49:22.000Z | 2021-12-24T07:11:55.000Z | import time
from contextlib import contextmanager
from collections import deque
import gym
from mpi4py import MPI
import tensorflow as tf
import numpy as np
import stable_baselines.common.tf_util as tf_util
from stable_baselines.common.tf_util import total_episode_reward_logger
from stable_baselines.common import explained_variance, zipsame, dataset, fmt_row, colorize, ActorCriticRLModel, \
SetVerbosity, TensorboardWriter
from stable_baselines import logger
from stable_baselines.common.mpi_adam import MpiAdam
from stable_baselines.common.cg import conjugate_gradient
from stable_baselines.common.policies import ActorCriticPolicy
from stable_baselines.common.misc_util import flatten_lists
from stable_baselines.common.runners import traj_segment_generator
from stable_baselines.trpo_mpi.utils import add_vtarg_and_adv
class TRPO(ActorCriticRLModel):
"""
Trust Region Policy Optimization (https://arxiv.org/abs/1502.05477)
:param policy: (ActorCriticPolicy or str) The policy model to use (MlpPolicy, CnnPolicy, CnnLstmPolicy, ...)
:param env: (Gym environment or str) The environment to learn from (if registered in Gym, can be str)
:param gamma: (float) the discount value
:param timesteps_per_batch: (int) the number of timesteps to run per batch (horizon)
:param max_kl: (float) the Kullback-Leibler loss threshold
:param cg_iters: (int) the number of iterations for the conjugate gradient calculation
:param lam: (float) GAE factor
:param entcoeff: (float) the weight for the entropy loss
:param cg_damping: (float) the compute gradient dampening factor
:param vf_stepsize: (float) the value function stepsize
:param vf_iters: (int) the value function's number iterations for learning
:param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug
:param tensorboard_log: (str) the log location for tensorboard (if None, no logging)
:param _init_setup_model: (bool) Whether or not to build the network at the creation of the instance
:param policy_kwargs: (dict) additional arguments to be passed to the policy on creation
:param full_tensorboard_log: (bool) enable additional logging when using tensorboard
WARNING: this logging can take a lot of space quickly
:param seed: (int) Seed for the pseudo-random generators (python, numpy, tensorflow).
If None (default), use random seed. Note that if you want completely deterministic
results, you must set `n_cpu_tf_sess` to 1.
:param n_cpu_tf_sess: (int) The number of threads for TensorFlow operations
If None, the number of cpu of the current machine will be used.
"""
def __init__(self, policy, env, gamma=0.99, timesteps_per_batch=1024, max_kl=0.01, cg_iters=10, lam=0.98,
entcoeff=0.0, cg_damping=1e-2, vf_stepsize=3e-4, vf_iters=3, verbose=0, tensorboard_log=None,
_init_setup_model=True, policy_kwargs=None, full_tensorboard_log=False,
seed=None, n_cpu_tf_sess=1):
super(TRPO, self).__init__(policy=policy, env=env, verbose=verbose, requires_vec_env=False,
_init_setup_model=_init_setup_model, policy_kwargs=policy_kwargs,
seed=seed, n_cpu_tf_sess=n_cpu_tf_sess)
self.using_gail = False
self.timesteps_per_batch = timesteps_per_batch
self.cg_iters = cg_iters
self.cg_damping = cg_damping
self.gamma = gamma
self.lam = lam
self.max_kl = max_kl
self.vf_iters = vf_iters
self.vf_stepsize = vf_stepsize
self.entcoeff = entcoeff
self.tensorboard_log = tensorboard_log
self.full_tensorboard_log = full_tensorboard_log
# GAIL Params
self.hidden_size_adversary = 100
self.adversary_entcoeff = 1e-3
self.expert_dataset = None
self.g_step = 1
self.d_step = 1
self.d_stepsize = 3e-4
self.graph = None
self.sess = None
self.policy_pi = None
self.loss_names = None
self.assign_old_eq_new = None
self.compute_losses = None
self.compute_lossandgrad = None
self.compute_fvp = None
self.compute_vflossandgrad = None
self.d_adam = None
self.vfadam = None
self.get_flat = None
self.set_from_flat = None
self.timed = None
self.allmean = None
self.nworkers = None
self.rank = None
self.reward_giver = None
self.step = None
self.proba_step = None
self.initial_state = None
self.params = None
self.summary = None
if _init_setup_model:
self.setup_model()
def _get_pretrain_placeholders(self):
policy = self.policy_pi
action_ph = policy.pdtype.sample_placeholder([None])
if isinstance(self.action_space, gym.spaces.Discrete):
return policy.obs_ph, action_ph, policy.policy
return policy.obs_ph, action_ph, policy.deterministic_action
def setup_model(self):
# prevent import loops
from stable_baselines.gail.adversary import TransitionClassifier
with SetVerbosity(self.verbose):
assert issubclass(self.policy, ActorCriticPolicy), "Error: the input policy for the TRPO model must be " \
"an instance of common.policies.ActorCriticPolicy."
self.nworkers = MPI.COMM_WORLD.Get_size()
self.rank = MPI.COMM_WORLD.Get_rank()
np.set_printoptions(precision=3)
self.graph = tf.Graph()
with self.graph.as_default():
self.set_random_seed(self.seed)
self.sess = tf_util.make_session(num_cpu=self.n_cpu_tf_sess, graph=self.graph)
if self.using_gail:
self.reward_giver = TransitionClassifier(self.observation_space, self.action_space,
self.hidden_size_adversary,
entcoeff=self.adversary_entcoeff)
# Construct network for new policy
self.policy_pi = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs, 1,
None, reuse=False, **self.policy_kwargs)
# Network for old policy
with tf.variable_scope("oldpi", reuse=False):
old_policy = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs, 1,
None, reuse=False, **self.policy_kwargs)
with tf.variable_scope("loss", reuse=False):
atarg = tf.placeholder(dtype=tf.float32, shape=[None]) # Target advantage function (if applicable)
ret = tf.placeholder(dtype=tf.float32, shape=[None]) # Empirical return
observation = self.policy_pi.obs_ph
action = self.policy_pi.pdtype.sample_placeholder([None])
kloldnew = old_policy.proba_distribution.kl(self.policy_pi.proba_distribution)
ent = self.policy_pi.proba_distribution.entropy()
meankl = tf.reduce_mean(kloldnew)
meanent = tf.reduce_mean(ent)
entbonus = self.entcoeff * meanent
vferr = tf.reduce_mean(tf.square(self.policy_pi.value_flat - ret))
# advantage * pnew / pold
ratio = tf.exp(self.policy_pi.proba_distribution.logp(action) -
old_policy.proba_distribution.logp(action))
surrgain = tf.reduce_mean(ratio * atarg)
optimgain = surrgain + entbonus
losses = [optimgain, meankl, entbonus, surrgain, meanent]
self.loss_names = ["optimgain", "meankl", "entloss", "surrgain", "entropy"]
dist = meankl
all_var_list = tf_util.get_trainable_vars("model")
var_list = [v for v in all_var_list if "/vf" not in v.name and "/q/" not in v.name]
vf_var_list = [v for v in all_var_list if "/pi" not in v.name and "/logstd" not in v.name]
self.get_flat = tf_util.GetFlat(var_list, sess=self.sess)
self.set_from_flat = tf_util.SetFromFlat(var_list, sess=self.sess)
klgrads = tf.gradients(dist, var_list)
flat_tangent = tf.placeholder(dtype=tf.float32, shape=[None], name="flat_tan")
shapes = [var.get_shape().as_list() for var in var_list]
start = 0
tangents = []
for shape in shapes:
var_size = tf_util.intprod(shape)
tangents.append(tf.reshape(flat_tangent[start: start + var_size], shape))
start += var_size
gvp = tf.add_n([tf.reduce_sum(grad * tangent)
for (grad, tangent) in zipsame(klgrads, tangents)]) # pylint: disable=E1111
# Fisher vector products
fvp = tf_util.flatgrad(gvp, var_list)
tf.summary.scalar('entropy_loss', meanent)
tf.summary.scalar('policy_gradient_loss', optimgain)
tf.summary.scalar('value_function_loss', surrgain)
tf.summary.scalar('approximate_kullback-leibler', meankl)
tf.summary.scalar('loss', optimgain + meankl + entbonus + surrgain + meanent)
self.assign_old_eq_new = \
tf_util.function([], [], updates=[tf.assign(oldv, newv) for (oldv, newv) in
zipsame(tf_util.get_globals_vars("oldpi"),
tf_util.get_globals_vars("model"))])
self.compute_losses = tf_util.function([observation, old_policy.obs_ph, action, atarg], losses)
self.compute_fvp = tf_util.function([flat_tangent, observation, old_policy.obs_ph, action, atarg],
fvp)
self.compute_vflossandgrad = tf_util.function([observation, old_policy.obs_ph, ret],
tf_util.flatgrad(vferr, vf_var_list))
@contextmanager
def timed(msg):
if self.rank == 0 and self.verbose >= 1:
print(colorize(msg, color='magenta'))
start_time = time.time()
yield
print(colorize("done in {:.3f} seconds".format((time.time() - start_time)),
color='magenta'))
else:
yield
def allmean(arr):
assert isinstance(arr, np.ndarray)
out = np.empty_like(arr)
MPI.COMM_WORLD.Allreduce(arr, out, op=MPI.SUM)
out /= self.nworkers
return out
tf_util.initialize(sess=self.sess)
th_init = self.get_flat()
MPI.COMM_WORLD.Bcast(th_init, root=0)
self.set_from_flat(th_init)
with tf.variable_scope("Adam_mpi", reuse=False):
self.vfadam = MpiAdam(vf_var_list, sess=self.sess)
if self.using_gail:
self.d_adam = MpiAdam(self.reward_giver.get_trainable_variables(), sess=self.sess)
self.d_adam.sync()
self.vfadam.sync()
with tf.variable_scope("input_info", reuse=False):
tf.summary.scalar('discounted_rewards', tf.reduce_mean(ret))
tf.summary.scalar('learning_rate', tf.reduce_mean(self.vf_stepsize))
tf.summary.scalar('advantage', tf.reduce_mean(atarg))
tf.summary.scalar('kl_clip_range', tf.reduce_mean(self.max_kl))
if self.full_tensorboard_log:
tf.summary.histogram('discounted_rewards', ret)
tf.summary.histogram('learning_rate', self.vf_stepsize)
tf.summary.histogram('advantage', atarg)
tf.summary.histogram('kl_clip_range', self.max_kl)
if tf_util.is_image(self.observation_space):
tf.summary.image('observation', observation)
else:
tf.summary.histogram('observation', observation)
self.timed = timed
self.allmean = allmean
self.step = self.policy_pi.step
self.proba_step = self.policy_pi.proba_step
self.initial_state = self.policy_pi.initial_state
self.params = tf_util.get_trainable_vars("model") + tf_util.get_trainable_vars("oldpi")
if self.using_gail:
self.params.extend(self.reward_giver.get_trainable_variables())
self.summary = tf.summary.merge_all()
self.compute_lossandgrad = \
tf_util.function([observation, old_policy.obs_ph, action, atarg, ret],
[self.summary, tf_util.flatgrad(optimgain, var_list)] + losses)
def learn(self, total_timesteps, callback=None, log_interval=100, tb_log_name="TRPO",
reset_num_timesteps=True):
new_tb_log = self._init_num_timesteps(reset_num_timesteps)
callback = self._init_callback(callback)
with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \
as writer:
self._setup_learn()
with self.sess.as_default():
callback.on_training_start(locals(), globals())
seg_gen = traj_segment_generator(self.policy_pi, self.env, self.timesteps_per_batch,
reward_giver=self.reward_giver,
gail=self.using_gail, callback=callback)
episodes_so_far = 0
timesteps_so_far = 0
iters_so_far = 0
t_start = time.time()
len_buffer = deque(maxlen=40) # rolling buffer for episode lengths
reward_buffer = deque(maxlen=40) # rolling buffer for episode rewards
true_reward_buffer = None
if self.using_gail:
true_reward_buffer = deque(maxlen=40)
# Initialize dataloader
batchsize = self.timesteps_per_batch // self.d_step
self.expert_dataset.init_dataloader(batchsize)
# Stats not used for now
# TODO: replace with normal tb logging
# g_loss_stats = Stats(loss_names)
# d_loss_stats = Stats(reward_giver.loss_name)
# ep_stats = Stats(["True_rewards", "Rewards", "Episode_length"])
while True:
if timesteps_so_far >= total_timesteps:
break
logger.log("********** Iteration %i ************" % iters_so_far)
def fisher_vector_product(vec):
return self.allmean(self.compute_fvp(vec, *fvpargs, sess=self.sess)) + self.cg_damping * vec
# ------------------ Update G ------------------
logger.log("Optimizing Policy...")
# g_step = 1 when not using GAIL
mean_losses = None
vpredbefore = None
tdlamret = None
observation = None
action = None
seg = None
for k in range(self.g_step):
with self.timed("sampling"):
seg = seg_gen.__next__()
# Stop training early (triggered by the callback)
if not seg.get('continue_training', True): # pytype: disable=attribute-error
break
add_vtarg_and_adv(seg, self.gamma, self.lam)
# ob, ac, atarg, ret, td1ret = map(np.concatenate, (obs, acs, atargs, rets, td1rets))
observation, action = seg["observations"], seg["actions"]
atarg, tdlamret = seg["adv"], seg["tdlamret"]
vpredbefore = seg["vpred"] # predicted value function before update
atarg = (atarg - atarg.mean()) / (atarg.std() + 1e-8) # standardized advantage function estimate
# true_rew is the reward without discount
if writer is not None:
total_episode_reward_logger(self.episode_reward,
seg["true_rewards"].reshape(
(self.n_envs, -1)),
seg["dones"].reshape((self.n_envs, -1)),
writer, self.num_timesteps)
args = seg["observations"], seg["observations"], seg["actions"], atarg
# Subsampling: see p40-42 of John Schulman thesis
# http://joschu.net/docs/thesis.pdf
fvpargs = [arr[::5] for arr in args]
self.assign_old_eq_new(sess=self.sess)
with self.timed("computegrad"):
steps = self.num_timesteps + (k + 1) * (seg["total_timestep"] / self.g_step)
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata() if self.full_tensorboard_log else None
# run loss backprop with summary, and save the metadata (memory, compute time, ...)
if writer is not None:
summary, grad, *lossbefore = self.compute_lossandgrad(*args, tdlamret, sess=self.sess,
options=run_options,
run_metadata=run_metadata)
if self.full_tensorboard_log:
writer.add_run_metadata(run_metadata, 'step%d' % steps)
writer.add_summary(summary, steps)
else:
_, grad, *lossbefore = self.compute_lossandgrad(*args, tdlamret, sess=self.sess,
options=run_options,
run_metadata=run_metadata)
lossbefore = self.allmean(np.array(lossbefore))
grad = self.allmean(grad)
if np.allclose(grad, 0):
logger.log("Got zero gradient. not updating")
else:
with self.timed("conjugate_gradient"):
stepdir = conjugate_gradient(fisher_vector_product, grad, cg_iters=self.cg_iters,
verbose=self.rank == 0 and self.verbose >= 1)
assert np.isfinite(stepdir).all()
shs = .5 * stepdir.dot(fisher_vector_product(stepdir))
# abs(shs) to avoid taking square root of negative values
lagrange_multiplier = np.sqrt(abs(shs) / self.max_kl)
# logger.log("lagrange multiplier:", lm, "gnorm:", np.linalg.norm(g))
fullstep = stepdir / lagrange_multiplier
expectedimprove = grad.dot(fullstep)
surrbefore = lossbefore[0]
stepsize = 1.0
thbefore = self.get_flat()
for _ in range(10):
thnew = thbefore + fullstep * stepsize
self.set_from_flat(thnew)
mean_losses = surr, kl_loss, *_ = self.allmean(
np.array(self.compute_losses(*args, sess=self.sess)))
improve = surr - surrbefore
logger.log("Expected: %.3f Actual: %.3f" % (expectedimprove, improve))
if not np.isfinite(mean_losses).all():
logger.log("Got non-finite value of losses -- bad!")
elif kl_loss > self.max_kl * 1.5:
logger.log("violated KL constraint. shrinking step.")
elif improve < 0:
logger.log("surrogate didn't improve. shrinking step.")
else:
logger.log("Stepsize OK!")
break
stepsize *= .5
else:
logger.log("couldn't compute a good step")
self.set_from_flat(thbefore)
if self.nworkers > 1 and iters_so_far % 20 == 0:
# list of tuples
paramsums = MPI.COMM_WORLD.allgather((thnew.sum(), self.vfadam.getflat().sum()))
assert all(np.allclose(ps, paramsums[0]) for ps in paramsums[1:])
for (loss_name, loss_val) in zip(self.loss_names, mean_losses):
logger.record_tabular(loss_name, loss_val)
with self.timed("vf"):
for _ in range(self.vf_iters):
# NOTE: for recurrent policies, use shuffle=False?
for (mbob, mbret) in dataset.iterbatches((seg["observations"], seg["tdlamret"]),
include_final_partial_batch=False,
batch_size=128,
shuffle=True):
grad = self.allmean(self.compute_vflossandgrad(mbob, mbob, mbret, sess=self.sess))
self.vfadam.update(grad, self.vf_stepsize)
# Stop training early (triggered by the callback)
if not seg.get('continue_training', True): # pytype: disable=attribute-error
break
logger.record_tabular("explained_variance_tdlam_before",
explained_variance(vpredbefore, tdlamret))
if self.using_gail:
# ------------------ Update D ------------------
logger.log("Optimizing Discriminator...")
logger.log(fmt_row(13, self.reward_giver.loss_name))
assert len(observation) == self.timesteps_per_batch
batch_size = self.timesteps_per_batch // self.d_step
# NOTE: uses only the last g step for observation
d_losses = [] # list of tuples, each of which gives the loss for a minibatch
# NOTE: for recurrent policies, use shuffle=False?
for ob_batch, ac_batch in dataset.iterbatches((observation, action),
include_final_partial_batch=False,
batch_size=batch_size,
shuffle=True):
ob_expert, ac_expert = self.expert_dataset.get_next_batch()
# update running mean/std for reward_giver
if self.reward_giver.normalize:
self.reward_giver.obs_rms.update(np.concatenate((ob_batch, ob_expert), 0))
# Reshape actions if needed when using discrete actions
if isinstance(self.action_space, gym.spaces.Discrete):
if len(ac_batch.shape) == 2:
ac_batch = ac_batch[:, 0]
if len(ac_expert.shape) == 2:
ac_expert = ac_expert[:, 0]
*newlosses, grad = self.reward_giver.lossandgrad(ob_batch, ac_batch, ob_expert, ac_expert)
self.d_adam.update(self.allmean(grad), self.d_stepsize)
d_losses.append(newlosses)
logger.log(fmt_row(13, np.mean(d_losses, axis=0)))
# lr: lengths and rewards
lr_local = (seg["ep_lens"], seg["ep_rets"], seg["ep_true_rets"]) # local values
list_lr_pairs = MPI.COMM_WORLD.allgather(lr_local) # list of tuples
lens, rews, true_rets = map(flatten_lists, zip(*list_lr_pairs))
true_reward_buffer.extend(true_rets)
else:
# lr: lengths and rewards
lr_local = (seg["ep_lens"], seg["ep_rets"]) # local values
list_lr_pairs = MPI.COMM_WORLD.allgather(lr_local) # list of tuples
lens, rews = map(flatten_lists, zip(*list_lr_pairs))
len_buffer.extend(lens)
reward_buffer.extend(rews)
if len(len_buffer) > 0:
logger.record_tabular("EpLenMean", np.mean(len_buffer))
logger.record_tabular("EpRewMean", np.mean(reward_buffer))
if self.using_gail:
logger.record_tabular("EpTrueRewMean", np.mean(true_reward_buffer))
logger.record_tabular("EpThisIter", len(lens))
episodes_so_far += len(lens)
current_it_timesteps = MPI.COMM_WORLD.allreduce(seg["total_timestep"])
timesteps_so_far += current_it_timesteps
self.num_timesteps += current_it_timesteps
iters_so_far += 1
logger.record_tabular("EpisodesSoFar", episodes_so_far)
logger.record_tabular("TimestepsSoFar", self.num_timesteps)
logger.record_tabular("TimeElapsed", time.time() - t_start)
if self.verbose >= 1 and self.rank == 0:
logger.dump_tabular()
callback.on_training_end()
return self
def save(self, save_path, cloudpickle=False):
if self.using_gail and self.expert_dataset is not None:
# Exit processes to pickle the dataset
self.expert_dataset.prepare_pickling()
data = {
"gamma": self.gamma,
"timesteps_per_batch": self.timesteps_per_batch,
"max_kl": self.max_kl,
"cg_iters": self.cg_iters,
"lam": self.lam,
"entcoeff": self.entcoeff,
"cg_damping": self.cg_damping,
"vf_stepsize": self.vf_stepsize,
"vf_iters": self.vf_iters,
"hidden_size_adversary": self.hidden_size_adversary,
"adversary_entcoeff": self.adversary_entcoeff,
"expert_dataset": self.expert_dataset,
"g_step": self.g_step,
"d_step": self.d_step,
"d_stepsize": self.d_stepsize,
"using_gail": self.using_gail,
"verbose": self.verbose,
"policy": self.policy,
"observation_space": self.observation_space,
"action_space": self.action_space,
"n_envs": self.n_envs,
"n_cpu_tf_sess": self.n_cpu_tf_sess,
"seed": self.seed,
"_vectorize_action": self._vectorize_action,
"policy_kwargs": self.policy_kwargs
}
params_to_save = self.get_parameters()
self._save_to_file(save_path, data=data, params=params_to_save, cloudpickle=cloudpickle)
| 54.162662 | 121 | 0.524742 | import time
from contextlib import contextmanager
from collections import deque
import gym
from mpi4py import MPI
import tensorflow as tf
import numpy as np
import stable_baselines.common.tf_util as tf_util
from stable_baselines.common.tf_util import total_episode_reward_logger
from stable_baselines.common import explained_variance, zipsame, dataset, fmt_row, colorize, ActorCriticRLModel, \
SetVerbosity, TensorboardWriter
from stable_baselines import logger
from stable_baselines.common.mpi_adam import MpiAdam
from stable_baselines.common.cg import conjugate_gradient
from stable_baselines.common.policies import ActorCriticPolicy
from stable_baselines.common.misc_util import flatten_lists
from stable_baselines.common.runners import traj_segment_generator
from stable_baselines.trpo_mpi.utils import add_vtarg_and_adv
class TRPO(ActorCriticRLModel):
    """Trust Region Policy Optimization (TRPO).

    Implements the natural-gradient policy update of Schulman et al. (2015):
    each iteration collects a batch of trajectories, solves for the step
    direction with conjugate gradient on the Fisher-vector product, and
    line-searches the step size subject to a KL-divergence trust region.
    The value function is fitted separately with an MPI-synchronized Adam
    optimizer.  The same class is reused as the policy-update step of GAIL
    (``self.using_gail`` is flipped by the GAIL subclass/setup).

    :param policy: (ActorCriticPolicy class) the policy network class
    :param env: (Gym environment or str) the environment to learn from
    :param gamma: (float) discount factor
    :param timesteps_per_batch: (int) environment steps collected per update
    :param max_kl: (float) KL trust-region radius for the line search
    :param cg_iters: (int) conjugate-gradient iterations per update
    :param lam: (float) GAE(lambda) factor for advantage estimation
    :param entcoeff: (float) entropy bonus coefficient in the surrogate gain
    :param cg_damping: (float) damping added to the Fisher-vector product
    :param vf_stepsize: (float) Adam step size for the value function
    :param vf_iters: (int) value-function epochs per policy update
    :param verbose: (int) 0 = silent, 1 = info, 2 = debug
    :param tensorboard_log: (str) tensorboard log directory (None disables)
    :param _init_setup_model: (bool) build the TF graph in the constructor
    :param policy_kwargs: (dict) extra keyword arguments for the policy
    :param full_tensorboard_log: (bool) also log histograms/run metadata
    :param seed: (int) random seed
    :param n_cpu_tf_sess: (int) threads for the TensorFlow session
    """

    def __init__(self, policy, env, gamma=0.99, timesteps_per_batch=1024, max_kl=0.01, cg_iters=10, lam=0.98,
                 entcoeff=0.0, cg_damping=1e-2, vf_stepsize=3e-4, vf_iters=3, verbose=0, tensorboard_log=None,
                 _init_setup_model=True, policy_kwargs=None, full_tensorboard_log=False,
                 seed=None, n_cpu_tf_sess=1):
        super(TRPO, self).__init__(policy=policy, env=env, verbose=verbose, requires_vec_env=False,
                                   _init_setup_model=_init_setup_model, policy_kwargs=policy_kwargs,
                                   seed=seed, n_cpu_tf_sess=n_cpu_tf_sess)
        # Set to True (by the GAIL wrapper) to train against a learned
        # discriminator reward instead of the environment reward.
        self.using_gail = False
        self.timesteps_per_batch = timesteps_per_batch
        self.cg_iters = cg_iters
        self.cg_damping = cg_damping
        self.gamma = gamma
        self.lam = lam
        self.max_kl = max_kl
        self.vf_iters = vf_iters
        self.vf_stepsize = vf_stepsize
        self.entcoeff = entcoeff
        self.tensorboard_log = tensorboard_log
        self.full_tensorboard_log = full_tensorboard_log

        # GAIL-only parameters (discriminator network size / entropy coeff,
        # generator vs discriminator steps per iteration, discriminator lr).
        self.hidden_size_adversary = 100
        self.adversary_entcoeff = 1e-3
        self.expert_dataset = None
        self.g_step = 1
        self.d_step = 1
        self.d_stepsize = 3e-4

        # Graph handles; all of these are populated by setup_model().
        self.graph = None
        self.sess = None
        self.policy_pi = None
        self.loss_names = None
        self.assign_old_eq_new = None
        self.compute_losses = None
        self.compute_lossandgrad = None
        self.compute_fvp = None
        self.compute_vflossandgrad = None
        self.d_adam = None
        self.vfadam = None
        self.get_flat = None
        self.set_from_flat = None
        self.timed = None
        self.allmean = None
        self.nworkers = None
        self.rank = None
        self.reward_giver = None
        self.step = None
        self.proba_step = None
        self.initial_state = None
        self.params = None
        self.summary = None

        if _init_setup_model:
            self.setup_model()

    def _get_pretrain_placeholders(self):
        """Return (obs placeholder, action placeholder, policy output) used
        by behavior-cloning pretraining; logits for discrete action spaces,
        the deterministic action otherwise."""
        policy = self.policy_pi
        action_ph = policy.pdtype.sample_placeholder([None])
        if isinstance(self.action_space, gym.spaces.Discrete):
            return policy.obs_ph, action_ph, policy.policy
        return policy.obs_ph, action_ph, policy.deterministic_action

    def setup_model(self):
        """Build the TensorFlow graph: new/old policy networks, surrogate
        losses, Fisher-vector product, MPI-synchronized optimizers and the
        tensorboard summaries."""
        # Imported here to avoid a circular import with the GAIL module.
        from stable_baselines.gail.adversary import TransitionClassifier

        with SetVerbosity(self.verbose):

            assert issubclass(self.policy, ActorCriticPolicy), "Error: the input policy for the TRPO model must be " \
                                                               "an instance of common.policies.ActorCriticPolicy."

            self.nworkers = MPI.COMM_WORLD.Get_size()
            self.rank = MPI.COMM_WORLD.Get_rank()
            np.set_printoptions(precision=3)

            self.graph = tf.Graph()
            with self.graph.as_default():
                self.set_random_seed(self.seed)
                self.sess = tf_util.make_session(num_cpu=self.n_cpu_tf_sess, graph=self.graph)

                if self.using_gail:
                    # GAIL discriminator that provides the imitation reward.
                    self.reward_giver = TransitionClassifier(self.observation_space, self.action_space,
                                                             self.hidden_size_adversary,
                                                             entcoeff=self.adversary_entcoeff)

                # Network for the new (trainable) policy.
                self.policy_pi = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs, 1,
                                             None, reuse=False, **self.policy_kwargs)

                # Frozen copy of the policy from before the update; the KL
                # trust region is measured against it.
                with tf.variable_scope("oldpi", reuse=False):
                    old_policy = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs, 1,
                                             None, reuse=False, **self.policy_kwargs)

                with tf.variable_scope("loss", reuse=False):
                    # Target advantage and empirical return placeholders.
                    atarg = tf.placeholder(dtype=tf.float32, shape=[None])
                    ret = tf.placeholder(dtype=tf.float32, shape=[None])

                    observation = self.policy_pi.obs_ph
                    action = self.policy_pi.pdtype.sample_placeholder([None])

                    kloldnew = old_policy.proba_distribution.kl(self.policy_pi.proba_distribution)
                    ent = self.policy_pi.proba_distribution.entropy()
                    meankl = tf.reduce_mean(kloldnew)
                    meanent = tf.reduce_mean(ent)
                    entbonus = self.entcoeff * meanent

                    # Value-function regression loss.
                    vferr = tf.reduce_mean(tf.square(self.policy_pi.value_flat - ret))

                    # Importance ratio pi_new(a|s) / pi_old(a|s); the surrogate
                    # gain is E[ratio * advantage].
                    ratio = tf.exp(self.policy_pi.proba_distribution.logp(action) -
                                   old_policy.proba_distribution.logp(action))
                    surrgain = tf.reduce_mean(ratio * atarg)

                    optimgain = surrgain + entbonus
                    losses = [optimgain, meankl, entbonus, surrgain, meanent]
                    self.loss_names = ["optimgain", "meankl", "entloss", "surrgain", "entropy"]

                    dist = meankl

                    # Policy variables for the natural-gradient step; value
                    # function variables are optimized separately with Adam.
                    all_var_list = tf_util.get_trainable_vars("model")
                    var_list = [v for v in all_var_list if "/vf" not in v.name and "/q/" not in v.name]
                    vf_var_list = [v for v in all_var_list if "/pi" not in v.name and "/logstd" not in v.name]

                    self.get_flat = tf_util.GetFlat(var_list, sess=self.sess)
                    self.set_from_flat = tf_util.SetFromFlat(var_list, sess=self.sess)

                    # Fisher-vector product: reshape the flat tangent vector
                    # back into per-variable tensors, take grad(KL)·tangent,
                    # then differentiate again.
                    klgrads = tf.gradients(dist, var_list)
                    flat_tangent = tf.placeholder(dtype=tf.float32, shape=[None], name="flat_tan")
                    shapes = [var.get_shape().as_list() for var in var_list]
                    start = 0
                    tangents = []
                    for shape in shapes:
                        var_size = tf_util.intprod(shape)
                        tangents.append(tf.reshape(flat_tangent[start: start + var_size], shape))
                        start += var_size
                    gvp = tf.add_n([tf.reduce_sum(grad * tangent)
                                    for (grad, tangent) in zipsame(klgrads, tangents)])
                    fvp = tf_util.flatgrad(gvp, var_list)

                    tf.summary.scalar('entropy_loss', meanent)
                    tf.summary.scalar('policy_gradient_loss', optimgain)
                    tf.summary.scalar('value_function_loss', surrgain)
                    tf.summary.scalar('approximate_kullback-leibler', meankl)
                    tf.summary.scalar('loss', optimgain + meankl + entbonus + surrgain + meanent)

                    # Copies "model" variables into "oldpi" before each update.
                    self.assign_old_eq_new = \
                        tf_util.function([], [], updates=[tf.assign(oldv, newv) for (oldv, newv) in
                                                          zipsame(tf_util.get_globals_vars("oldpi"),
                                                                  tf_util.get_globals_vars("model"))])
                    self.compute_losses = tf_util.function([observation, old_policy.obs_ph, action, atarg], losses)
                    self.compute_fvp = tf_util.function([flat_tangent, observation, old_policy.obs_ph, action, atarg],
                                                        fvp)
                    self.compute_vflossandgrad = tf_util.function([observation, old_policy.obs_ph, ret],
                                                                  tf_util.flatgrad(vferr, vf_var_list))

                    @contextmanager
                    def timed(msg):
                        # Wall-clock timing of a code section; only the rank-0
                        # worker prints, and only when verbose.
                        if self.rank == 0 and self.verbose >= 1:
                            print(colorize(msg, color='magenta'))
                            start_time = time.time()
                            yield
                            print(colorize("done in {:.3f} seconds".format((time.time() - start_time)),
                                           color='magenta'))
                        else:
                            yield

                    def allmean(arr):
                        # Average an array across all MPI workers.
                        assert isinstance(arr, np.ndarray)
                        out = np.empty_like(arr)
                        MPI.COMM_WORLD.Allreduce(arr, out, op=MPI.SUM)
                        out /= self.nworkers
                        return out

                tf_util.initialize(sess=self.sess)

                # Broadcast rank-0's initial parameters so all workers start
                # from identical weights.
                th_init = self.get_flat()
                MPI.COMM_WORLD.Bcast(th_init, root=0)
                self.set_from_flat(th_init)

                with tf.variable_scope("Adam_mpi", reuse=False):
                    self.vfadam = MpiAdam(vf_var_list, sess=self.sess)
                    if self.using_gail:
                        self.d_adam = MpiAdam(self.reward_giver.get_trainable_variables(), sess=self.sess)
                        self.d_adam.sync()
                    self.vfadam.sync()

                with tf.variable_scope("input_info", reuse=False):
                    tf.summary.scalar('discounted_rewards', tf.reduce_mean(ret))
                    tf.summary.scalar('learning_rate', tf.reduce_mean(self.vf_stepsize))
                    tf.summary.scalar('advantage', tf.reduce_mean(atarg))
                    tf.summary.scalar('kl_clip_range', tf.reduce_mean(self.max_kl))

                    if self.full_tensorboard_log:
                        tf.summary.histogram('discounted_rewards', ret)
                        tf.summary.histogram('learning_rate', self.vf_stepsize)
                        tf.summary.histogram('advantage', atarg)
                        tf.summary.histogram('kl_clip_range', self.max_kl)
                        if tf_util.is_image(self.observation_space):
                            tf.summary.image('observation', observation)
                        else:
                            tf.summary.histogram('observation', observation)

                self.timed = timed
                self.allmean = allmean

                self.step = self.policy_pi.step
                self.proba_step = self.policy_pi.proba_step
                self.initial_state = self.policy_pi.initial_state

                self.params = tf_util.get_trainable_vars("model") + tf_util.get_trainable_vars("oldpi")
                if self.using_gail:
                    self.params.extend(self.reward_giver.get_trainable_variables())

                self.summary = tf.summary.merge_all()

                self.compute_lossandgrad = \
                    tf_util.function([observation, old_policy.obs_ph, action, atarg, ret],
                                     [self.summary, tf_util.flatgrad(optimgain, var_list)] + losses)

    def learn(self, total_timesteps, callback=None, log_interval=100, tb_log_name="TRPO",
              reset_num_timesteps=True):
        """Run the TRPO training loop for ``total_timesteps`` environment
        steps and return ``self``.

        Each iteration: sample a batch, compute GAE advantages, take a
        conjugate-gradient / line-search natural policy step under the KL
        constraint, fit the value function, and (when using GAIL) update the
        discriminator.
        """
        new_tb_log = self._init_num_timesteps(reset_num_timesteps)
        callback = self._init_callback(callback)

        with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \
                as writer:
            self._setup_learn()

            with self.sess.as_default():
                callback.on_training_start(locals(), globals())

                seg_gen = traj_segment_generator(self.policy_pi, self.env, self.timesteps_per_batch,
                                                 reward_giver=self.reward_giver,
                                                 gail=self.using_gail, callback=callback)

                episodes_so_far = 0
                timesteps_so_far = 0
                iters_so_far = 0
                t_start = time.time()
                # Rolling windows (40 episodes) for the length/reward stats.
                len_buffer = deque(maxlen=40)
                reward_buffer = deque(maxlen=40)

                true_reward_buffer = None
                if self.using_gail:
                    true_reward_buffer = deque(maxlen=40)

                    # Initialize the expert-demonstration dataloader.
                    batchsize = self.timesteps_per_batch // self.d_step
                    self.expert_dataset.init_dataloader(batchsize)

                while True:
                    if timesteps_so_far >= total_timesteps:
                        break

                    logger.log("********** Iteration %i ************" % iters_so_far)

                    def fisher_vector_product(vec):
                        # Damped FVP, averaged across MPI workers; closes over
                        # fvpargs set below for the current batch.
                        return self.allmean(self.compute_fvp(vec, *fvpargs, sess=self.sess)) + self.cg_damping * vec

                    # ------------------ Policy (generator) update ------------------
                    logger.log("Optimizing Policy...")
                    # g_step is 1 unless configured otherwise for GAIL.
                    mean_losses = None
                    vpredbefore = None
                    tdlamret = None
                    observation = None
                    action = None
                    seg = None
                    for k in range(self.g_step):
                        with self.timed("sampling"):
                            seg = seg_gen.__next__()

                        # Stop training early (triggered by the callback).
                        if not seg.get('continue_training', True):
                            break

                        add_vtarg_and_adv(seg, self.gamma, self.lam)
                        observation, action = seg["observations"], seg["actions"]
                        atarg, tdlamret = seg["adv"], seg["tdlamret"]

                        # Value predictions before the update (for the
                        # explained-variance diagnostic below).
                        vpredbefore = seg["vpred"]
                        # Standardize the advantage estimates.
                        atarg = (atarg - atarg.mean()) / (atarg.std() + 1e-8)

                        if writer is not None:
                            total_episode_reward_logger(self.episode_reward,
                                                        seg["true_rewards"].reshape(
                                                            (self.n_envs, -1)),
                                                        seg["dones"].reshape((self.n_envs, -1)),
                                                        writer, self.num_timesteps)

                        args = seg["observations"], seg["observations"], seg["actions"], atarg
                        # Subsample every 5th transition for the (expensive)
                        # Fisher-vector products.
                        fvpargs = [arr[::5] for arr in args]

                        self.assign_old_eq_new(sess=self.sess)

                        with self.timed("computegrad"):
                            steps = self.num_timesteps + (k + 1) * (seg["total_timestep"] / self.g_step)
                            run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                            run_metadata = tf.RunMetadata() if self.full_tensorboard_log else None
                            # Loss + flat gradient; also emit tensorboard
                            # summaries (and run metadata when enabled).
                            if writer is not None:
                                summary, grad, *lossbefore = self.compute_lossandgrad(*args, tdlamret, sess=self.sess,
                                                                                      options=run_options,
                                                                                      run_metadata=run_metadata)
                                if self.full_tensorboard_log:
                                    writer.add_run_metadata(run_metadata, 'step%d' % steps)
                                writer.add_summary(summary, steps)
                            else:
                                _, grad, *lossbefore = self.compute_lossandgrad(*args, tdlamret, sess=self.sess,
                                                                                options=run_options,
                                                                                run_metadata=run_metadata)

                        lossbefore = self.allmean(np.array(lossbefore))
                        grad = self.allmean(grad)
                        if np.allclose(grad, 0):
                            logger.log("Got zero gradient. not updating")
                        else:
                            # Natural-gradient direction via conjugate gradient.
                            with self.timed("conjugate_gradient"):
                                stepdir = conjugate_gradient(fisher_vector_product, grad, cg_iters=self.cg_iters,
                                                             verbose=self.rank == 0 and self.verbose >= 1)
                            assert np.isfinite(stepdir).all()
                            # Scale the step so the (quadratic-model) KL equals
                            # max_kl; abs() guards against a NaN sqrt.
                            shs = .5 * stepdir.dot(fisher_vector_product(stepdir))
                            lagrange_multiplier = np.sqrt(abs(shs) / self.max_kl)
                            fullstep = stepdir / lagrange_multiplier
                            expectedimprove = grad.dot(fullstep)
                            surrbefore = lossbefore[0]
                            stepsize = 1.0
                            thbefore = self.get_flat()
                            # Backtracking line search: halve the step until the
                            # surrogate improves and the KL constraint holds.
                            for _ in range(10):
                                thnew = thbefore + fullstep * stepsize
                                self.set_from_flat(thnew)
                                mean_losses = surr, kl_loss, *_ = self.allmean(
                                    np.array(self.compute_losses(*args, sess=self.sess)))
                                improve = surr - surrbefore
                                logger.log("Expected: %.3f Actual: %.3f" % (expectedimprove, improve))
                                if not np.isfinite(mean_losses).all():
                                    logger.log("Got non-finite value of losses -- bad!")
                                elif kl_loss > self.max_kl * 1.5:
                                    logger.log("violated KL constraint. shrinking step.")
                                elif improve < 0:
                                    logger.log("surrogate didn't improve. shrinking step.")
                                else:
                                    logger.log("Stepsize OK!")
                                    break
                                stepsize *= .5
                            else:
                                # Line search exhausted: revert the parameters.
                                logger.log("couldn't compute a good step")
                                self.set_from_flat(thbefore)
                            if self.nworkers > 1 and iters_so_far % 20 == 0:
                                # Sanity check: parameters must stay in sync
                                # across MPI workers.
                                paramsums = MPI.COMM_WORLD.allgather((thnew.sum(), self.vfadam.getflat().sum()))
                                assert all(np.allclose(ps, paramsums[0]) for ps in paramsums[1:])

                        for (loss_name, loss_val) in zip(self.loss_names, mean_losses):
                            logger.record_tabular(loss_name, loss_val)

                        # Fit the value function with minibatch Adam.
                        with self.timed("vf"):
                            for _ in range(self.vf_iters):
                                for (mbob, mbret) in dataset.iterbatches((seg["observations"], seg["tdlamret"]),
                                                                         include_final_partial_batch=False,
                                                                         batch_size=128,
                                                                         shuffle=True):
                                    grad = self.allmean(self.compute_vflossandgrad(mbob, mbob, mbret, sess=self.sess))
                                    self.vfadam.update(grad, self.vf_stepsize)

                    # Propagate an early stop requested by the callback.
                    if not seg.get('continue_training', True):
                        break

                    logger.record_tabular("explained_variance_tdlam_before",
                                          explained_variance(vpredbefore, tdlamret))

                    if self.using_gail:
                        # ------------------ Discriminator update ------------------
                        logger.log("Optimizing Discriminator...")
                        logger.log(fmt_row(13, self.reward_giver.loss_name))
                        assert len(observation) == self.timesteps_per_batch
                        batch_size = self.timesteps_per_batch // self.d_step

                        # Per-minibatch discriminator losses (for logging).
                        d_losses = []
                        for ob_batch, ac_batch in dataset.iterbatches((observation, action),
                                                                      include_final_partial_batch=False,
                                                                      batch_size=batch_size,
                                                                      shuffle=True):
                            ob_expert, ac_expert = self.expert_dataset.get_next_batch()
                            # Update the observation running mean/std used by
                            # the discriminator's input normalization.
                            if self.reward_giver.normalize:
                                self.reward_giver.obs_rms.update(np.concatenate((ob_batch, ob_expert), 0))

                            # Flatten (batch, 1)-shaped discrete actions.
                            if isinstance(self.action_space, gym.spaces.Discrete):
                                if len(ac_batch.shape) == 2:
                                    ac_batch = ac_batch[:, 0]
                                if len(ac_expert.shape) == 2:
                                    ac_expert = ac_expert[:, 0]
                            *newlosses, grad = self.reward_giver.lossandgrad(ob_batch, ac_batch, ob_expert, ac_expert)
                            self.d_adam.update(self.allmean(grad), self.d_stepsize)
                            d_losses.append(newlosses)
                        logger.log(fmt_row(13, np.mean(d_losses, axis=0)))

                        # Gather episode lengths/returns from all workers.
                        lr_local = (seg["ep_lens"], seg["ep_rets"], seg["ep_true_rets"])
                        list_lr_pairs = MPI.COMM_WORLD.allgather(lr_local)
                        lens, rews, true_rets = map(flatten_lists, zip(*list_lr_pairs))
                        true_reward_buffer.extend(true_rets)
                    else:
                        # Gather episode lengths/returns from all workers.
                        lr_local = (seg["ep_lens"], seg["ep_rets"])
                        list_lr_pairs = MPI.COMM_WORLD.allgather(lr_local)
                        lens, rews = map(flatten_lists, zip(*list_lr_pairs))
                    len_buffer.extend(lens)
                    reward_buffer.extend(rews)

                    if len(len_buffer) > 0:
                        logger.record_tabular("EpLenMean", np.mean(len_buffer))
                        logger.record_tabular("EpRewMean", np.mean(reward_buffer))
                    if self.using_gail:
                        logger.record_tabular("EpTrueRewMean", np.mean(true_reward_buffer))
                    logger.record_tabular("EpThisIter", len(lens))
                    episodes_so_far += len(lens)
                    current_it_timesteps = MPI.COMM_WORLD.allreduce(seg["total_timestep"])
                    timesteps_so_far += current_it_timesteps
                    self.num_timesteps += current_it_timesteps
                    iters_so_far += 1

                    logger.record_tabular("EpisodesSoFar", episodes_so_far)
                    logger.record_tabular("TimestepsSoFar", self.num_timesteps)
                    logger.record_tabular("TimeElapsed", time.time() - t_start)
                    if self.verbose >= 1 and self.rank == 0:
                        logger.dump_tabular()

        callback.on_training_end()
        return self

    def save(self, save_path, cloudpickle=False):
        """Serialize hyperparameters plus trainable parameters to
        ``save_path`` (via the base-class ``_save_to_file`` helper)."""
        if self.using_gail and self.expert_dataset is not None:
            # Make the expert dataset picklable (e.g. shut down workers).
            self.expert_dataset.prepare_pickling()
        data = {
            "gamma": self.gamma,
            "timesteps_per_batch": self.timesteps_per_batch,
            "max_kl": self.max_kl,
            "cg_iters": self.cg_iters,
            "lam": self.lam,
            "entcoeff": self.entcoeff,
            "cg_damping": self.cg_damping,
            "vf_stepsize": self.vf_stepsize,
            "vf_iters": self.vf_iters,
            "hidden_size_adversary": self.hidden_size_adversary,
            "adversary_entcoeff": self.adversary_entcoeff,
            "expert_dataset": self.expert_dataset,
            "g_step": self.g_step,
            "d_step": self.d_step,
            "d_stepsize": self.d_stepsize,
            "using_gail": self.using_gail,
            "verbose": self.verbose,
            "policy": self.policy,
            "observation_space": self.observation_space,
            "action_space": self.action_space,
            "n_envs": self.n_envs,
            "n_cpu_tf_sess": self.n_cpu_tf_sess,
            "seed": self.seed,
            "_vectorize_action": self._vectorize_action,
            "policy_kwargs": self.policy_kwargs
        }

        params_to_save = self.get_parameters()

        self._save_to_file(save_path, data=data, params=params_to_save, cloudpickle=cloudpickle)
| true | true |
f720830ef2390c7f939ff23286a68aa2ce3b6879 | 6,591 | py | Python | mslib/msui/qt5/ui_topview_window.py | iamansoni/MSS | 69bc8fc61ab277697ca691119f911382a63860c0 | [
"Apache-2.0"
] | null | null | null | mslib/msui/qt5/ui_topview_window.py | iamansoni/MSS | 69bc8fc61ab277697ca691119f911382a63860c0 | [
"Apache-2.0"
] | null | null | null | mslib/msui/qt5/ui_topview_window.py | iamansoni/MSS | 69bc8fc61ab277697ca691119f911382a63860c0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mslib/msui/ui/ui_topview_window.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_TopViewWindow(object):
    """Auto-generated Qt UI definition for the MSS top-view window.

    Produced by the PyQt5 UI code generator (pyuic5) from
    ``ui_topview_window.ui``; manual edits are lost when the .ui file is
    recompiled. Layout: an expanding matplotlib map canvas on top and a
    toolbar row (redraw/options/roundtrip buttons plus tool and map-section
    combo boxes) below it.
    """

    def setupUi(self, TopViewWindow):
        """Create and lay out all widgets on ``TopViewWindow``."""
        TopViewWindow.setObjectName("TopViewWindow")
        TopViewWindow.resize(952, 782)
        self.centralwidget = QtWidgets.QWidget(TopViewWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
        self.verticalLayout.setObjectName("verticalLayout")
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        # Matplotlib map canvas (MplTopViewWidget is imported at module end,
        # presumably to avoid a circular import — see the bottom of the file).
        self.mpl = MplTopViewWidget(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.mpl.sizePolicy().hasHeightForWidth())
        self.mpl.setSizePolicy(sizePolicy)
        self.mpl.setMinimumSize(QtCore.QSize(100, 100))
        self.mpl.setObjectName("mpl")
        self.horizontalLayout_2.addWidget(self.mpl)
        self.verticalLayout.addLayout(self.horizontalLayout_2)
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        # Toolbar row: REDRAW button in bold.
        self.btMapRedraw = QtWidgets.QPushButton(self.centralwidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.btMapRedraw.setFont(font)
        self.btMapRedraw.setFlat(False)
        self.btMapRedraw.setObjectName("btMapRedraw")
        self.horizontalLayout.addWidget(self.btMapRedraw)
        spacerItem = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem)
        self.btSettings = QtWidgets.QPushButton(self.centralwidget)
        self.btSettings.setObjectName("btSettings")
        self.horizontalLayout.addWidget(self.btSettings)
        spacerItem1 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem1)
        self.btRoundtrip = QtWidgets.QPushButton(self.centralwidget)
        self.btRoundtrip.setObjectName("btRoundtrip")
        self.horizontalLayout.addWidget(self.btRoundtrip)
        spacerItem2 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem2)
        # Tool selector combo box; item texts are set in retranslateUi().
        self.cbTools = QtWidgets.QComboBox(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.cbTools.sizePolicy().hasHeightForWidth())
        self.cbTools.setSizePolicy(sizePolicy)
        self.cbTools.setObjectName("cbTools")
        self.cbTools.addItem("")
        self.cbTools.addItem("")
        self.horizontalLayout.addWidget(self.cbTools)
        spacerItem3 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem3)
        # Map-section selector combo box; item texts are set in retranslateUi().
        self.cbChangeMapSection = QtWidgets.QComboBox(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.cbChangeMapSection.sizePolicy().hasHeightForWidth())
        self.cbChangeMapSection.setSizePolicy(sizePolicy)
        self.cbChangeMapSection.setSizeAdjustPolicy(QtWidgets.QComboBox.AdjustToContents)
        self.cbChangeMapSection.setObjectName("cbChangeMapSection")
        self.cbChangeMapSection.addItem("")
        self.cbChangeMapSection.addItem("")
        self.cbChangeMapSection.addItem("")
        self.cbChangeMapSection.addItem("")
        self.cbChangeMapSection.addItem("")
        self.cbChangeMapSection.addItem("")
        self.cbChangeMapSection.addItem("")
        self.horizontalLayout.addWidget(self.cbChangeMapSection)
        spacerItem4 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem4)
        self.verticalLayout.addLayout(self.horizontalLayout)
        TopViewWindow.setCentralWidget(self.centralwidget)
        # Close-window action (shortcut assigned in retranslateUi()).
        self.actionCloseWindow = QtWidgets.QAction(TopViewWindow)
        self.actionCloseWindow.setObjectName("actionCloseWindow")
        TopViewWindow.addAction(self.actionCloseWindow)

        self.retranslateUi(TopViewWindow)
        self.actionCloseWindow.triggered.connect(TopViewWindow.close)
        QtCore.QMetaObject.connectSlotsByName(TopViewWindow)

    def retranslateUi(self, TopViewWindow):
        """Assign all translatable, user-visible strings and shortcuts."""
        _translate = QtCore.QCoreApplication.translate
        TopViewWindow.setWindowTitle(_translate("TopViewWindow", "Top View - Mission Support System"))
        self.btMapRedraw.setText(_translate("TopViewWindow", "&REDRAW"))
        self.btMapRedraw.setShortcut(_translate("TopViewWindow", "R"))
        self.btSettings.setText(_translate("TopViewWindow", "options"))
        self.btRoundtrip.setText(_translate("TopViewWindow", "make roundtrip"))
        self.cbTools.setItemText(0, _translate("TopViewWindow", "(select to open tool)"))
        self.cbTools.setItemText(1, _translate("TopViewWindow", "WMS"))
        self.cbChangeMapSection.setItemText(0, _translate("TopViewWindow", "to reset map select a region"))
        self.cbChangeMapSection.setItemText(1, _translate("TopViewWindow", "Spitsbergen, large"))
        self.cbChangeMapSection.setItemText(2, _translate("TopViewWindow", "Spitsbergen, local"))
        self.cbChangeMapSection.setItemText(3, _translate("TopViewWindow", "Europe (ste)"))
        self.cbChangeMapSection.setItemText(4, _translate("TopViewWindow", "Germany (ste)"))
        self.cbChangeMapSection.setItemText(5, _translate("TopViewWindow", "Europe (cyl)"))
        self.cbChangeMapSection.setItemText(6, _translate("TopViewWindow", "Germany (cyl)"))
        self.actionCloseWindow.setText(_translate("TopViewWindow", "CloseWindow"))
        self.actionCloseWindow.setShortcut(_translate("TopViewWindow", "Ctrl+W"))
from mslib.msui.mpl_qtwidget import MplTopViewWidget
| 58.327434 | 122 | 0.739645 |
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_TopViewWindow(object):
    """PyQt5 UI class for the MSS top-view window (pyuic5-generated code;
    do not edit by hand — regenerate from ``ui_topview_window.ui``).

    Builds a window with a large matplotlib map widget over a row of
    buttons and combo boxes.
    """

    def setupUi(self, TopViewWindow):
        """Instantiate every widget, apply size policies and wire layouts."""
        TopViewWindow.setObjectName("TopViewWindow")
        TopViewWindow.resize(952, 782)
        self.centralwidget = QtWidgets.QWidget(TopViewWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
        self.verticalLayout.setObjectName("verticalLayout")
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        # Map canvas widget (class imported at the bottom of this module).
        self.mpl = MplTopViewWidget(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.mpl.sizePolicy().hasHeightForWidth())
        self.mpl.setSizePolicy(sizePolicy)
        self.mpl.setMinimumSize(QtCore.QSize(100, 100))
        self.mpl.setObjectName("mpl")
        self.horizontalLayout_2.addWidget(self.mpl)
        self.verticalLayout.addLayout(self.horizontalLayout_2)
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        # Bold REDRAW push button.
        self.btMapRedraw = QtWidgets.QPushButton(self.centralwidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.btMapRedraw.setFont(font)
        self.btMapRedraw.setFlat(False)
        self.btMapRedraw.setObjectName("btMapRedraw")
        self.horizontalLayout.addWidget(self.btMapRedraw)
        spacerItem = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem)
        self.btSettings = QtWidgets.QPushButton(self.centralwidget)
        self.btSettings.setObjectName("btSettings")
        self.horizontalLayout.addWidget(self.btSettings)
        spacerItem1 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem1)
        self.btRoundtrip = QtWidgets.QPushButton(self.centralwidget)
        self.btRoundtrip.setObjectName("btRoundtrip")
        self.horizontalLayout.addWidget(self.btRoundtrip)
        spacerItem2 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem2)
        # Tool selector (texts filled in retranslateUi()).
        self.cbTools = QtWidgets.QComboBox(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.cbTools.sizePolicy().hasHeightForWidth())
        self.cbTools.setSizePolicy(sizePolicy)
        self.cbTools.setObjectName("cbTools")
        self.cbTools.addItem("")
        self.cbTools.addItem("")
        self.horizontalLayout.addWidget(self.cbTools)
        spacerItem3 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem3)
        # Map-section selector (texts filled in retranslateUi()).
        self.cbChangeMapSection = QtWidgets.QComboBox(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.cbChangeMapSection.sizePolicy().hasHeightForWidth())
        self.cbChangeMapSection.setSizePolicy(sizePolicy)
        self.cbChangeMapSection.setSizeAdjustPolicy(QtWidgets.QComboBox.AdjustToContents)
        self.cbChangeMapSection.setObjectName("cbChangeMapSection")
        self.cbChangeMapSection.addItem("")
        self.cbChangeMapSection.addItem("")
        self.cbChangeMapSection.addItem("")
        self.cbChangeMapSection.addItem("")
        self.cbChangeMapSection.addItem("")
        self.cbChangeMapSection.addItem("")
        self.cbChangeMapSection.addItem("")
        self.horizontalLayout.addWidget(self.cbChangeMapSection)
        spacerItem4 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem4)
        self.verticalLayout.addLayout(self.horizontalLayout)
        TopViewWindow.setCentralWidget(self.centralwidget)
        self.actionCloseWindow = QtWidgets.QAction(TopViewWindow)
        self.actionCloseWindow.setObjectName("actionCloseWindow")
        TopViewWindow.addAction(self.actionCloseWindow)

        self.retranslateUi(TopViewWindow)
        self.actionCloseWindow.triggered.connect(TopViewWindow.close)
        QtCore.QMetaObject.connectSlotsByName(TopViewWindow)

    def retranslateUi(self, TopViewWindow):
        """Apply all translatable display strings and keyboard shortcuts."""
        _translate = QtCore.QCoreApplication.translate
        TopViewWindow.setWindowTitle(_translate("TopViewWindow", "Top View - Mission Support System"))
        self.btMapRedraw.setText(_translate("TopViewWindow", "&REDRAW"))
        self.btMapRedraw.setShortcut(_translate("TopViewWindow", "R"))
        self.btSettings.setText(_translate("TopViewWindow", "options"))
        self.btRoundtrip.setText(_translate("TopViewWindow", "make roundtrip"))
        self.cbTools.setItemText(0, _translate("TopViewWindow", "(select to open tool)"))
        self.cbTools.setItemText(1, _translate("TopViewWindow", "WMS"))
        self.cbChangeMapSection.setItemText(0, _translate("TopViewWindow", "to reset map select a region"))
        self.cbChangeMapSection.setItemText(1, _translate("TopViewWindow", "Spitsbergen, large"))
        self.cbChangeMapSection.setItemText(2, _translate("TopViewWindow", "Spitsbergen, local"))
        self.cbChangeMapSection.setItemText(3, _translate("TopViewWindow", "Europe (ste)"))
        self.cbChangeMapSection.setItemText(4, _translate("TopViewWindow", "Germany (ste)"))
        self.cbChangeMapSection.setItemText(5, _translate("TopViewWindow", "Europe (cyl)"))
        self.cbChangeMapSection.setItemText(6, _translate("TopViewWindow", "Germany (cyl)"))
        self.actionCloseWindow.setText(_translate("TopViewWindow", "CloseWindow"))
        self.actionCloseWindow.setShortcut(_translate("TopViewWindow", "Ctrl+W"))
from mslib.msui.mpl_qtwidget import MplTopViewWidget
| true | true |
f720841a7564a5f85416ab65a54983fa155787e0 | 282 | py | Python | Seguranca/Ping/pingmultiplo.py | Luis12368/python | 23352d75ad13bcfd09ea85ab422fdc6ae1fcc5e7 | [
"MIT"
] | 1 | 2022-03-24T21:30:53.000Z | 2022-03-24T21:30:53.000Z | Seguranca-com-python/Ping/pingmultiplo.py | Luis12368/Bootcamp-Cognizant-Cloud-Data-Engineer | 789216a6fa76e4cb1336a73ed861bcf2e9d03751 | [
"MIT"
] | null | null | null | Seguranca-com-python/Ping/pingmultiplo.py | Luis12368/Bootcamp-Cognizant-Cloud-Data-Engineer | 789216a6fa76e4cb1336a73ed861bcf2e9d03751 | [
"MIT"
] | null | null | null | import os
import time
import subprocess

# Ping every host listed in hosts.txt (one IP/hostname per line), pausing
# between hosts.
#
# Fixes over the original:
#  * os.system('ping -n 2 {}'.format(ip)) interpolated untrusted file content
#    into a shell command string (shell injection); subprocess.run with an
#    argument list never invokes a shell.
#  * blank lines in hosts.txt no longer produce a bogus "ping -n 2" call.
with open('hosts.txt') as file:
    # Strip whitespace and skip empty lines.
    hosts = [line.strip() for line in file.read().splitlines() if line.strip()]

for ip in hosts:
    print("Verificando o IP", ip)
    print("-" * 60)
    # NOTE(review): '-n 2' is the Windows ping count flag; on Linux/macOS the
    # equivalent is '-c 2' — kept as-is to preserve the original behavior.
    subprocess.run(['ping', '-n', '2', ip])
    print("-" * 60)
    time.sleep(5)
| 18.8 | 44 | 0.524823 | import os
import time

# Probe every host listed in hosts.txt: print a banner, ping it twice,
# then wait five seconds before the next host.
with open('hosts.txt') as hosts_file:
    host_list = hosts_file.read().splitlines()

separator = "-" * 60
for address in host_list:
    print("Verificando o IP", address)
    print(separator)
    os.system('ping -n 2 {}'.format(address))
    print(separator)
    time.sleep(5)
| true | true |
f72084785a53b5effc7e214a1463c10da25fda6e | 58,018 | py | Python | salt/utils/schema.py | johnskopis/salt | 86adb6b0fe40230b8be4c74229e897a7a08f81a6 | [
"Apache-2.0"
] | 2 | 2018-11-08T02:59:24.000Z | 2021-01-04T00:30:50.000Z | salt/utils/schema.py | johnskopis/salt | 86adb6b0fe40230b8be4c74229e897a7a08f81a6 | [
"Apache-2.0"
] | 4 | 2020-09-04T10:19:34.000Z | 2020-11-09T12:55:59.000Z | salt/utils/schema.py | johnskopis/salt | 86adb6b0fe40230b8be4c74229e897a7a08f81a6 | [
"Apache-2.0"
] | 5 | 2017-06-16T23:48:13.000Z | 2021-04-08T17:43:48.000Z | # -*- coding: utf-8 -*-
'''
:codeauthor: Pedro Algarvio (pedro@algarvio.me)
:codeauthor: Alexandru Bleotu (alexandru.bleotu@morganstanley.com)
salt.utils.schema
~~~~~~~~~~~~~~~~~
Object Oriented Configuration - JSON Schema compatible generator
This code was inspired by `jsl`__, "A Python DSL for describing JSON
schemas".
.. __: https://jsl.readthedocs.io/
A configuration document or configuration document section is defined using
the py:class:`Schema`, the configuration items are defined by any of the
subclasses of py:class:`BaseSchemaItem` as attributes of a subclass of
py:class:`Schema` class.
    A more complex configuration document (containing a definitions section)
    is defined using the py:class:`DefinitionsSchema`. This type of
schema supports having complex configuration items as attributes (defined
extending the py:class:`ComplexSchemaItem`). These items have other
configuration items (complex or not) as attributes, allowing to verify
more complex JSON data structures
As an example:
.. code-block:: python
class HostConfig(Schema):
title = 'Host Configuration'
description = 'This is the host configuration'
host = StringItem(
'Host',
'The looong host description',
default=None,
minimum=1
)
port = NumberItem(
description='The port number',
default=80,
required=False,
minimum=0,
inclusiveMinimum=False,
maximum=65535
)
The serialized version of the above configuration definition is:
.. code-block:: python
>>> print(HostConfig.serialize())
OrderedDict([
('$schema', 'http://json-schema.org/draft-04/schema#'),
('title', 'Host Configuration'),
('description', 'This is the host configuration'),
('type', 'object'),
('properties', OrderedDict([
('host', {'minimum': 1,
'type': 'string',
'description': 'The looong host description',
'title': 'Host'}),
('port', {'description': 'The port number',
'default': 80,
'inclusiveMinimum': False,
'maximum': 65535,
'minimum': 0,
'type': 'number'})
])),
('required', ['host']),
('x-ordering', ['host', 'port']),
('additionalProperties', True)]
)
>>> print(salt.utils.json.dumps(HostConfig.serialize(), indent=2))
{
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Host Configuration",
"description": "This is the host configuration",
"type": "object",
"properties": {
"host": {
"minimum": 1,
"type": "string",
"description": "The looong host description",
"title": "Host"
},
"port": {
"description": "The port number",
"default": 80,
"inclusiveMinimum": false,
"maximum": 65535,
"minimum": 0,
"type": "number"
}
},
"required": [
"host"
],
"x-ordering": [
"host",
"port"
],
"additionalProperties": false
}
The serialized version of the configuration block can be used to validate a
configuration dictionary using the `python jsonschema library`__.
.. __: https://pypi.python.org/pypi/jsonschema
.. code-block:: python
>>> import jsonschema
>>> jsonschema.validate({'host': 'localhost', 'port': 80}, HostConfig.serialize())
>>> jsonschema.validate({'host': 'localhost', 'port': -1}, HostConfig.serialize())
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib/python2.7/site-packages/jsonschema/validators.py", line 478, in validate
cls(schema, *args, **kwargs).validate(instance)
File "/usr/lib/python2.7/site-packages/jsonschema/validators.py", line 123, in validate
raise error
jsonschema.exceptions.ValidationError: -1 is less than the minimum of 0
Failed validating 'minimum' in schema['properties']['port']:
{'default': 80,
'description': 'The port number',
'inclusiveMinimum': False,
'maximum': 65535,
'minimum': 0,
'type': 'number'}
On instance['port']:
-1
>>>
A configuration document can even be split into configuration sections. Let's reuse the above
``HostConfig`` class and include it in a configuration block:
.. code-block:: python
class LoggingConfig(Schema):
title = 'Logging Configuration'
description = 'This is the logging configuration'
log_level = StringItem(
'Logging Level',
'The logging level',
default='debug',
minimum=1
)
class MyConfig(Schema):
title = 'My Config'
description = 'This my configuration'
hostconfig = HostConfig()
logconfig = LoggingConfig()
The JSON Schema string version of the above is:
.. code-block:: python
>>> print salt.utils.json.dumps(MyConfig.serialize(), indent=4)
{
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "My Config",
"description": "This my configuration",
"type": "object",
"properties": {
"hostconfig": {
"id": "https://non-existing.saltstack.com/schemas/hostconfig.json#",
"title": "Host Configuration",
"description": "This is the host configuration",
"type": "object",
"properties": {
"host": {
"minimum": 1,
"type": "string",
"description": "The looong host description",
"title": "Host"
},
"port": {
"description": "The port number",
"default": 80,
"inclusiveMinimum": false,
"maximum": 65535,
"minimum": 0,
"type": "number"
}
},
"required": [
"host"
],
"x-ordering": [
"host",
"port"
],
"additionalProperties": false
},
"logconfig": {
"id": "https://non-existing.saltstack.com/schemas/logconfig.json#",
"title": "Logging Configuration",
"description": "This is the logging configuration",
"type": "object",
"properties": {
"log_level": {
"default": "debug",
"minimum": 1,
"type": "string",
"description": "The logging level",
"title": "Logging Level"
}
},
"required": [
"log_level"
],
"x-ordering": [
"log_level"
],
"additionalProperties": false
}
},
"additionalProperties": false
}
>>> import jsonschema
>>> jsonschema.validate(
{'hostconfig': {'host': 'localhost', 'port': 80},
'logconfig': {'log_level': 'debug'}},
MyConfig.serialize())
>>> jsonschema.validate(
{'hostconfig': {'host': 'localhost', 'port': -1},
'logconfig': {'log_level': 'debug'}},
MyConfig.serialize())
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib/python2.7/site-packages/jsonschema/validators.py", line 478, in validate
cls(schema, *args, **kwargs).validate(instance)
File "/usr/lib/python2.7/site-packages/jsonschema/validators.py", line 123, in validate
raise error
jsonschema.exceptions.ValidationError: -1 is less than the minimum of 0
Failed validating 'minimum' in schema['properties']['hostconfig']['properties']['port']:
{'default': 80,
'description': 'The port number',
'inclusiveMinimum': False,
'maximum': 65535,
'minimum': 0,
'type': 'number'}
On instance['hostconfig']['port']:
-1
>>>
If however, you just want to use the configuration blocks for readability
and do not desire the nested dictionaries serialization, you can pass
``flatten=True`` when defining a configuration section as a configuration
subclass attribute:
.. code-block:: python
class MyConfig(Schema):
title = 'My Config'
description = 'This my configuration'
hostconfig = HostConfig(flatten=True)
logconfig = LoggingConfig(flatten=True)
The JSON Schema string version of the above is:
.. code-block:: python
>>> print(salt.utils.json.dumps(MyConfig, indent=4))
{
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "My Config",
"description": "This my configuration",
"type": "object",
"properties": {
"host": {
"minimum": 1,
"type": "string",
"description": "The looong host description",
"title": "Host"
},
"port": {
"description": "The port number",
"default": 80,
"inclusiveMinimum": false,
"maximum": 65535,
"minimum": 0,
"type": "number"
},
"log_level": {
"default": "debug",
"minimum": 1,
"type": "string",
"description": "The logging level",
"title": "Logging Level"
}
},
"x-ordering": [
"host",
"port",
"log_level"
],
"additionalProperties": false
}
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import sys
import inspect
import textwrap
import functools
# Import salt libs
import salt.utils.args
#import salt.utils.yaml
from salt.utils.odict import OrderedDict
# Import 3rd-party libs
from salt.ext import six
BASE_SCHEMA_URL = 'https://non-existing.saltstack.com/schemas'
RENDER_COMMENT_YAML_MAX_LINE_LENGTH = 80
class Prepareable(type):
    '''
    Preserve attributes order for python 2.x
    '''
    # This code was taken from
    # https://github.com/aromanovich/jsl/blob/master/jsl/_compat/prepareable.py
    # which in turn was taken from https://gist.github.com/DasIch/5562625 with minor fixes
    if not six.PY3:
        def __new__(mcs, name, bases, attributes):
            # Python 2 never calls __prepare__, so wrap any user-supplied
            # __new__ with a shim that reconstructs the class body's
            # attribute declaration order from the defining frame's code
            # object and replays it into the __prepare__-provided namespace.
            try:
                constructor = attributes["__new__"]
            except KeyError:
                # No custom __new__ to wrap, nothing to reorder
                return type.__new__(mcs, name, bases, attributes)

            def preparing_constructor(mcs, name, bases, attributes):
                try:
                    mcs.__prepare__
                except AttributeError:
                    # The metaclass defines no __prepare__, behave normally
                    return constructor(mcs, name, bases, attributes)
                namespace = mcs.__prepare__(name, bases)
                # The caller's frame is the one executing the `class`
                # statement; its code object holds the class body's code
                # object, whose co_names lists names in appearance order.
                defining_frame = sys._getframe(1)
                for constant in reversed(defining_frame.f_code.co_consts):
                    if inspect.iscode(constant) and constant.co_name == name:
                        def get_index(attribute_name, _names=constant.co_names):  # pylint: disable=cell-var-from-loop
                            try:
                                return _names.index(attribute_name)
                            except ValueError:
                                # Unknown names sort first, keeping their
                                # relative order (sorted() is stable)
                                return 0
                        break
                else:
                    # Class body code object not found, skip the reordering
                    return constructor(mcs, name, bases, attributes)
                by_appearance = sorted(
                    attributes.items(), key=lambda item: get_index(item[0])
                )
                for key, value in by_appearance:
                    namespace[key] = value
                return constructor(mcs, name, bases, namespace)

            attributes["__new__"] = functools.wraps(constructor)(preparing_constructor)
            return type.__new__(mcs, name, bases, attributes)
class NullSentinel(object):
    '''
    Sentinel type whose single instance stands in for a null value,
    allowing fields to declare a default value of null.
    '''

    def __bool__(self):
        # A null value always evaluates as falsy
        return False

    # Python 2 spells the truthiness hook ``__nonzero__``; alias it
    __nonzero__ = __bool__
Null = NullSentinel()
'''
A special value that can be used to set the default value
of a field to null.
'''


# Lock NullSentinel down so Null stays a true singleton
def _prevent_new_instances(*args, **kwargs):
    raise TypeError("Can't create another NullSentinel instance")


NullSentinel.__new__ = staticmethod(_prevent_new_instances)
del _prevent_new_instances
class SchemaMeta(six.with_metaclass(Prepareable, type)):
    '''
    Metaclass for :class:`Schema` which collects the configuration items and
    sub-sections declared on the class (and its bases) and records their
    declaration order.
    '''

    @classmethod
    def __prepare__(mcs, name, bases):
        # Ordered namespace so attribute declaration order is preserved
        # (Prepareable emulates this hook on Python 2)
        return OrderedDict()

    def __new__(mcs, name, bases, attrs):
        # Mark the class as a configuration document/section
        attrs['__config__'] = True
        attrs['__flatten__'] = False
        attrs['__config_name__'] = None

        # Let's record the configuration items/sections
        items = {}
        sections = {}
        order = []
        # Inherit items/sections/order from parent classes first so that
        # declarations on this class can override them below
        for base in reversed(bases):
            if hasattr(base, '_items'):
                items.update(base._items)
            if hasattr(base, '_sections'):
                sections.update(base._sections)
            if hasattr(base, '_order'):
                order.extend(base._order)

        # Iterate through attrs to discover items/config sections
        for key, value in six.iteritems(attrs):
            entry_name = None
            if not hasattr(value, '__item__') and not hasattr(value, '__config__'):
                continue
            if hasattr(value, '__item__'):
                # the value is an item instance
                if hasattr(value, 'title') and value.title is None:
                    # It's an item instance without a title, make the title
                    # its attribute name
                    value.title = key
                entry_name = value.__item_name__ or key
                items[entry_name] = value
            if hasattr(value, '__config__'):
                entry_name = value.__config_name__ or key
                sections[entry_name] = value
            order.append(entry_name)

        attrs['_order'] = order
        attrs['_items'] = items
        attrs['_sections'] = sections
        return type.__new__(mcs, name, bases, attrs)

    def __call__(cls, flatten=False, allow_additional_items=False, **kwargs):
        # Instantiate the schema, honoring the flatten /
        # allow_additional_items / name configuration switches
        instance = object.__new__(cls)
        instance.__config_name__ = kwargs.pop('name', None)
        if flatten is True:
            # This configuration block is to be treated as a part of the
            # configuration for which it was defined as an attribute, not as
            # its own sub configuration
            instance.__flatten__ = True
        if allow_additional_items is True:
            # Serializes as "additionalProperties": true, so jsonschema will
            # accept keys beyond the items declared on the class
            instance.__allow_additional_items__ = True
        instance.__init__(**kwargs)
        return instance
class BaseSchemaItemMeta(six.with_metaclass(Prepareable, type)):
    '''
    Config item metaclass to "tag" the class as a configuration item
    '''
    @classmethod
    def __prepare__(mcs, name, bases):
        # Ordered namespace so item attribute order is preserved
        return OrderedDict()

    def __new__(mcs, name, bases, attrs):
        # Register the class as an item class
        attrs['__item__'] = True
        attrs['__item_name__'] = None
        # Instantiate an empty list to store the config item attribute names
        attributes = []
        for base in reversed(bases):
            try:
                base_attributes = getattr(base, '_attributes', [])
                if base_attributes:
                    attributes.extend(base_attributes)
                # Extend the attributes with the base argspec argument names
                # but skip "self"
                for argname in salt.utils.args.get_function_argspec(base.__init__).args:
                    if argname == 'self' or argname in attributes:
                        continue
                    if argname == 'name':
                        continue
                    attributes.append(argname)
            except TypeError:
                # On the base object type, __init__ is just a wrapper which
                # triggers a TypeError when we're trying to find out its
                # argspec
                continue
        attrs['_attributes'] = attributes
        return type.__new__(mcs, name, bases, attrs)

    def __call__(cls, *args, **kwargs):
        # Create the instance class
        instance = object.__new__(cls)
        if args:
            raise RuntimeError(
                'Please pass all arguments as named arguments. Un-named '
                'arguments are not supported'
            )
        for key in kwargs.copy():
            # Store the kwarg keys as the instance attributes for the
            # serialization step
            if key == 'name':
                # This is the item name to override the class attribute name
                instance.__item_name__ = kwargs.pop(key)
                continue
            if key not in instance._attributes:
                # NOTE(review): _attributes is a class-level list, so this
                # append is shared by every instance of the class — extra
                # kwarg names leak across instances. Confirm this is intended.
                instance._attributes.append(key)
        # Init the class
        instance.__init__(*args, **kwargs)
        # Validate the instance after initialization
        for base in reversed(inspect.getmro(cls)):
            validate_attributes = getattr(base, '__validate_attributes__', None)
            if validate_attributes:
                if instance.__validate_attributes__.__func__.__code__ is not validate_attributes.__code__:
                    # The method was overridden, run base.__validate_attributes__ function
                    base.__validate_attributes__(instance)
        # Finally, run the instance __validate_attributes__ function
        instance.__validate_attributes__()
        # Return the initialized class
        return instance
class Schema(six.with_metaclass(SchemaMeta, object)):
    '''
    Configuration definition class
    '''

    # Define some class level attributes to make PyLint happier
    title = None
    description = None
    _items = _sections = _order = None

    __flatten__ = False
    __allow_additional_items__ = False

    @classmethod
    def serialize(cls, id_=None):
        '''
        Return an ordered mapping representing this schema as a JSON Schema
        (draft 4) document.

        :param id_: When given, the result is a sub-schema and gets an
            ``id`` entry instead of the top-level ``$schema`` marker.
        '''
        # The order matters
        serialized = OrderedDict()
        if id_ is not None:
            # This is meant as a configuration section, sub json schema
            serialized['id'] = '{0}/{1}.json#'.format(BASE_SCHEMA_URL, id_)
        else:
            # Main configuration block, json schema
            serialized['$schema'] = 'http://json-schema.org/draft-04/schema#'
        if cls.title is not None:
            serialized['title'] = cls.title
        if cls.description is not None:
            if cls.description == cls.__doc__:
                # The description was lifted from the class docstring;
                # normalize its indentation
                serialized['description'] = textwrap.dedent(cls.description).strip()
            else:
                serialized['description'] = cls.description

        required = []
        ordering = []
        serialized['type'] = 'object'
        properties = OrderedDict()
        # NOTE(review): after_items_update is stored on the class, not the
        # instance, so concurrent/nested serialize() calls share it —
        # confirm single-threaded use is assumed here
        cls.after_items_update = []
        for name in cls._order:  # pylint: disable=E1133
            skip_order = False
            item_name = None
            if name in cls._sections:  # pylint: disable=E1135
                section = cls._sections[name]
                # Flattened sections serialize without their own id
                serialized_section = section.serialize(None if section.__flatten__ is True else name)
                if section.__flatten__ is True:
                    # Flatten the configuration section into the parent
                    # configuration
                    properties.update(serialized_section['properties'])
                    if 'x-ordering' in serialized_section:
                        ordering.extend(serialized_section['x-ordering'])
                    if 'required' in serialized_section:
                        required.extend(serialized_section['required'])
                    if hasattr(section, 'after_items_update'):
                        cls.after_items_update.extend(section.after_items_update)
                    skip_order = True
                else:
                    # Store it as a configuration section
                    properties[name] = serialized_section

            if name in cls._items:  # pylint: disable=E1135
                config = cls._items[name]
                item_name = config.__item_name__ or name
                # Handle the configuration items defined in the class instance
                if config.__flatten__ is True:
                    # Flattened items merge into the document after the
                    # properties have been collected (see below)
                    serialized_config = config.serialize()
                    cls.after_items_update.append(serialized_config)
                    skip_order = True
                else:
                    properties[item_name] = config.serialize()

                if config.required:
                    # If it's a required item, add it to the required list
                    required.append(item_name)

            if skip_order is False:
                # Store the order of the item
                if item_name is not None:
                    if item_name not in ordering:
                        ordering.append(item_name)
                else:
                    if name not in ordering:
                        ordering.append(name)

        if properties:
            serialized['properties'] = properties

        # Update the serialized object with any items to include after properties.
        # Do not overwrite properties already existing in the serialized dict.
        if cls.after_items_update:
            after_items_update = {}
            for entry in cls.after_items_update:
                for name, data in six.iteritems(entry):
                    if name in after_items_update:
                        if isinstance(after_items_update[name], list):
                            after_items_update[name].extend(data)
                    else:
                        after_items_update[name] = data
            if after_items_update:
                # Entries already in `serialized` win over the merged ones
                after_items_update.update(serialized)
                serialized = after_items_update

        if required:
            # Only include required if not empty
            serialized['required'] = required
        if ordering:
            # Only include ordering if not empty
            serialized['x-ordering'] = ordering
        serialized['additionalProperties'] = cls.__allow_additional_items__
        return serialized

    @classmethod
    def defaults(cls):
        '''
        Return a dictionary mapping property names (and, for one level of
        nested sections, sub-property names) to their declared defaults.
        '''
        serialized = cls.serialize()
        defaults = {}
        for name, details in serialized['properties'].items():
            if 'default' in details:
                defaults[name] = details['default']
                continue
            if 'properties' in details:
                for sname, sdetails in details['properties'].items():
                    if 'default' in sdetails:
                        defaults.setdefault(name, {})[sname] = sdetails['default']
                # NOTE(review): this continue only affects the inner loop's
                # last iteration path; it is redundant but harmless
                continue
        return defaults

    @classmethod
    def as_requirements_item(cls):
        '''
        Return a :class:`RequirementsItem` listing every property of this
        schema as required.
        '''
        serialized_schema = cls.serialize()
        required = serialized_schema.get('required', [])
        for name in serialized_schema['properties']:
            if name not in required:
                required.append(name)
        return RequirementsItem(requirements=required)

    #@classmethod
    #def render_as_rst(cls):
    #    '''
    #    Render the configuration block as a restructured text string
    #    '''
    #    # TODO: Implement RST rendering
    #    raise NotImplementedError

    #@classmethod
    #def render_as_yaml(cls):
    #    '''
    #    Render the configuration block as a parseable YAML string including comments
    #    '''
    #    # TODO: Implement YAML rendering
    #    raise NotImplementedError
class SchemaItem(six.with_metaclass(BaseSchemaItemMeta, object)):
    '''
    Base class for every configuration item.

    All configuration items must subclass it.
    '''
    # Class-level defaults, declared here to keep PyLint happy
    __type__ = None
    __format__ = None
    _attributes = None
    __flatten__ = False
    __serialize_attr_aliases__ = None

    required = False

    def __init__(self, required=None, **extra):
        '''
        :param required: If the configuration item is required. Defaults to ``False``.
        '''
        if required is not None:
            self.required = required
        self.extra = extra

    def __validate_attributes__(self):
        '''
        Run any validation check you need the instance attributes.

        ATTENTION:

            Don't call the parent class when overriding this
            method because it will just duplicate the executions. This class'es
            metaclass will take care of that.
        '''
        if self.required not in (True, False):
            raise RuntimeError('\'required\' can only be True/False')

    def _get_argname_value(self, argname):
        '''
        Return the argname value looking up on all possible attributes
        '''
        # First choice: a dedicated private getter (called only if callable)
        value = getattr(self, '__get_{0}__'.format(argname), None)
        if value is not None and callable(value):
            value = value()
        if value is None:
            # Second choice: a public class/instance attribute
            value = getattr(self, argname, None)
        if value is None:
            # Third choice: a dunder-style private attribute
            value = getattr(self, '__{0}__'.format(argname), None)
        if value is None:
            # Last resort: the extra keyword arguments dictionary
            value = self.extra.get(argname, None)
        return value

    def serialize(self):
        '''
        Return a serializable form of the config instance
        '''
        raise NotImplementedError
class BaseSchemaItem(SchemaItem):
    '''
    Base configuration items class.

    All configurations must subclass it
    '''

    # Let's define description as a class attribute, this will allow a custom configuration
    # item to do something like:
    #   class MyCustomConfig(StringItem):
    #       '''
    #       This is my custom config, blah, blah, blah
    #       '''
    #       description = __doc__
    #
    description = None
    # The same for all other base arguments
    title = None
    default = None
    enum = None
    enumNames = None

    def __init__(self, title=None, description=None, default=None, enum=None, enumNames=None, **kwargs):
        '''
        :param required:
            If the configuration item is required. Defaults to ``False``.
        :param title:
            A short explanation about the purpose of the data described by this item.
        :param description:
            A detailed explanation about the purpose of the data described by this item.
        :param default:
            The default value for this configuration item. May be :data:`.Null` (a special value
            to set the default value to null).
        :param enum:
            A list(list, tuple, set) of valid choices.
        '''
        # Only override the class-level defaults for explicitly passed values
        if title is not None:
            self.title = title
        if description is not None:
            self.description = description
        if default is not None:
            self.default = default
        if enum is not None:
            self.enum = enum
        if enumNames is not None:
            self.enumNames = enumNames
        super(BaseSchemaItem, self).__init__(**kwargs)

    def __validate_attributes__(self):
        # Normalize enum/enumNames into lists and validate their types/sizes
        if self.enum is not None:
            if not isinstance(self.enum, (list, tuple, set)):
                raise RuntimeError(
                    'Only the \'list\', \'tuple\' and \'set\' python types can be used '
                    'to define \'enum\''
                )
            if not isinstance(self.enum, list):
                self.enum = list(self.enum)
        if self.enumNames is not None:
            if not isinstance(self.enumNames, (list, tuple, set)):
                raise RuntimeError(
                    'Only the \'list\', \'tuple\' and \'set\' python types can be used '
                    'to define \'enumNames\''
                )
            # NOTE(review): if enumNames is given without enum, self.enum is
            # None here and len(self.enum) raises TypeError — confirm callers
            # always pass both together
            if len(self.enum) != len(self.enumNames):
                raise RuntimeError(
                    'The size of \'enumNames\' must match the size of \'enum\''
                )
            if not isinstance(self.enumNames, list):
                self.enumNames = list(self.enumNames)

    def serialize(self):
        '''
        Return a serializable form of the config instance
        '''
        serialized = {'type': self.__type__}
        for argname in self._attributes:
            if argname == 'required':
                # This is handled elsewhere
                continue
            argvalue = self._get_argname_value(argname)
            if argvalue is not None:
                if argvalue is Null:
                    # The Null sentinel serializes as a JSON null
                    argvalue = None
                # None values are not meant to be included in the
                # serialization, since this is not None...
                if self.__serialize_attr_aliases__ and argname in self.__serialize_attr_aliases__:
                    # Map the pythonic snake_case name onto the JSON Schema
                    # camelCase keyword
                    argname = self.__serialize_attr_aliases__[argname]
                serialized[argname] = argvalue
        return serialized

    def __get_description__(self):
        # Dedent descriptions lifted straight from the class docstring
        if self.description is not None:
            if self.description == self.__doc__:
                return textwrap.dedent(self.description).strip()
            return self.description

    #def render_as_rst(self, name):
    #    '''
    #    Render the configuration item as a restructured text string
    #    '''
    #    # TODO: Implement YAML rendering
    #    raise NotImplementedError

    #def render_as_yaml(self, name):
    #    '''
    #    Render the configuration item as a parseable YAML string including comments
    #    '''
    #    # TODO: Include the item rules in the output, minimum, maximum, etc...
    #    output = '# ----- '
    #    output += self.title
    #    output += ' '
    #    output += '-' * (RENDER_COMMENT_YAML_MAX_LINE_LENGTH - 7 - len(self.title) - 2)
    #    output += '>\n'
    #    if self.description:
    #        output += '\n'.join(textwrap.wrap(self.description,
    #                                          width=RENDER_COMMENT_YAML_MAX_LINE_LENGTH,
    #                                          initial_indent='# '))
    #        output += '\n'
    #    yamled_default_value = salt.utils.yaml.safe_dump(self.default, default_flow_style=False).split('\n...', 1)[0]
    #    output += '# Default: {0}\n'.format(yamled_default_value)
    #    output += '#{0}: {1}\n'.format(name, yamled_default_value)
    #    output += '# <---- '
    #    output += self.title
    #    output += ' '
    #    output += '-' * (RENDER_COMMENT_YAML_MAX_LINE_LENGTH - 7 - len(self.title) - 1)
    #    return output + '\n'
class NullItem(BaseSchemaItem):
    '''
    A configuration item of the JSON Schema ``null`` type.
    '''
    __type__ = 'null'
class BooleanItem(BaseSchemaItem):
    '''
    A configuration item of the JSON Schema ``boolean`` type.
    '''
    __type__ = 'boolean'
class StringItem(BaseSchemaItem):
    '''
    A string configuration field
    '''
    __type__ = 'string'
    # Map pythonic attribute names to their JSON Schema keyword spellings
    __serialize_attr_aliases__ = {
        'min_length': 'minLength',
        'max_length': 'maxLength'
    }

    format = None
    pattern = None
    min_length = None
    max_length = None

    def __init__(self,
                 format=None,  # pylint: disable=redefined-builtin
                 pattern=None,
                 min_length=None,
                 max_length=None,
                 **kwargs):
        '''
        :param required:
            If the configuration item is required. Defaults to ``False``.
        :param title:
            A short explanation about the purpose of the data described by this item.
        :param description:
            A detailed explanation about the purpose of the data described by this item.
        :param default:
            The default value for this configuration item. May be :data:`.Null` (a special value
            to set the default value to null).
        :param enum:
            A list(list, tuple, set) of valid choices.
        :param format:
            A semantic format of the string (for example, ``"date-time"``, ``"email"``, or ``"uri"``).
        :param pattern:
            A regular expression (ECMA 262) that a string value must match.
        :param min_length:
            The minimum length
        :param max_length:
            The maximum length
        '''
        # Only override the class-level defaults for explicitly passed values
        for attr_name, attr_value in (('format', format),
                                      ('pattern', pattern),
                                      ('min_length', min_length),
                                      ('max_length', max_length)):
            if attr_value is not None:
                setattr(self, attr_name, attr_value)
        super(StringItem, self).__init__(**kwargs)

    def __validate_attributes__(self):
        # Fall back to the class-defined format (set by format subclasses
        # such as EMailItem) when no explicit format was provided
        if self.format is None and self.__format__ is not None:
            self.format = self.__format__
class EMailItem(StringItem):
    '''
    An internet email address, see `RFC 5322, section 3.4.1`__.

    .. __: http://tools.ietf.org/html/rfc5322
    '''
    # JSON Schema 'format' keyword applied during serialization
    __format__ = 'email'
class IPv4Item(StringItem):
    '''
    An IPv4 address configuration field, according to dotted-quad ABNF syntax as defined in
    `RFC 2673, section 3.2`__.

    .. __: http://tools.ietf.org/html/rfc2673
    '''
    # JSON Schema 'format' keyword applied during serialization
    __format__ = 'ipv4'
class IPv6Item(StringItem):
    '''
    An IPv6 address configuration field, as defined in `RFC 2373, section 2.2`__.

    .. __: http://tools.ietf.org/html/rfc2373
    '''
    # JSON Schema 'format' keyword applied during serialization
    __format__ = 'ipv6'
class HostnameItem(StringItem):
    '''
    An Internet host name configuration field, see `RFC 1034, section 3.1`__.

    .. __: http://tools.ietf.org/html/rfc1034
    '''
    # JSON Schema 'format' keyword applied during serialization
    __format__ = 'hostname'
class DateTimeItem(StringItem):
    '''
    An ISO 8601 formatted date-time configuration field, as defined by `RFC 3339, section 5.6`__.

    .. __: http://tools.ietf.org/html/rfc3339
    '''
    # JSON Schema 'format' keyword applied during serialization
    __format__ = 'date-time'
class UriItem(StringItem):
    '''
    A universal resource identifier (URI) configuration field, according to `RFC3986`__.

    .. __: http://tools.ietf.org/html/rfc3986
    '''
    # JSON Schema 'format' keyword applied during serialization
    __format__ = 'uri'
class SecretItem(StringItem):
    '''
    A string configuration field containing a secret, for example, passwords, API keys, etc
    '''
    # Custom (non-standard) 'format' hint marking the value as sensitive
    __format__ = 'secret'
class NumberItem(BaseSchemaItem):
    '''
    A numeric (JSON Schema ``number``) configuration field.
    '''
    __type__ = 'number'
    # Map pythonic attribute names to their JSON Schema keyword spellings
    __serialize_attr_aliases__ = {
        'multiple_of': 'multipleOf',
        'exclusive_minimum': 'exclusiveMinimum',
        'exclusive_maximum': 'exclusiveMaximum',
    }

    multiple_of = None
    minimum = None
    exclusive_minimum = None
    maximum = None
    exclusive_maximum = None

    def __init__(self,
                 multiple_of=None,
                 minimum=None,
                 exclusive_minimum=None,
                 maximum=None,
                 exclusive_maximum=None,
                 **kwargs):
        '''
        :param required:
            If the configuration item is required. Defaults to ``False``.
        :param title:
            A short explanation about the purpose of the data described by this item.
        :param description:
            A detailed explanation about the purpose of the data described by this item.
        :param default:
            The default value for this configuration item. May be :data:`.Null` (a special value
            to set the default value to null).
        :param enum:
            A list(list, tuple, set) of valid choices.
        :param multiple_of:
            A value must be a multiple of this factor.
        :param minimum:
            The minimum allowed value
        :param exclusive_minimum:
            When ``True``, the value must be strictly greater than ``minimum``
            (the minimum itself is excluded).
        :param maximum:
            The maximum allowed value
        :param exclusive_maximum:
            When ``True``, the value must be strictly less than ``maximum``
            (the maximum itself is excluded).
        '''
        # Only override the class-level defaults for explicitly passed values
        for attr_name, attr_value in (('multiple_of', multiple_of),
                                      ('minimum', minimum),
                                      ('exclusive_minimum', exclusive_minimum),
                                      ('maximum', maximum),
                                      ('exclusive_maximum', exclusive_maximum)):
            if attr_value is not None:
                setattr(self, attr_name, attr_value)
        super(NumberItem, self).__init__(**kwargs)
class IntegerItem(NumberItem):
    '''
    An integer (JSON Schema ``integer`` type) configuration field, sharing
    all of :class:`NumberItem`'s range/multiple constraints.
    '''
    __type__ = 'integer'
class ArrayItem(BaseSchemaItem):
    '''
    A list (JSON Schema ``array`` type) configuration field.
    '''
    __type__ = 'array'
    # Map pythonic attribute names to their JSON Schema keyword spellings
    __serialize_attr_aliases__ = {
        'min_items': 'minItems',
        'max_items': 'maxItems',
        'unique_items': 'uniqueItems',
        'additional_items': 'additionalItems'
    }

    items = None
    min_items = None
    max_items = None
    unique_items = None
    additional_items = None

    def __init__(self,
                 items=None,
                 min_items=None,
                 max_items=None,
                 unique_items=None,
                 additional_items=None,
                 **kwargs):
        '''
        :param required:
            If the configuration item is required. Defaults to ``False``.
        :param title:
            A short explanation about the purpose of the data described by this item.
        :param description:
            A detailed explanation about the purpose of the data described by this item.
        :param default:
            The default value for this configuration item. May be :data:`.Null` (a special value
            to set the default value to null).
        :param enum:
            A list(list, tuple, set) of valid choices.
        :param items:
            Either of the following:
                * :class:`BaseSchemaItem` -- all items of the array must match the field schema;
                * a list or a tuple of :class:`fields <.BaseSchemaItem>` -- all items of the array must be
                  valid according to the field schema at the corresponding index (tuple typing);
        :param min_items:
            Minimum length of the array
        :param max_items:
            Maximum length of the array
        :param unique_items:
            Whether all the values in the array must be distinct.
        :param additional_items:
            If the value of ``items`` is a list or a tuple, and the array length is larger than
            the number of fields in ``items``, then the additional items are described
            by the :class:`.BaseField` passed using this argument.
        :type additional_items: bool or :class:`.BaseSchemaItem`
        '''
        # Only override the class-level defaults for explicitly passed values
        if items is not None:
            self.items = items
        if min_items is not None:
            self.min_items = min_items
        if max_items is not None:
            self.max_items = max_items
        if unique_items is not None:
            self.unique_items = unique_items
        if additional_items is not None:
            self.additional_items = additional_items
        super(ArrayItem, self).__init__(**kwargs)

    def __validate_attributes__(self):
        # Reject an item definition that constrains nothing about the
        # array's contents
        if not self.items and not self.additional_items:
            raise RuntimeError(
                'One of items or additional_items must be passed.'
            )
        if self.items is not None:
            if isinstance(self.items, (list, tuple)):
                for item in self.items:
                    if not isinstance(item, (Schema, SchemaItem)):
                        raise RuntimeError(
                            'All items passed in the item argument tuple/list must be '
                            'a subclass of Schema, SchemaItem or BaseSchemaItem, '
                            'not {0}'.format(type(item))
                        )
            elif not isinstance(self.items, (Schema, SchemaItem)):
                raise RuntimeError(
                    'The items argument passed must be a subclass of '
                    'Schema, SchemaItem or BaseSchemaItem, not '
                    '{0}'.format(type(self.items))
                )

    def __get_items__(self):
        # Serialization hook picked up by SchemaItem._get_argname_value()
        if isinstance(self.items, (Schema, SchemaItem)):
            # This is either a Schema or a BaseItem, return it in its
            # serialized form
            return self.items.serialize()
        if isinstance(self.items, (tuple, list)):
            # Tuple typing: serialize each positional schema
            items = []
            for item in self.items:
                items.append(item.serialize())
            return items
class DictItem(BaseSchemaItem):
    '''
    A dictionary (JSON Schema ``object`` type) configuration field.
    '''
    __type__ = 'object'
    # Map pythonic attribute names to their JSON Schema keyword spellings
    __serialize_attr_aliases__ = {
        'min_properties': 'minProperties',
        'max_properties': 'maxProperties',
        'pattern_properties': 'patternProperties',
        'additional_properties': 'additionalProperties'
    }

    properties = None
    pattern_properties = None
    additional_properties = None
    min_properties = None
    max_properties = None

    def __init__(self,
                 properties=None,
                 pattern_properties=None,
                 additional_properties=None,
                 min_properties=None,
                 max_properties=None,
                 **kwargs):
        '''
        :param required:
            If the configuration item is required. Defaults to ``False``.
        :type required:
            boolean
        :param title:
            A short explanation about the purpose of the data described by this item.
        :type title:
            str
        :param description:
            A detailed explanation about the purpose of the data described by this item.
        :param default:
            The default value for this configuration item. May be :data:`.Null` (a special value
            to set the default value to null).
        :param enum:
            A list(list, tuple, set) of valid choices.
        :param properties:
            A dictionary containing fields
        :param pattern_properties:
            A dictionary whose keys are regular expressions (ECMA 262).
            Properties match against these regular expressions, and for any that match,
            the property is described by the corresponding field schema.
        :type pattern_properties: dict[str -> :class:`.Schema` or
            :class:`.SchemaItem` or :class:`.BaseSchemaItem`]
        :param additional_properties:
            Describes properties that are not described by the ``properties`` or ``pattern_properties``.
        :type additional_properties: bool or :class:`.Schema` or :class:`.SchemaItem`
            or :class:`.BaseSchemaItem`
        :param min_properties:
            A minimum number of properties.
        :type min_properties: int
        :param max_properties:
            A maximum number of properties
        :type max_properties: int
        '''
        # Only override the class-level defaults for explicitly passed values
        if properties is not None:
            self.properties = properties
        if pattern_properties is not None:
            self.pattern_properties = pattern_properties
        if additional_properties is not None:
            self.additional_properties = additional_properties
        if min_properties is not None:
            self.min_properties = min_properties
        if max_properties is not None:
            self.max_properties = max_properties
        super(DictItem, self).__init__(**kwargs)

    def __validate_attributes__(self):
        '''
        Validate the types of properties/pattern_properties/additional_properties.
        '''
        if not self.properties and not self.pattern_properties and not self.additional_properties:
            raise RuntimeError(
                'One of properties, pattern_properties or additional_properties must be passed'
            )
        if self.properties is not None:
            if not isinstance(self.properties, (Schema, dict)):
                raise RuntimeError(
                    'The passed properties must be passed as a dict or '
                    ' a Schema not \'{0}\''.format(type(self.properties))
                )
            if not isinstance(self.properties, Schema):
                for key, prop in self.properties.items():
                    if not isinstance(prop, (Schema, SchemaItem)):
                        raise RuntimeError(
                            'The passed property who\'s key is \'{0}\' must be of type '
                            'Schema, SchemaItem or BaseSchemaItem, not '
                            '\'{1}\''.format(key, type(prop))
                        )
        if self.pattern_properties is not None:
            if not isinstance(self.pattern_properties, dict):
                raise RuntimeError(
                    'The passed pattern_properties must be passed as a dict '
                    'not \'{0}\''.format(type(self.pattern_properties))
                )
            for key, prop in self.pattern_properties.items():
                if not isinstance(prop, (Schema, SchemaItem)):
                    raise RuntimeError(
                        'The passed pattern_property who\'s key is \'{0}\' must '
                        'be of type Schema, SchemaItem or BaseSchemaItem, '
                        'not \'{1}\''.format(key, type(prop))
                    )
        if self.additional_properties is not None:
            if not isinstance(self.additional_properties, (bool, Schema, SchemaItem)):
                # Bug fix: this error message previously reported
                # type(self.pattern_properties) instead of the offending
                # additional_properties value's type
                raise RuntimeError(
                    'The passed additional_properties must be of type bool, '
                    'Schema, SchemaItem or BaseSchemaItem, not \'{0}\''.format(
                        type(self.additional_properties)
                    )
                )

    def __get_properties__(self):
        # Serialization hook picked up by SchemaItem._get_argname_value()
        if self.properties is None:
            return
        if isinstance(self.properties, Schema):
            return self.properties.serialize()['properties']
        properties = OrderedDict()
        for key, prop in self.properties.items():
            properties[key] = prop.serialize()
        return properties

    def __get_pattern_properties__(self):
        # Serialization hook picked up by SchemaItem._get_argname_value()
        if self.pattern_properties is None:
            return
        pattern_properties = OrderedDict()
        for key, prop in self.pattern_properties.items():
            pattern_properties[key] = prop.serialize()
        return pattern_properties

    def __get_additional_properties__(self):
        # Serialization hook; booleans pass through, schemas serialize
        if self.additional_properties is None:
            return
        if isinstance(self.additional_properties, bool):
            return self.additional_properties
        return self.additional_properties.serialize()

    def __call__(self, flatten=False):
        # Allow an instance to be re-used as a (possibly flattened) section
        self.__flatten__ = flatten
        return self

    def serialize(self):
        '''
        Return a serializable form of the config instance, including the
        ``required`` list collected from the nested properties.
        '''
        result = super(DictItem, self).serialize()
        required = []
        if self.properties is not None:
            if isinstance(self.properties, Schema):
                serialized = self.properties.serialize()
                if 'required' in serialized:
                    required.extend(serialized['required'])
            else:
                for key, prop in self.properties.items():
                    if prop.required:
                        required.append(key)
        if required:
            result['required'] = required
        return result
class RequirementsItem(SchemaItem):
    '''
    Schema item expressing a JSON schema ``required`` entry, built either
    from an explicit list/tuple/set of attribute names (or SchemaItems) or
    from a single SchemaItem whose serialization provides the value.
    '''
    __type__ = 'object'

    # The requirement names (or SchemaItem instances) to serialize
    requirements = None

    def __init__(self, requirements=None):
        if requirements is not None:
            self.requirements = requirements
        super(RequirementsItem, self).__init__()

    def __validate_attributes__(self):
        if self.requirements is None:
            raise RuntimeError(
                'The passed requirements must not be empty'
            )
        if not isinstance(self.requirements, (SchemaItem, list, tuple, set)):
            # Report the offending *type*, not its value, consistently with
            # every other validator in this module (the old message also
            # lacked a comma after 'set').
            raise RuntimeError(
                'The passed requirements must be passed as a list, tuple, '
                'set, SchemaItem or BaseSchemaItem, not \'{0}\''.format(
                    type(self.requirements)
                )
            )
        if not isinstance(self.requirements, SchemaItem):
            # Normalize tuples/sets to a list so serialization is stable
            if not isinstance(self.requirements, list):
                self.requirements = list(self.requirements)
            for idx, item in enumerate(self.requirements):
                if not isinstance(item, (six.string_types, SchemaItem)):
                    raise RuntimeError(
                        'The passed requirement at the {0} index must be of type '
                        'str or SchemaItem, not \'{1}\''.format(idx, type(item))
                    )

    def serialize(self):
        '''
        Serialize into ``{'required': [...]}`` suitable for merging into a
        parent schema.
        '''
        if isinstance(self.requirements, SchemaItem):
            requirements = self.requirements.serialize()
        else:
            requirements = []
            for requirement in self.requirements:
                if isinstance(requirement, SchemaItem):
                    requirements.append(requirement.serialize())
                    continue
                requirements.append(requirement)
        return {'required': requirements}
class OneOfItem(SchemaItem):
    '''
    Schema item serializing to a JSON schema ``oneOf`` container which
    holds the alternative sub-schemas.
    '''

    __type__ = 'oneOf'

    # The alternative Schema/SchemaItem entries
    items = None

    def __init__(self, items=None, required=None):
        if items is not None:
            self.items = items
        super(OneOfItem, self).__init__(required=required)

    def __validate_attributes__(self):
        if not self.items:
            raise RuntimeError(
                'The passed items must not be empty'
            )
        if not isinstance(self.items, (list, tuple)):
            raise RuntimeError(
                'The passed items must be passed as a list/tuple not '
                '\'{0}\''.format(type(self.items))
            )
        for idx, entry in enumerate(self.items):
            if not isinstance(entry, (Schema, SchemaItem)):
                raise RuntimeError(
                    'The passed item at the {0} index must be of type '
                    'Schema, SchemaItem or BaseSchemaItem, not '
                    '\'{1}\''.format(idx, type(entry))
                )
        # Normalize tuples to a list for a stable, mutable container
        if not isinstance(self.items, list):
            self.items = list(self.items)

    def __call__(self, flatten=False):
        # Allow toggling flattening inline, mirroring Schema sections
        self.__flatten__ = flatten
        return self

    def serialize(self):
        alternatives = [entry.serialize() for entry in self.items]
        return {self.__type__: alternatives}
class AnyOfItem(OneOfItem):
    # Same container behavior as OneOfItem, serialized under ``anyOf``
    __type__ = 'anyOf'
class AllOfItem(OneOfItem):
    # Same container behavior as OneOfItem, serialized under ``allOf``
    __type__ = 'allOf'
class NotItem(SchemaItem):
    '''
    Schema item serializing to a JSON schema ``not`` entry which negates
    the wrapped item's validation.
    '''
    __type__ = 'not'

    # The Schema/SchemaItem whose validation is negated
    item = None

    def __init__(self, item=None):
        if item is not None:
            self.item = item
        super(NotItem, self).__init__()

    def __validate_attributes__(self):
        if not self.item:
            raise RuntimeError(
                'An item must be passed'
            )
        if not isinstance(self.item, (Schema, SchemaItem)):
            # NOTE: the previous format string referenced '{1}' while
            # supplying a single argument, so building this error raised
            # IndexError instead of the intended RuntimeError.
            raise RuntimeError(
                'The passed item must be of type Schema, SchemaItem or '
                'BaseSchemaItem, not \'{0}\''.format(type(self.item))
            )

    def serialize(self):
        return {self.__type__: self.item.serialize()}
# ----- Custom Preconfigured Configs -------------------------------------------------------------------------------->
class PortItem(IntegerItem):
    # Integer item constrained to the valid TCP/UDP port range
    minimum = 0  # yes, 0 is a valid port number
    maximum = 65535
# <---- Custom Preconfigured Configs ---------------------------------------------------------------------------------
class ComplexSchemaItem(BaseSchemaItem):
    '''
    .. versionadded:: 2016.11.0

    Complex Schema Item

    Serializes as a JSON-pointer ``$ref`` into the schema's
    ``definitions`` section; the actual definition body is produced by
    :py:meth:`get_definition`.
    '''

    # This attribute is populated by the metaclass, but pylint fails to see it
    # and assumes it's not an iterable.
    # NOTE(review): this is a mutable class-level list; the metaclass is
    # expected to replace it per class -- confirm before relying on
    # per-subclass isolation.
    _attributes = []
    _definition_name = None

    def __init__(self, definition_name=None, required=None):
        super(ComplexSchemaItem, self).__init__(required=required)
        self.__type__ = 'object'
        self._definition_name = definition_name if definition_name else \
            self.__class__.__name__
        # Schema attributes might have been added as plain class attributes,
        # and those must be recorded in _attributes as well
        self._add_missing_schema_attributes()

    def _add_missing_schema_attributes(self):
        '''
        Adds any missed schema attributes to the _attributes list

        The attributes can be class attributes and they won't be
        included in the _attributes list automatically
        '''
        for attr in dir(self):
            if attr.startswith('__'):
                continue
            # A single getattr suffices; the previous code bound the value
            # to an unused local and fetched it a second time.
            if isinstance(getattr(self, attr), SchemaItem) and \
                    attr not in self._attributes:
                self._attributes.append(attr)

    @property
    def definition_name(self):
        '''The key under which this item appears in ``definitions``.'''
        return self._definition_name

    def serialize(self):
        '''
        The serialization of the complex item is a pointer to the item
        definition
        '''
        return {'$ref': '#/definitions/{0}'.format(self.definition_name)}

    def get_definition(self):
        '''Returns the definition of the complex item'''
        serialized = super(ComplexSchemaItem, self).serialize()
        # Adjust entries in the serialization
        del serialized['definition_name']
        serialized['title'] = self.definition_name
        properties = {}
        required_attr_names = []
        for attr_name in self._attributes:
            attr = getattr(self, attr_name)
            if attr and isinstance(attr, BaseSchemaItem):
                # Remove the attribute entry added by the base serialization
                del serialized[attr_name]
                properties[attr_name] = attr.serialize()
                properties[attr_name]['type'] = attr.__type__
                if attr.required:
                    required_attr_names.append(attr_name)
        if serialized.get('properties') is None:
            serialized['properties'] = {}
        serialized['properties'].update(properties)
        # Assign the required array
        if required_attr_names:
            serialized['required'] = required_attr_names
        return serialized

    def get_complex_attrs(self):
        '''Returns a list of the ComplexSchemaItem attributes'''
        return [getattr(self, attr_name) for attr_name in self._attributes if
                isinstance(getattr(self, attr_name), ComplexSchemaItem)]
class DefinitionsSchema(Schema):
    '''
    .. versionadded:: 2016.11.0

    JSON schema class that supports ComplexSchemaItem objects by adding
    a definitions section to the JSON schema, containing the item definitions.

    All references to ComplexSchemaItems are built using schema inline
    dereferencing.
    '''

    @classmethod
    def serialize(cls, id_=None):
        # Start from the base serialization
        serialized = super(DefinitionsSchema, cls).serialize(id_)

        # Breadth-first walk over all schema items (including those nested
        # inside container items) collecting every complex item found.
        # list() also normalizes the dict view returned on Python 3.
        complex_items = []
        queue = list(cls._items.values())
        while queue:
            entry = queue.pop(0)
            if isinstance(entry, ComplexSchemaItem):
                complex_items.append(entry)
                queue.extend(entry.get_complex_attrs())
            # Descend into container items so nested complex items are
            # discovered as well
            if isinstance(entry, OneOfItem):
                queue.extend(entry.items)
            elif isinstance(entry, ArrayItem):
                queue.append(entry.items)
            elif isinstance(entry, DictItem):
                if entry.properties:
                    queue.extend(entry.properties.values())
                if entry.additional_properties and \
                        isinstance(entry.additional_properties, SchemaItem):
                    queue.append(entry.additional_properties)

        # Augment the serialization with each collected definition
        definitions = OrderedDict()
        for entry in complex_items:
            if isinstance(entry, ComplexSchemaItem):
                definitions[entry.definition_name] = entry.get_definition()
        serialized['definitions'] = definitions
        return serialized
| 36.466373 | 122 | 0.561688 |
from __future__ import absolute_import, print_function, unicode_literals
import sys
import inspect
import textwrap
import functools
import salt.utils.args
from salt.utils.odict import OrderedDict
from salt.ext import six
BASE_SCHEMA_URL = 'https://non-existing.saltstack.com/schemas'
RENDER_COMMENT_YAML_MAX_LINE_LENGTH = 80
class Prepareable(type):
if not six.PY3:
def __new__(mcs, name, bases, attributes):
try:
constructor = attributes["__new__"]
except KeyError:
return type.__new__(mcs, name, bases, attributes)
def preparing_constructor(mcs, name, bases, attributes):
try:
mcs.__prepare__
except AttributeError:
return constructor(mcs, name, bases, attributes)
namespace = mcs.__prepare__(name, bases)
defining_frame = sys._getframe(1)
for constant in reversed(defining_frame.f_code.co_consts):
if inspect.iscode(constant) and constant.co_name == name:
def get_index(attribute_name, _names=constant.co_names):
try:
return _names.index(attribute_name)
except ValueError:
return 0
break
else:
return constructor(mcs, name, bases, attributes)
by_appearance = sorted(
attributes.items(), key=lambda item: get_index(item[0])
)
for key, value in by_appearance:
namespace[key] = value
return constructor(mcs, name, bases, namespace)
attributes["__new__"] = functools.wraps(constructor)(preparing_constructor)
return type.__new__(mcs, name, bases, attributes)
class NullSentinel(object):
def __bool__(self):
return False
__nonzero__ = __bool__
Null = NullSentinel()
def _failing_new(*args, **kwargs):
raise TypeError('Can\'t create another NullSentinel instance')
NullSentinel.__new__ = staticmethod(_failing_new)
del _failing_new
class SchemaMeta(six.with_metaclass(Prepareable, type)):
@classmethod
def __prepare__(mcs, name, bases):
return OrderedDict()
def __new__(mcs, name, bases, attrs):
# Mark the instance as a configuration document/section
attrs['__config__'] = True
attrs['__flatten__'] = False
attrs['__config_name__'] = None
# Let's record the configuration items/sections
items = {}
sections = {}
order = []
for base in reversed(bases):
if hasattr(base, '_items'):
items.update(base._items)
if hasattr(base, '_sections'):
sections.update(base._sections)
if hasattr(base, '_order'):
order.extend(base._order)
for key, value in six.iteritems(attrs):
entry_name = None
if not hasattr(value, '__item__') and not hasattr(value, '__config__'):
continue
if hasattr(value, '__item__'):
if hasattr(value, 'title') and value.title is None:
# it's name
value.title = key
entry_name = value.__item_name__ or key
items[entry_name] = value
if hasattr(value, '__config__'):
entry_name = value.__config_name__ or key
sections[entry_name] = value
order.append(entry_name)
attrs['_order'] = order
attrs['_items'] = items
attrs['_sections'] = sections
return type.__new__(mcs, name, bases, attrs)
def __call__(cls, flatten=False, allow_additional_items=False, **kwargs):
instance = object.__new__(cls)
instance.__config_name__ = kwargs.pop('name', None)
if flatten is True:
instance.__flatten__ = True
if allow_additional_items is True:
# The configuration block only accepts the configuration items
# which are defined on the class. On additional items, validation
# with jsonschema will fail
instance.__allow_additional_items__ = True
instance.__init__(**kwargs)
return instance
class BaseSchemaItemMeta(six.with_metaclass(Prepareable, type)):
@classmethod
def __prepare__(mcs, name, bases):
return OrderedDict()
def __new__(mcs, name, bases, attrs):
# Register the class as an item class
attrs['__item__'] = True
attrs['__item_name__'] = None
# Instantiate an empty list to store the config item attribute names
attributes = []
for base in reversed(bases):
try:
base_attributes = getattr(base, '_attributes', [])
if base_attributes:
attributes.extend(base_attributes)
# Extend the attributes with the base argspec argument names
# but skip "self"
for argname in salt.utils.args.get_function_argspec(base.__init__).args:
if argname == 'self' or argname in attributes:
continue
if argname == 'name':
continue
attributes.append(argname)
except TypeError:
# On the base object type, __init__ is just a wrapper which
# triggers a TypeError when we're trying to find out it's
# argspec
continue
attrs['_attributes'] = attributes
return type.__new__(mcs, name, bases, attrs)
def __call__(cls, *args, **kwargs):
# Create the instance class
instance = object.__new__(cls)
if args:
raise RuntimeError(
'Please pass all arguments as named arguments. Un-named '
'arguments are not supported'
)
for key in kwargs.copy():
# Store the kwarg keys as the instance attributes for the
# serialization step
if key == 'name':
# This is the item name to override the class attribute name
instance.__item_name__ = kwargs.pop(key)
continue
if key not in instance._attributes:
instance._attributes.append(key)
# Init the class
instance.__init__(*args, **kwargs)
# Validate the instance after initialization
for base in reversed(inspect.getmro(cls)):
validate_attributes = getattr(base, '__validate_attributes__', None)
if validate_attributes:
if instance.__validate_attributes__.__func__.__code__ is not validate_attributes.__code__:
# The method was overridden, run base.__validate_attributes__ function
base.__validate_attributes__(instance)
# Finally, run the instance __validate_attributes__ function
instance.__validate_attributes__()
# Return the initialized class
return instance
class Schema(six.with_metaclass(SchemaMeta, object)):
# Define some class level attributes to make PyLint happier
title = None
description = None
_items = _sections = _order = None
__flatten__ = False
__allow_additional_items__ = False
@classmethod
def serialize(cls, id_=None):
# The order matters
serialized = OrderedDict()
if id_ is not None:
# This is meant as a configuration section, sub json schema
serialized['id'] = '{0}/{1}.json
else:
# Main configuration block, json schema
serialized['$schema'] = 'http://json-schema.org/draft-04/schema
if cls.title is not None:
serialized['title'] = cls.title
if cls.description is not None:
if cls.description == cls.__doc__:
serialized['description'] = textwrap.dedent(cls.description).strip()
else:
serialized['description'] = cls.description
required = []
ordering = []
serialized['type'] = 'object'
properties = OrderedDict()
cls.after_items_update = []
for name in cls._order: # pylint: disable=E1133
skip_order = False
item_name = None
if name in cls._sections: # pylint: disable=E1135
section = cls._sections[name]
serialized_section = section.serialize(None if section.__flatten__ is True else name)
if section.__flatten__ is True:
# Flatten the configuration section into the parent
# configuration
properties.update(serialized_section['properties'])
if 'x-ordering' in serialized_section:
ordering.extend(serialized_section['x-ordering'])
if 'required' in serialized_section:
required.extend(serialized_section['required'])
if hasattr(section, 'after_items_update'):
cls.after_items_update.extend(section.after_items_update)
skip_order = True
else:
# Store it as a configuration section
properties[name] = serialized_section
if name in cls._items: # pylint: disable=E1135
config = cls._items[name]
item_name = config.__item_name__ or name
# Handle the configuration items defined in the class instance
if config.__flatten__ is True:
serialized_config = config.serialize()
cls.after_items_update.append(serialized_config)
skip_order = True
else:
properties[item_name] = config.serialize()
if config.required:
# If it's a required item, add it to the required list
required.append(item_name)
if skip_order is False:
if item_name is not None:
if item_name not in ordering:
ordering.append(item_name)
else:
if name not in ordering:
ordering.append(name)
if properties:
serialized['properties'] = properties
if cls.after_items_update:
after_items_update = {}
for entry in cls.after_items_update:
for name, data in six.iteritems(entry):
if name in after_items_update:
if isinstance(after_items_update[name], list):
after_items_update[name].extend(data)
else:
after_items_update[name] = data
if after_items_update:
after_items_update.update(serialized)
serialized = after_items_update
if required:
serialized['required'] = required
if ordering:
serialized['x-ordering'] = ordering
serialized['additionalProperties'] = cls.__allow_additional_items__
return serialized
@classmethod
def defaults(cls):
serialized = cls.serialize()
defaults = {}
for name, details in serialized['properties'].items():
if 'default' in details:
defaults[name] = details['default']
continue
if 'properties' in details:
for sname, sdetails in details['properties'].items():
if 'default' in sdetails:
defaults.setdefault(name, {})[sname] = sdetails['default']
continue
return defaults
@classmethod
def as_requirements_item(cls):
serialized_schema = cls.serialize()
required = serialized_schema.get('required', [])
for name in serialized_schema['properties']:
if name not in required:
required.append(name)
return RequirementsItem(requirements=required)
# Render the configuration block as a restructured text string
# '''
Render the configuration block as a parseable YAML string including comments
# '''
h_metaclass(BaseSchemaItemMeta, object)):
__type__ = None
__format__ = None
_attributes = None
__flatten__ = False
__serialize_attr_aliases__ = None
required = False
def __init__(self, required=None, **extra):
if required is not None:
self.required = required
self.extra = extra
def __validate_attributes__(self):
if self.required not in (True, False):
raise RuntimeError(
'\'required\' can only be True/False'
)
def _get_argname_value(self, argname):
argvalue = getattr(self, '__get_{0}__'.format(argname), None)
if argvalue is not None and callable(argvalue):
argvalue = argvalue()
if argvalue is None:
argvalue = getattr(self, argname, None)
if argvalue is None:
# Let's see if it's defined as a private class variable
argvalue = getattr(self, '__{0}__'.format(argname), None)
if argvalue is None:
# Let's look for it in the extra dictionary
argvalue = self.extra.get(argname, None)
return argvalue
def serialize(self):
raise NotImplementedError
class BaseSchemaItem(SchemaItem):
# item to do something like:
# class MyCustomConfig(StringItem):
# '''
# This is my custom config, blah, blah, blah
# '''
# description = __doc__
#
description = None
# The same for all other base arguments
title = None
default = None
enum = None
enumNames = None
def __init__(self, title=None, description=None, default=None, enum=None, enumNames=None, **kwargs):
if title is not None:
self.title = title
if description is not None:
self.description = description
if default is not None:
self.default = default
if enum is not None:
self.enum = enum
if enumNames is not None:
self.enumNames = enumNames
super(BaseSchemaItem, self).__init__(**kwargs)
def __validate_attributes__(self):
if self.enum is not None:
if not isinstance(self.enum, (list, tuple, set)):
raise RuntimeError(
'Only the \'list\', \'tuple\' and \'set\' python types can be used '
'to define \'enum\''
)
if not isinstance(self.enum, list):
self.enum = list(self.enum)
if self.enumNames is not None:
if not isinstance(self.enumNames, (list, tuple, set)):
raise RuntimeError(
'Only the \'list\', \'tuple\' and \'set\' python types can be used '
'to define \'enumNames\''
)
if len(self.enum) != len(self.enumNames):
raise RuntimeError(
'The size of \'enumNames\' must match the size of \'enum\''
)
if not isinstance(self.enumNames, list):
self.enumNames = list(self.enumNames)
def serialize(self):
serialized = {'type': self.__type__}
for argname in self._attributes:
if argname == 'required':
# This is handled elsewhere
continue
argvalue = self._get_argname_value(argname)
if argvalue is not None:
if argvalue is Null:
argvalue = None
# None values are not meant to be included in the
# serialization, since this is not None...
if self.__serialize_attr_aliases__ and argname in self.__serialize_attr_aliases__:
argname = self.__serialize_attr_aliases__[argname]
serialized[argname] = argvalue
return serialized
def __get_description__(self):
if self.description is not None:
if self.description == self.__doc__:
return textwrap.dedent(self.description).strip()
return self.description
#def render_as_rst(self, name):
# '''
# Render the configuration item as a restructured text string
# '''
# # TODO: Implement YAML rendering
# raise NotImplementedError
#def render_as_yaml(self, name):
# '''
# Render the configuration item as a parseable YAML string including comments
# '''
# # TODO: Include the item rules in the output, minimum, maximum, etc...
# output = '
# output += self.title
# output += ' '
# output += '-' * (RENDER_COMMENT_YAML_MAX_LINE_LENGTH - 7 - len(self.title) - 2)
# output += '>\n'
# if self.description:
# output += '\n'.join(textwrap.wrap(self.description,
# width=RENDER_COMMENT_YAML_MAX_LINE_LENGTH,
# initial_indent='
# output += '\n'
# yamled_default_value = salt.utils.yaml.safe_dump(self.default, default_flow_style=False).split('\n...', 1)[0]
# output += '
# output += '
# output += '
# output += self.title
# output += ' '
# output += '-' * (RENDER_COMMENT_YAML_MAX_LINE_LENGTH - 7 - len(self.title) - 1)
# return output + '\n'
class NullItem(BaseSchemaItem):
__type__ = 'null'
class BooleanItem(BaseSchemaItem):
__type__ = 'boolean'
class StringItem(BaseSchemaItem):
__type__ = 'string'
__serialize_attr_aliases__ = {
'min_length': 'minLength',
'max_length': 'maxLength'
}
format = None
pattern = None
min_length = None
max_length = None
def __init__(self,
format=None, # pylint: disable=redefined-builtin
pattern=None,
min_length=None,
max_length=None,
**kwargs):
if format is not None: # pylint: disable=redefined-builtin
self.format = format
if pattern is not None:
self.pattern = pattern
if min_length is not None:
self.min_length = min_length
if max_length is not None:
self.max_length = max_length
super(StringItem, self).__init__(**kwargs)
def __validate_attributes__(self):
if self.format is None and self.__format__ is not None:
self.format = self.__format__
class EMailItem(StringItem):
__format__ = 'email'
class IPv4Item(StringItem):
__format__ = 'ipv4'
class IPv6Item(StringItem):
__format__ = 'ipv6'
class HostnameItem(StringItem):
__format__ = 'hostname'
class DateTimeItem(StringItem):
__format__ = 'date-time'
class UriItem(StringItem):
__format__ = 'uri'
class SecretItem(StringItem):
__format__ = 'secret'
class NumberItem(BaseSchemaItem):
__type__ = 'number'
__serialize_attr_aliases__ = {
'multiple_of': 'multipleOf',
'exclusive_minimum': 'exclusiveMinimum',
'exclusive_maximum': 'exclusiveMaximum',
}
multiple_of = None
minimum = None
exclusive_minimum = None
maximum = None
exclusive_maximum = None
def __init__(self,
multiple_of=None,
minimum=None,
exclusive_minimum=None,
maximum=None,
exclusive_maximum=None,
**kwargs):
if multiple_of is not None:
self.multiple_of = multiple_of
if minimum is not None:
self.minimum = minimum
if exclusive_minimum is not None:
self.exclusive_minimum = exclusive_minimum
if maximum is not None:
self.maximum = maximum
if exclusive_maximum is not None:
self.exclusive_maximum = exclusive_maximum
super(NumberItem, self).__init__(**kwargs)
class IntegerItem(NumberItem):
__type__ = 'integer'
class ArrayItem(BaseSchemaItem):
__type__ = 'array'
__serialize_attr_aliases__ = {
'min_items': 'minItems',
'max_items': 'maxItems',
'unique_items': 'uniqueItems',
'additional_items': 'additionalItems'
}
items = None
min_items = None
max_items = None
unique_items = None
additional_items = None
def __init__(self,
items=None,
min_items=None,
max_items=None,
unique_items=None,
additional_items=None,
**kwargs):
if items is not None:
self.items = items
if min_items is not None:
self.min_items = min_items
if max_items is not None:
self.max_items = max_items
if unique_items is not None:
self.unique_items = unique_items
if additional_items is not None:
self.additional_items = additional_items
super(ArrayItem, self).__init__(**kwargs)
def __validate_attributes__(self):
if not self.items and not self.additional_items:
raise RuntimeError(
'One of items or additional_items must be passed.'
)
if self.items is not None:
if isinstance(self.items, (list, tuple)):
for item in self.items:
if not isinstance(item, (Schema, SchemaItem)):
raise RuntimeError(
'All items passed in the item argument tuple/list must be '
'a subclass of Schema, SchemaItem or BaseSchemaItem, '
'not {0}'.format(type(item))
)
elif not isinstance(self.items, (Schema, SchemaItem)):
raise RuntimeError(
'The items argument passed must be a subclass of '
'Schema, SchemaItem or BaseSchemaItem, not '
'{0}'.format(type(self.items))
)
def __get_items__(self):
if isinstance(self.items, (Schema, SchemaItem)):
# This is either a Schema or a Basetem, return it in it's
return self.items.serialize()
if isinstance(self.items, (tuple, list)):
items = []
for item in self.items:
items.append(item.serialize())
return items
class DictItem(BaseSchemaItem):
__type__ = 'object'
__serialize_attr_aliases__ = {
'min_properties': 'minProperties',
'max_properties': 'maxProperties',
'pattern_properties': 'patternProperties',
'additional_properties': 'additionalProperties'
}
properties = None
pattern_properties = None
additional_properties = None
min_properties = None
max_properties = None
def __init__(self,
properties=None,
pattern_properties=None,
additional_properties=None,
min_properties=None,
max_properties=None,
**kwargs):
if properties is not None:
self.properties = properties
if pattern_properties is not None:
self.pattern_properties = pattern_properties
if additional_properties is not None:
self.additional_properties = additional_properties
if min_properties is not None:
self.min_properties = min_properties
if max_properties is not None:
self.max_properties = max_properties
super(DictItem, self).__init__(**kwargs)
def __validate_attributes__(self):
if not self.properties and not self.pattern_properties and not self.additional_properties:
raise RuntimeError(
'One of properties, pattern_properties or additional_properties must be passed'
)
if self.properties is not None:
if not isinstance(self.properties, (Schema, dict)):
raise RuntimeError(
'The passed properties must be passed as a dict or '
' a Schema not \'{0}\''.format(type(self.properties))
)
if not isinstance(self.properties, Schema):
for key, prop in self.properties.items():
if not isinstance(prop, (Schema, SchemaItem)):
raise RuntimeError(
'The passed property who\'s key is \'{0}\' must be of type '
'Schema, SchemaItem or BaseSchemaItem, not '
'\'{1}\''.format(key, type(prop))
)
if self.pattern_properties is not None:
if not isinstance(self.pattern_properties, dict):
raise RuntimeError(
'The passed pattern_properties must be passed as a dict '
'not \'{0}\''.format(type(self.pattern_properties))
)
for key, prop in self.pattern_properties.items():
if not isinstance(prop, (Schema, SchemaItem)):
raise RuntimeError(
'The passed pattern_property who\'s key is \'{0}\' must '
'be of type Schema, SchemaItem or BaseSchemaItem, '
'not \'{1}\''.format(key, type(prop))
)
if self.additional_properties is not None:
if not isinstance(self.additional_properties, (bool, Schema, SchemaItem)):
raise RuntimeError(
'The passed additional_properties must be of type bool, '
'Schema, SchemaItem or BaseSchemaItem, not \'{0}\''.format(
type(self.pattern_properties)
)
)
def __get_properties__(self):
if self.properties is None:
return
if isinstance(self.properties, Schema):
return self.properties.serialize()['properties']
properties = OrderedDict()
for key, prop in self.properties.items():
properties[key] = prop.serialize()
return properties
def __get_pattern_properties__(self):
if self.pattern_properties is None:
return
pattern_properties = OrderedDict()
for key, prop in self.pattern_properties.items():
pattern_properties[key] = prop.serialize()
return pattern_properties
def __get_additional_properties__(self):
if self.additional_properties is None:
return
if isinstance(self.additional_properties, bool):
return self.additional_properties
return self.additional_properties.serialize()
def __call__(self, flatten=False):
self.__flatten__ = flatten
return self
def serialize(self):
result = super(DictItem, self).serialize()
required = []
if self.properties is not None:
if isinstance(self.properties, Schema):
serialized = self.properties.serialize()
if 'required' in serialized:
required.extend(serialized['required'])
else:
for key, prop in self.properties.items():
if prop.required:
required.append(key)
if required:
result['required'] = required
return result
class RequirementsItem(SchemaItem):
__type__ = 'object'
requirements = None
def __init__(self, requirements=None):
if requirements is not None:
self.requirements = requirements
super(RequirementsItem, self).__init__()
def __validate_attributes__(self):
if self.requirements is None:
raise RuntimeError(
'The passed requirements must not be empty'
)
if not isinstance(self.requirements, (SchemaItem, list, tuple, set)):
raise RuntimeError(
'The passed requirements must be passed as a list, tuple, '
'set SchemaItem or BaseSchemaItem, not \'{0}\''.format(self.requirements)
)
if not isinstance(self.requirements, SchemaItem):
if not isinstance(self.requirements, list):
self.requirements = list(self.requirements)
for idx, item in enumerate(self.requirements):
if not isinstance(item, (six.string_types, SchemaItem)):
raise RuntimeError(
'The passed requirement at the {0} index must be of type '
'str or SchemaItem, not \'{1}\''.format(idx, type(item))
)
def serialize(self):
if isinstance(self.requirements, SchemaItem):
requirements = self.requirements.serialize()
else:
requirements = []
for requirement in self.requirements:
if isinstance(requirement, SchemaItem):
requirements.append(requirement.serialize())
continue
requirements.append(requirement)
return {'required': requirements}
class OneOfItem(SchemaItem):
__type__ = 'oneOf'
items = None
def __init__(self, items=None, required=None):
if items is not None:
self.items = items
super(OneOfItem, self).__init__(required=required)
def __validate_attributes__(self):
if not self.items:
raise RuntimeError(
'The passed items must not be empty'
)
if not isinstance(self.items, (list, tuple)):
raise RuntimeError(
'The passed items must be passed as a list/tuple not '
'\'{0}\''.format(type(self.items))
)
for idx, item in enumerate(self.items):
if not isinstance(item, (Schema, SchemaItem)):
raise RuntimeError(
'The passed item at the {0} index must be of type '
'Schema, SchemaItem or BaseSchemaItem, not '
'\'{1}\''.format(idx, type(item))
)
if not isinstance(self.items, list):
self.items = list(self.items)
def __call__(self, flatten=False):
self.__flatten__ = flatten
return self
def serialize(self):
return {self.__type__: [i.serialize() for i in self.items]}
class AnyOfItem(OneOfItem):
__type__ = 'anyOf'
class AllOfItem(OneOfItem):
__type__ = 'allOf'
class NotItem(SchemaItem):
__type__ = 'not'
item = None
def __init__(self, item=None):
if item is not None:
self.item = item
super(NotItem, self).__init__()
def __validate_attributes__(self):
if not self.item:
raise RuntimeError(
'An item must be passed'
)
if not isinstance(self.item, (Schema, SchemaItem)):
raise RuntimeError(
'The passed item be of type Schema, SchemaItem or '
'BaseSchemaItem, not \'{1}\''.format(type(self.item))
)
def serialize(self):
return {self.__type__: self.item.serialize()}
class PortItem(IntegerItem):
minimum = 0
maximum = 65535
class ComplexSchemaItem(BaseSchemaItem):
_attributes = []
_definition_name = None
def __init__(self, definition_name=None, required=None):
super(ComplexSchemaItem, self).__init__(required=required)
self.__type__ = 'object'
self._definition_name = definition_name if definition_name else \
self.__class__.__name__
# Schema attributes might have been added as class attributes so we
# and they must be added to the _attributes attr
self._add_missing_schema_attributes()
def _add_missing_schema_attributes(self):
for attr in [attr for attr in dir(self) if not attr.startswith('__')]:
attr_val = getattr(self, attr)
if isinstance(getattr(self, attr), SchemaItem) and \
attr not in self._attributes:
self._attributes.append(attr)
@property
def definition_name(self):
return self._definition_name
def serialize(self):
return {'$ref': '
def get_definition(self):
serialized = super(ComplexSchemaItem, self).serialize()
# Adjust entries in the serialization
del serialized['definition_name']
serialized['title'] = self.definition_name
properties = {}
required_attr_names = []
for attr_name in self._attributes:
attr = getattr(self, attr_name)
if attr and isinstance(attr, BaseSchemaItem):
# Remove the attribute entry added by the base serialization
del serialized[attr_name]
properties[attr_name] = attr.serialize()
properties[attr_name]['type'] = attr.__type__
if attr.required:
required_attr_names.append(attr_name)
if serialized.get('properties') is None:
serialized['properties'] = {}
serialized['properties'].update(properties)
# Assign the required array
if required_attr_names:
serialized['required'] = required_attr_names
return serialized
def get_complex_attrs(self):
return [getattr(self, attr_name) for attr_name in self._attributes if
isinstance(getattr(self, attr_name), ComplexSchemaItem)]
class DefinitionsSchema(Schema):
    """A schema whose complex items are emitted into a ``definitions`` section."""

    @classmethod
    def serialize(cls, id_=None):
        """Serialize the schema, appending definitions for all complex items.

        Walks the schema's items — including items nested inside OneOf,
        array and dict containers — collecting every ComplexSchemaItem so
        its full definition can be emitted under ``definitions``.
        """
        # Get the initial serialization
        serialized = super(DefinitionsSchema, cls).serialize(id_)
        complex_items = []
        # Augment the serializations with the definitions of all complex items
        aux_items = cls._items.values()
        # Convert dict_view object to a list on Python 3 (views cannot be
        # mutated while iterating the work queue below)
        if six.PY3:
            aux_items = list(aux_items)
        # Breadth-first walk: pop items off the front of the queue and
        # enqueue any nested items discovered inside container items.
        while aux_items:
            item = aux_items.pop(0)
            # Complex items contribute a definition and may nest further
            # complex attributes of their own
            if isinstance(item, ComplexSchemaItem):
                complex_items.append(item)
                aux_items.extend(item.get_complex_attrs())
            # Handle container items: their children may be complex too
            if isinstance(item, OneOfItem):
                aux_items.extend(item.items)
            elif isinstance(item, ArrayItem):
                aux_items.append(item.items)
            elif isinstance(item, DictItem):
                if item.properties:
                    aux_items.extend(item.properties.values())
                if item.additional_properties and \
                        isinstance(item.additional_properties, SchemaItem):
                    aux_items.append(item.additional_properties)
        # OrderedDict keeps the definitions in discovery order
        definitions = OrderedDict()
        for config in complex_items:
            if isinstance(config, ComplexSchemaItem):
                definitions[config.definition_name] = \
                    config.get_definition()
        serialized['definitions'] = definitions
        return serialized
| true | true |
f720868bb7566cf137aaa7665b5dbd671fef24fb | 2,738 | py | Python | skfem/element/element_tet/element_tet_p2.py | carlosal1015/scikit-fem | 1e73a417e9b43fe0a36e29807792c41fa289b77d | [
"BSD-3-Clause"
] | null | null | null | skfem/element/element_tet/element_tet_p2.py | carlosal1015/scikit-fem | 1e73a417e9b43fe0a36e29807792c41fa289b77d | [
"BSD-3-Clause"
] | null | null | null | skfem/element/element_tet/element_tet_p2.py | carlosal1015/scikit-fem | 1e73a417e9b43fe0a36e29807792c41fa289b77d | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
from ..element_h1 import ElementH1
class ElementTetP2(ElementH1):
    """Quadratic (P2) Lagrange element on the reference tetrahedron.

    Ten local degrees of freedom: one at each of the four vertices and one
    at the midpoint of each of the six edges, as listed in ``doflocs``.
    """
    nodal_dofs = 1
    edge_dofs = 1
    dim = 3
    maxdeg = 2
    dofnames = ['u', 'u']
    doflocs = np.array([[0., 0., 0.],
                        [1., 0., 0.],
                        [0., 1., 0.],
                        [0., 0., 1.],
                        [.5, 0., 0.],
                        [.5, .5, 0.],
                        [0., .5, 0.],
                        [0., .0, .5],
                        [.5, .0, .5],
                        [.0, .5, .5]])

    def lbasis(self, X, i):
        """Evaluate the i'th local basis function and its gradient.

        Parameters
        ----------
        X
            Local (reference-element) coordinates; the first axis carries
            the three coordinates x, y, z.
        i
            Local basis function index, 0 <= i <= 9 (see ``doflocs``).

        Returns
        -------
        ``(phi, dphi)`` where ``phi`` holds the basis function values and
        ``dphi`` the three components of its gradient.

        Raises
        ------
        ValueError
            If ``i`` does not correspond to one of the ten local basis
            functions.
        """
        x, y, z = X

        if i == 0:  # at (0,0,0)
            phi = (1. - 3.*x + 2.*x**2 - 3.*y + 4.*x*y +
                   2.*y**2 - 3.*z + 4.*x*z + 4.*y*z + 2.*z**2)
            dphi = np.array([
                -3. + 4.*x + 4.*y + 4.*z,
                -3. + 4.*x + 4.*y + 4.*z,
                -3. + 4.*x + 4.*y + 4.*z,
            ])
        elif i == 1:  # at (1,0,0)
            phi = - 1.*x + 2.*x**2
            dphi = np.array([
                -1 + 4*x,
                0*x,
                0*x,
            ])
        elif i == 2:  # at (0,1,0)
            phi = - 1.*y + 2.*y**2
            dphi = np.array([
                0*x,
                -1. + 4.*y,
                0*x,
            ])
        elif i == 3:  # at (0,0,1)
            phi = - 1.*z + 2.*z**2
            dphi = np.array([
                0*x,
                0*x,
                -1. + 4.*z,
            ])
        elif i == 4:  # between (0,1)
            phi = 4.*x - 4.*x**2 - 4.*x*y - 4*x*z
            dphi = np.array([
                4. - 8.*x - 4.*y - 4.*z,
                -4.*x,
                -4.*x,
            ])
        elif i == 5:  # between (1,2)
            phi = 4.*x*y
            dphi = np.array([
                4.*y,
                4.*x,
                0*x,
            ])
        elif i == 6:  # between (0,2)
            phi = 0. + 4.*y - 4.*x*y - 4.*y**2 - 4.*y*z
            dphi = np.array([
                -4.*y,
                4. - 4.*x - 8.*y - 4.*z,
                -4.*y,
            ])
        elif i == 7:  # between (0,3)
            phi = 0. + 4.*z - 4.*x*z - 4.*y*z - 4.*z**2
            dphi = np.array([
                -4.*z,
                -4.*z,
                4. - 4.*x - 4.*y - 8.*z,
            ])
        elif i == 8:  # between (1,3), midpoint (.5, 0, .5)
            phi = 0. + 4.*x*z
            dphi = np.array([
                4.*z,
                0*x,
                4*x,
            ])
        elif i == 9:  # between (2,3), midpoint (0, .5, .5)
            phi = 0. + 4.*y*z
            dphi = np.array([
                0*x,
                4*z,
                4*y,
            ])
        else:
            # Replaces the original bare `raise Exception("!")` with an
            # informative error; ValueError is a subclass of Exception, so
            # existing handlers still catch it.
            raise ValueError(
                "Basis function index {} out of range [0, 9].".format(i))
        return phi, dphi
| 27.38 | 62 | 0.233382 | import numpy as np
from ..element_h1 import ElementH1
class ElementTetP2(ElementH1):
nodal_dofs = 1
edge_dofs = 1
dim = 3
maxdeg = 2
dofnames = ['u', 'u']
doflocs = np.array([[0., 0., 0.],
[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.],
[.5, 0., 0.],
[.5, .5, 0.],
[0., .5, 0.],
[0., .0, .5],
[.5, .0, .5],
[.0, .5, .5]])
def lbasis(self, X, i):
x, y, z = X
if i == 0:
phi = (1. - 3.*x + 2.*x**2 - 3.*y + 4.*x*y +
2.*y**2 - 3.*z + 4.*x*z + 4.*y*z + 2.*z**2)
dphi = np.array([
-3. + 4.*x + 4.*y + 4.*z,
-3. + 4.*x + 4.*y + 4.*z,
-3. + 4.*x + 4.*y + 4.*z,
])
elif i == 1:
phi = - 1.*x + 2.*x**2
dphi = np.array([
-1 + 4*x,
0*x,
0*x,
])
elif i == 2:
phi = - 1.*y + 2.*y**2
dphi = np.array([
0*x,
-1. + 4.*y,
0*x,
])
elif i == 3:
phi = - 1.*z + 2.*z**2
dphi = np.array([
0*x,
0*x,
-1. + 4.*z,
])
elif i == 4:
phi = 4.*x - 4.*x**2 - 4.*x*y - 4*x*z
dphi = np.array([
4. - 8.*x - 4.*y - 4.*z,
-4.*x,
-4.*x,
])
elif i == 5:
phi = 4.*x*y
dphi = np.array([
4.*y,
4.*x,
0*x,
])
elif i == 6:
phi = 0. + 4.*y - 4.*x*y - 4.*y**2 - 4.*y*z
dphi = np.array([
-4.*y,
4. - 4.*x - 8.*y - 4.*z,
-4.*y,
])
elif i == 7:
phi = 0. + 4.*z - 4.*x*z - 4.*y*z - 4.*z**2
dphi = np.array([
-4.*z,
-4.*z,
4. - 4.*x - 4.*y - 8.*z,
])
elif i == 8:
phi = 0. + 4.*x*z
dphi = np.array([
4.*z,
0*x,
4*x,
])
elif i == 9:
phi = 0. + 4.*y*z
dphi = np.array([
0*x,
4*z,
4*y,
])
else:
raise Exception("!")
return phi, dphi
| true | true |
f720884f949f265d08ef7c4e59ff5312172239dc | 5,043 | py | Python | back/Hera/utils.py | pingPoltergeist/Hera | 519336cebbcf14ff3da6299e946407788121a0b7 | [
"MIT"
] | 1 | 2021-12-09T11:37:20.000Z | 2021-12-09T11:37:20.000Z | back/Hera/utils.py | pingPoltergeist/Hera | 519336cebbcf14ff3da6299e946407788121a0b7 | [
"MIT"
] | 1 | 2021-11-05T09:14:50.000Z | 2021-11-05T09:14:50.000Z | back/Hera/utils.py | pingPoltergeist/Hera | 519336cebbcf14ff3da6299e946407788121a0b7 | [
"MIT"
] | 2 | 2022-01-13T15:12:36.000Z | 2022-03-10T01:35:25.000Z | import traceback
from pathlib import Path
import hashlib
import yaml
def get_media_dirs(media_dir_stream):
    """Build md5(path) -> ``Path`` lookup maps for movie and TV directories.

    Parameters
    ----------
    media_dir_stream:
        Sequence whose first element is a comma-separated string of movie
        directories and whose second element is a comma-separated string of
        TV-show directories.  CR/LF characters are stripped.

    Returns
    -------
    dict with keys ``movie_dir_map`` and ``tv_dir_map``; each value maps
    the hex md5 digest of a directory string to its ``pathlib.Path``.
    """
    def _dir_map(csv_line):
        # One entry per comma-separated path.  The md5 digest serves as a
        # stable identifier for the directory, not as a security measure.
        locations = csv_line.replace('\n', '').replace('\r', '').split(',')
        return {
            hashlib.md5(location.encode('utf-8')).hexdigest(): Path(location)
            for location in locations
        }

    return {
        'movie_dir_map': _dir_map(media_dir_stream[0]),
        'tv_dir_map': _dir_map(media_dir_stream[1]),
    }
class Config:
    """YAML-backed configuration of movie and TV-show directories.

    Loads ``config_filepath`` on construction; if the file is missing or
    malformed, a blank configuration is written back to disk.  For each
    media type a lookup map ``md5(path-string) -> pathlib.Path`` is kept so
    that directories can be addressed by a stable id.
    """

    # Class-level defaults kept for backward compatibility; the mutable
    # state is re-created per instance in __init__ — mutating the
    # class-level dicts (as the original code did) shared them across
    # every Config instance.
    __filepath = None
    __config = dict()
    __movie_dirs_map = dict()
    __tv_dirs_map = dict()

    def __init__(self, config_filepath=None):
        """Load (or create) the YAML config file and build the dir maps."""
        blank_config = {
            'movie_dir': list(),
            'tv_dir': list()
        }
        self.__filepath = config_filepath
        # Fresh per-instance maps (see the class-level comment above).
        self.__movie_dirs_map = dict()
        self.__tv_dirs_map = dict()
        try:
            with open(self.__filepath) as f:
                self.__config = yaml.load(f, Loader=yaml.FullLoader)
            if (
                    (not self.__config) or
                    (type(self.__config) != dict) or
                    (type(self.__config.get('movie_dir')) != list) or
                    (type(self.__config.get('tv_dir')) != list)
            ):
                # File exists but does not have the expected shape:
                # reset it to a blank config on disk.  update() performs
                # the same guarded write the original duplicated inline.
                self.__config = blank_config
                self.update()
        except Exception:
            # File missing or unreadable: start from a blank config.
            self.__config = blank_config
            self.update()
            print('Config::init: -> Creating a fresh config.yaml file')
        finally:
            self._rebuild_dir_maps()

    def _rebuild_dir_maps(self):
        """Update the md5 -> Path lookup maps from the current config."""
        if type(self.__config.get('movie_dir')) == list:
            for media_location in self.__config.get('movie_dir'):
                key = hashlib.md5(media_location.encode('utf-8')).hexdigest()
                self.__movie_dirs_map[key] = Path(media_location)
        if type(self.__config.get('tv_dir')) == list:
            for tv_location in self.__config.get('tv_dir'):
                key = hashlib.md5(tv_location.encode('utf-8')).hexdigest()
                self.__tv_dirs_map[key] = Path(tv_location)

    def get(self):
        """Return the raw configuration dict."""
        return self.__config

    def get_movie_dirs_map(self):
        """Return the md5 -> Path map of movie directories."""
        return self.__movie_dirs_map

    def get_tv_dirs_map(self):
        """Return the md5 -> Path map of TV-show directories."""
        return self.__tv_dirs_map

    def add_to_tv_dirs(self, new_tv_dir):
        """Register an existing directory as a TV location (in memory only).

        NOTE: the change is not persisted until :meth:`update` is called.
        """
        if Path(new_tv_dir).exists() and (new_tv_dir not in self.__config['tv_dir']):
            self.__config['tv_dir'].append(new_tv_dir)
            self.__tv_dirs_map[hashlib.md5(new_tv_dir.encode('utf-8')).hexdigest()] = Path(new_tv_dir)

    def add_to_movie_dirs(self, new_movie_dir):
        """Register an existing directory as a movie location (in memory only)."""
        if Path(new_movie_dir).exists() and (new_movie_dir not in self.__config['movie_dir']):
            self.__config['movie_dir'].append(new_movie_dir)
            self.__movie_dirs_map[hashlib.md5(new_movie_dir.encode('utf-8')).hexdigest()] = Path(new_movie_dir)

    def remove_from_movie_dirs(self, movie_dir):
        """Drop a movie directory from the config and its lookup map."""
        if self.__config['movie_dir'] and movie_dir in self.__config['movie_dir']:
            self.__config['movie_dir'].remove(movie_dir)
            del self.__movie_dirs_map[hashlib.md5(movie_dir.encode('utf-8')).hexdigest()]

    def remove_from_tv_dirs(self, tv_dir):
        """Drop a TV-show directory from the config and its lookup map."""
        if self.__config['tv_dir'] and tv_dir in self.__config['tv_dir']:
            self.__config['tv_dir'].remove(tv_dir)
            del self.__tv_dirs_map[hashlib.md5(tv_dir.encode('utf-8')).hexdigest()]

    def refresh(self):
        """Re-read the config file from disk and update the lookup maps."""
        try:
            with open(self.__filepath) as f:
                self.__config = yaml.load(f, Loader=yaml.FullLoader)
            self._rebuild_dir_maps()
        except Exception as ex:
            print('Config :: init -> ', ex)
            traceback.print_exc()

    def update(self, updated_config=None):
        """Persist the configuration to disk, optionally replacing it first."""
        if updated_config:
            self.__config = updated_config
        try:
            with open(self.__filepath, 'w') as f:
                yaml.dump(self.__config, f)
        except Exception as ex:
            print('Config :: update -> ', ex)
            traceback.print_exc()
| 39.398438 | 112 | 0.584969 | import traceback
from pathlib import Path
import hashlib
import yaml
def get_media_dirs(media_dir_stream):
result = dict()
movie_dir_map = dict()
for media_location in media_dir_stream[0].replace('\n', '').replace('\r', '').split(','):
movie_dir_map[hashlib.md5(media_location.encode('utf-8')).hexdigest()] = Path(media_location)
tv_dir_map = dict()
for tv_location in media_dir_stream[1].replace('\n', '').replace('\r', '').split(','):
tv_dir_map[hashlib.md5(tv_location.encode('utf-8')).hexdigest()] = Path(tv_location)
result['movie_dir_map'] = movie_dir_map
result['tv_dir_map'] = tv_dir_map
return result
class Config:
__filepath = None
__config = dict()
__movie_dirs_map = dict()
__tv_dirs_map = dict()
def __init__(self, config_filepath=None):
blank_config = {
'movie_dir': list(),
'tv_dir': list()
}
self.__filepath = config_filepath
try:
with open(self.__filepath) as f:
self.__config = yaml.load(f, Loader=yaml.FullLoader)
if (
(not self.__config) or
(type(self.__config) != dict) or
(type(self.__config.get('movie_dir')) != list) or
(type(self.__config.get('tv_dir')) != list)
):
self.__config = blank_config
try:
with open(self.__filepath, 'w') as f:
data = yaml.dump(self.__config, f)
except Exception as ex:
print('Config :: update -> ', ex)
traceback.print_exc()
except Exception as ex:
self.__config = blank_config
try:
with open(self.__filepath, 'w') as f:
data = yaml.dump(self.__config, f)
except Exception as ex:
print('Config :: update -> ', ex)
traceback.print_exc()
print('Config::init: -> Creating a fresh config.yaml file')
finally:
if type(self.__config.get('movie_dir')) == list:
for media_location in self.__config.get('movie_dir'):
self.__movie_dirs_map[hashlib.md5(media_location.encode('utf-8')).hexdigest()] = Path(
media_location)
if type(self.__config.get('tv_dir')) == list:
for tv_location in self.__config.get('tv_dir'):
self.__tv_dirs_map[hashlib.md5(tv_location.encode('utf-8')).hexdigest()] = Path(tv_location)
def get(self):
return self.__config
def get_movie_dirs_map(self):
return self.__movie_dirs_map
def get_tv_dirs_map(self):
return self.__tv_dirs_map
def add_to_tv_dirs(self, new_tv_dir):
if Path(new_tv_dir).exists() and (new_tv_dir not in self.__config['tv_dir']):
self.__config['tv_dir'].append(new_tv_dir)
self.__tv_dirs_map[hashlib.md5(new_tv_dir.encode('utf-8')).hexdigest()] = Path(new_tv_dir)
def add_to_movie_dirs(self, new_movie_dir):
if Path(new_movie_dir).exists() and (new_movie_dir not in self.__config['movie_dir']):
self.__config['movie_dir'].append(new_movie_dir)
self.__movie_dirs_map[hashlib.md5(new_movie_dir.encode('utf-8')).hexdigest()] = Path(new_movie_dir)
def remove_from_movie_dirs(self, movie_dir):
if self.__config['movie_dir'] and movie_dir in self.__config['movie_dir']:
self.__config['movie_dir'].remove(movie_dir)
del self.__movie_dirs_map[hashlib.md5(movie_dir.encode('utf-8')).hexdigest()]
def remove_from_tv_dirs(self, tv_dir):
if self.__config['tv_dir'] and tv_dir in self.__config['tv_dir']:
self.__config['tv_dir'].remove(tv_dir)
del self.__tv_dirs_map[hashlib.md5(tv_dir.encode('utf-8')).hexdigest()]
def refresh(self):
try:
with open(self.__filepath) as f:
self.__config = yaml.load(f, Loader=yaml.FullLoader)
if type(self.__config.get('movie_dir')) == list:
for media_location in self.__config.get('movie_dir'):
self.__movie_dirs_map[hashlib.md5(media_location.encode('utf-8')).hexdigest()] = Path(
media_location)
if type(self.__config.get('tv_dir')) == list:
for tv_location in self.__config.get('tv_dir'):
self.__tv_dirs_map[hashlib.md5(tv_location.encode('utf-8')).hexdigest()] = Path(tv_location)
except Exception as ex:
print('Config :: init -> ', ex)
traceback.print_exc()
def update(self, updated_config=None):
if updated_config:
self.__config = updated_config
try:
with open(self.__filepath, 'w') as f:
data = yaml.dump(self.__config, f)
except Exception as ex:
print('Config :: update -> ', ex)
traceback.print_exc()
| true | true |
f7208878a3eceaf4d90f7cc71177db7ce94487d3 | 6,366 | py | Python | son.py | nathanmartins/Son-Of-Anton | d45eec2b9263dbd981f468219c9d0fb049bd481d | [
"MIT"
] | null | null | null | son.py | nathanmartins/Son-Of-Anton | d45eec2b9263dbd981f468219c9d0fb049bd481d | [
"MIT"
] | null | null | null | son.py | nathanmartins/Son-Of-Anton | d45eec2b9263dbd981f468219c9d0fb049bd481d | [
"MIT"
] | null | null | null | import logging
import math
import os
import pickle
import re
import PIL.Image
import numpy as np
from mtcnn import MTCNN
from numpy import expand_dims
from sklearn import preprocessing, neighbors
from tensorflow_core.python.keras.models import load_model
# Project-relative locations of the training and test image folders.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATASET_DIR = os.path.join(BASE_DIR, "dataset")
TRAIN_DIR = os.path.join(DATASET_DIR, "train")
TEST_DIR = os.path.join(DATASET_DIR, "test")
DEBUG = True
# DEBUG = False
# Pre-trained FaceNet-style Keras model used by get_embedding().
# NOTE(review): loaded at import time, so importing this module is heavy.
model = load_model('facenet_keras.h5')
logging.basicConfig(level=logging.DEBUG if DEBUG else logging.INFO)
def extract_faces(img_path: str):
    """Detect faces in an image and return them as model-sized crops.

    Returns a tuple ``(faces_arr, faces)`` where ``faces_arr`` is a list of
    160x160 RGB numpy arrays (one per detected face) and ``faces`` is the
    raw MTCNN detection list; returns ``(None, None)`` when no face is
    detected.
    """
    faces_arr = list()
    # Open the file and convert it to a uint8 RGB numpy array.
    image_array = np.array(PIL.Image.open(img_path).convert("RGB"), "uint8")
    # NOTE(review): a new MTCNN detector is constructed on every call;
    # hoisting it to module level would avoid repeated setup cost —
    # confirm the detector is reusable before doing so.
    detector = MTCNN()
    faces = detector.detect_faces(image_array)
    if len(faces) == 0:
        # No people found: callers treat (None, None) as "skip this image".
        logging.warning(f"Image {img_path} not suitable for training. Size{len(faces)}")
        return None, None
    for face in faces:
        x1, y1, width, height = face['box']
        # Guard against negative box coordinates (the original "bug fix").
        x1, y1 = abs(x1), abs(y1)
        x2, y2 = x1 + width, y1 + height
        # Crop the detected face region out of the full image.
        face = image_array[y1:y2, x1:x2]
        # Resize the crop to the input size expected by the embedding model.
        image = PIL.Image.fromarray(face)
        image = image.resize((160, 160))
        faces_arr.append(np.asarray(image))
    return faces_arr, faces
def get_embedding(face_pixels):
    """Compute the embedding for a single face image.

    Pixel values are standardised (zero mean, unit variance across all
    channels) and fed through the module-level ``model`` as a one-element
    batch; the model's prediction is returned unchanged.
    """
    pixels = face_pixels.astype('float32')
    # Standardise pixel values across all channels.
    normalised = (pixels - pixels.mean()) / pixels.std()
    # The network expects a leading batch dimension.
    batch = expand_dims(normalised, axis=0)
    return model.predict(batch)
def prepare():
    """Build the training arrays from TRAIN_DIR and pickle them to disk.

    Each subdirectory of TRAIN_DIR is treated as one person (the label);
    every detected face crop is collected into ``x_train`` with the label
    appended to ``y_labels``.  Writes three files in the working directory:
    ``x_train.pickle``, ``y_labels.pickle`` and ``labels_encoded.pickle``.
    """
    x_train = list()
    y_labels = list()
    # Loop through each person in the training set.
    for label in os.listdir(TRAIN_DIR):
        path = os.path.join(TRAIN_DIR, label)
        # Ignore anything that is not jpg|jpeg|png. *USE WITH CAUTION*
        allowed_files = [os.path.join(path, f) for f in os.listdir(path) if
                         re.match(r'.*\.(jpg|jpeg|png)', f, flags=re.I)]
        for img_path in allowed_files:
            logging.debug(f"File: {img_path}, Label: {label}")
            faces, _ = extract_faces(img_path)
            if faces is not None:
                # One training sample per detected face in the image.
                for face in faces:
                    x_train.append(np.asarray(face))
                    y_labels.append(label)
    # Convert string labels into numbers.
    le = preprocessing.LabelEncoder()
    labels_encoded = le.fit_transform(y_labels)
    with open("x_train.pickle", 'wb') as f:
        pickle.dump(x_train, f)
    with open("y_labels.pickle", 'wb') as f:
        pickle.dump(y_labels, f)
    with open("labels_encoded.pickle", 'wb') as f:
        pickle.dump(labels_encoded, f)
def train():
    """Fit a KNN classifier on the prepared face embeddings and save it.

    Reads the pickles written by :func:`prepare`, converts every face array
    to an embedding via :func:`get_embedding`, and pickles the trained
    classifier to ``model.clf``.
    """
    with open("x_train.pickle", 'rb') as f:
        x_train = pickle.load(f)
    with open("labels_encoded.pickle", 'rb') as f:
        y_labels = pickle.load(f)
    # Convert each face in the train set to an embedding.
    encoded_x_train = list()
    for face_pixels in x_train:
        embedding = get_embedding(face_pixels)[0]
        encoded_x_train.append(embedding)
    encoded_x_train = np.asarray(encoded_x_train)
    # Heuristic: use sqrt(number of samples) neighbors for the weighting.
    n_neighbors = int(round(math.sqrt(len(x_train))))
    logging.info(f"n_neighbors: {n_neighbors}")
    # Create and train the KNN classifier.
    knn_clf = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors, algorithm="ball_tree", weights='distance')
    knn_clf.fit(encoded_x_train, y_labels)
    # Save the trained KNN classifier.
    with open("model.clf", 'wb') as f:
        pickle.dump(knn_clf, f)
def predict():
    """Classify every image in TEST_DIR with the saved KNN model.

    Loads ``model.clf`` and the encoded labels, detects faces in each test
    image, and logs a name ("nsm" or "unknown") per recognized face.
    """
    with open("model.clf", 'rb') as f:
        knn_clf = pickle.load(f)
    with open("labels_encoded.pickle", 'rb') as f:
        y_labels = pickle.load(f)
    le = preprocessing.LabelEncoder()
    le.fit_transform(y_labels)
    for img in os.listdir(TEST_DIR):
        full_path = os.path.join(TEST_DIR, img)
        faces, raws = extract_faces(full_path)
        if faces is None:
            logging.info(f"WARNING: COULD NOT FIND A FACE IN {full_path}")
            continue
        c = 0
        for face in faces:
            faces_encodings = get_embedding(face)
            # NOTE(review): this is the (x, y, w, h) box of a SINGLE face,
            # yet the loop below iterates over its length (always 4) as if
            # it were a list of face locations — this looks like a porting
            # artifact from face_recognition examples; confirm the intent.
            x_face_locations = tuple(raws[c]["box"])
            c += 1
            # Use the KNN model to find the best match for the test face.
            closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=1)
            are_matches = list()
            for i in range(len(x_face_locations)):
                try:
                    # NOTE(review): 7 is a magic distance threshold; with
                    # n_neighbors=1 only index 0 exists, hence the
                    # IndexError guard below.
                    dis = closest_distances[0][i][0]
                    if dis < 7:
                        are_matches.append(dis)
                except IndexError:
                    pass
            pred = knn_clf.predict(faces_encodings)
            if len(are_matches) > 0:
                for pred, loc, rec in zip(pred, x_face_locations, are_matches):
                    if rec:
                        # NOTE(review): mapping pred == 1 to "unknown"
                        # assumes a specific label-encoding order — verify
                        # against the labels produced by prepare().
                        if pred == 1:
                            a = "unknown"
                        else:
                            a = "nsm"
                        logging.info(f"Found: {a} - {img}")
                    else:
                        logging.warning(f"WARNING: COULD NOT IDENTIFY A FACE IN {full_path}")
            else:
                a = "unknown"
                logging.info(f"Found: {a} - {img}")
if __name__ == '__main__':
    # Pipeline stages; run prepare() and train() first to (re)build the
    # pickled dataset and classifier, then predict() to classify TEST_DIR.
    # prepare()
    # train()
    predict()
| 30.028302 | 112 | 0.600377 | import logging
import math
import os
import pickle
import re
import PIL.Image
import numpy as np
from mtcnn import MTCNN
from numpy import expand_dims
from sklearn import preprocessing, neighbors
from tensorflow_core.python.keras.models import load_model
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATASET_DIR = os.path.join(BASE_DIR, "dataset")
TRAIN_DIR = os.path.join(DATASET_DIR, "train")
TEST_DIR = os.path.join(DATASET_DIR, "test")
DEBUG = True
model = load_model('facenet_keras.h5')
logging.basicConfig(level=logging.DEBUG if DEBUG else logging.INFO)
def extract_faces(img_path: str):
faces_arr = list()
image_array = np.array(PIL.Image.open(img_path).convert("RGB"), "uint8")
detector = MTCNN()
faces = detector.detect_faces(image_array)
if len(faces) == 0:
logging.warning(f"Image {img_path} not suitable for training. Size{len(faces)}")
return None, None
for face in faces:
x1, y1, width, height = face['box']
x1, y1 = abs(x1), abs(y1)
x2, y2 = x1 + width, y1 + height
face = image_array[y1:y2, x1:x2]
image = PIL.Image.fromarray(face)
image = image.resize((160, 160))
faces_arr.append(np.asarray(image))
return faces_arr, faces
def get_embedding(face_pixels):
face_pixels = face_pixels.astype('float32')
mean, std = face_pixels.mean(), face_pixels.std()
face_pixels = (face_pixels - mean) / std
samples = expand_dims(face_pixels, axis=0)
return model.predict(samples)
def prepare():
x_train = list()
y_labels = list()
for label in os.listdir(TRAIN_DIR):
path = os.path.join(TRAIN_DIR, label)
allowed_files = [os.path.join(path, f) for f in os.listdir(path) if
re.match(r'.*\.(jpg|jpeg|png)', f, flags=re.I)]
for img_path in allowed_files:
logging.debug(f"File: {img_path}, Label: {label}")
faces, _ = extract_faces(img_path)
if faces is not None:
for face in faces:
x_train.append(np.asarray(face))
y_labels.append(label)
le = preprocessing.LabelEncoder()
labels_encoded = le.fit_transform(y_labels)
with open("x_train.pickle", 'wb') as f:
pickle.dump(x_train, f)
with open("y_labels.pickle", 'wb') as f:
pickle.dump(y_labels, f)
with open("labels_encoded.pickle", 'wb') as f:
pickle.dump(labels_encoded, f)
def train():
with open("x_train.pickle", 'rb') as f:
x_train = pickle.load(f)
with open("labels_encoded.pickle", 'rb') as f:
y_labels = pickle.load(f)
encoded_x_train = list()
for face_pixels in x_train:
embedding = get_embedding(face_pixels)[0]
encoded_x_train.append(embedding)
encoded_x_train = np.asarray(encoded_x_train)
n_neighbors = int(round(math.sqrt(len(x_train))))
logging.info(f"n_neighbors: {n_neighbors}")
knn_clf = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors, algorithm="ball_tree", weights='distance')
knn_clf.fit(encoded_x_train, y_labels)
with open("model.clf", 'wb') as f:
pickle.dump(knn_clf, f)
def predict():
with open("model.clf", 'rb') as f:
knn_clf = pickle.load(f)
with open("labels_encoded.pickle", 'rb') as f:
y_labels = pickle.load(f)
le = preprocessing.LabelEncoder()
le.fit_transform(y_labels)
for img in os.listdir(TEST_DIR):
full_path = os.path.join(TEST_DIR, img)
faces, raws = extract_faces(full_path)
if faces is None:
logging.info(f"WARNING: COULD NOT FIND A FACE IN {full_path}")
continue
c = 0
for face in faces:
faces_encodings = get_embedding(face)
x_face_locations = tuple(raws[c]["box"])
c += 1
closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=1)
are_matches = list()
for i in range(len(x_face_locations)):
try:
dis = closest_distances[0][i][0]
if dis < 7:
are_matches.append(dis)
except IndexError:
pass
pred = knn_clf.predict(faces_encodings)
if len(are_matches) > 0:
for pred, loc, rec in zip(pred, x_face_locations, are_matches):
if rec:
if pred == 1:
a = "unknown"
else:
a = "nsm"
logging.info(f"Found: {a} - {img}")
else:
logging.warning(f"WARNING: COULD NOT IDENTIFY A FACE IN {full_path}")
else:
a = "unknown"
logging.info(f"Found: {a} - {img}")
if __name__ == '__main__':
predict()
| true | true |
f72088a05bdf713d0b6f36ecd12b25eb950976d1 | 12,785 | py | Python | raiden/exceptions.py | luehrsFred/raiden | a1b118ebe14badb1acd0744b2d7f2b39f8ba5313 | [
"MIT"
] | null | null | null | raiden/exceptions.py | luehrsFred/raiden | a1b118ebe14badb1acd0744b2d7f2b39f8ba5313 | [
"MIT"
] | 69 | 2020-07-21T05:49:21.000Z | 2022-03-08T18:09:44.000Z | raiden/exceptions.py | luehrsFred/raiden | a1b118ebe14badb1acd0744b2d7f2b39f8ba5313 | [
"MIT"
] | null | null | null | """
What do you want from this file?
1. I need to look up when to raise what.
Then read on the docstrings.
2. I have to add a new exception.
Make sure you catch it somewhere. Sometimes you'll realize you cannot catch it.
Especially, if your new exception indicates bug in the Raiden codebase,
you are not supposed to catch the exception. Instead, use one of the
existing uncaught exceptions: RaidenUnrecoverableError or BrokenPreconditionError.
"""
import enum
from typing import Any, Dict, List
@enum.unique
class PFSError(enum.IntEnum):
    """Error codes the Pathfinding Service may return.

    Mirrors the codes defined in the pathfinding_service.exceptions module
    in https://github.com/raiden-network/raiden-services
    """

    # TODO: link to PFS spec as soon as the error codes are added there.

    # General errors (20xx)
    INVALID_REQUEST = 2000
    INVALID_SIGNATURE = 2001
    REQUEST_OUTDATED = 2002

    # IOU-related errors (21xx)
    BAD_IOU = 2100
    MISSING_IOU = 2101
    WRONG_IOU_RECIPIENT = 2102
    IOU_EXPIRED_TOO_EARLY = 2103
    INSUFFICIENT_SERVICE_PAYMENT = 2104
    IOU_ALREADY_CLAIMED = 2105
    USE_THIS_IOU = 2106
    DEPOSIT_TOO_LOW = 2107

    # Routing errors (22xx)
    NO_ROUTE_FOUND = 2201

    @staticmethod
    def is_iou_rejected(error_code: int) -> bool:
        """Return True when ``error_code`` lies in the IOU error range (21xx)."""
        return 2100 <= error_code < 2200
class RaidenError(Exception):
"""Raiden base exception.
This exception exists for user code to catch all Raiden related exceptions.
This should be used with care, because `RaidenUnrecoverableError` is a
`RaidenError`, and when one of such exceptions is raised the state of the
client node is undetermined.
"""
class RaidenRecoverableError(RaidenError):
"""Exception for recoverable errors.
This base exception exists for code written in a EAFP style. It should be
inherited when exceptions are expected to happen and handling them will not
leave the node is a undefined state.
Usage examples:
- Operations that failed because of race conditions, e.g. openning a
channel fails because both participants try at the same time.
- Transient connectivety problems.
- Timeouts.
Note:
Some errors are undesirable, but they are still possible and should be
expected. Example a secret registration that finishes after the timeout
window.
"""
class RaidenUnrecoverableError(RaidenError):
"""Base exception for unexpected errors that should crash the client.
This exception is used when something unrecoverable happened:
- Corrupted database.
- Running out of disk space.
"""
class RaidenValidationError(RaidenRecoverableError):
"""Exception raised when an input value is invalid.
This exception must be raised on the edges of the system, to inform the
caller one of the provided values is invalid.
Actually, this exception can also be used in the proxies for insane values
that are not valid regardless of the chain state.
If a value is not acceptable because of the chain state, BrokenPreconditionError
must be used instead.
Also, if a value indicates a bug in our codebase, RaidenValidationError
is not the right error because RaidenValidationError is considered as a
recoverable error.
We prefer this exception over ValueError because libraries (e.g. web3.py)
raise ValueError sometimes, and we want to differentiate our own exceptions
from those.
"""
class PaymentConflict(RaidenRecoverableError):
""" Raised when there is another payment with the same identifier but the
attributes of the payment don't match.
"""
class InsufficientFunds(RaidenError):
""" Raised when provided account doesn't have token funds to complete the
requested deposit.
Used when a *user* tries to deposit a given amount of token in a channel,
but his account doesn't have enough funds to pay for the deposit.
"""
class DepositOverLimit(RaidenError):
""" Raised when the requested deposit is over the limit
Used when a *user* tries to deposit a given amount of token in a channel,
but the amount is over the testing limit.
"""
class DepositMismatch(RaidenRecoverableError):
""" Raised when the requested deposit is lower than actual channel deposit
Used when a *user* tries to deposit a given amount of tokens in a channel,
but the on-chain amount is already higher.
"""
class InvalidChannelID(RaidenError):
""" Raised when the user provided value is not a channel id. """
class WithdrawMismatch(RaidenRecoverableError):
""" Raised when the requested withdraw is larger than actual channel balance. """
class InvalidChecksummedAddress(RaidenError):
"""Raised when the user provided address is not a str or the value is not
properly checksummed.
Exception used to enforce the checksummed for external APIs. The address
provided by a user must be checksummed to avoid errors, the checksummed
address must be validated at the edges before calling internal functions.
"""
class InvalidBinaryAddress(RaidenValidationError):
"""Raised when the address is not binary or it is not 20 bytes long.
Exception used to enforce the sandwich encoding for python APIs. The
internal address representation used by Raiden is binary, the binary
address must be validated at the edges before calling internal functions.
"""
class InvalidSecret(RaidenError):
""" Raised when the user provided value is not a valid secret. """
class InvalidSecretHash(RaidenError):
""" Raised when the user provided value is not a valid secrethash. """
class InvalidAmount(RaidenError):
""" Raised when the user provided value is not a positive integer and
cannot be used to define a transfer value.
"""
class InvalidSettleTimeout(RaidenError):
""" Raised when the user provided timeout value is less than the minimum
settle timeout"""
class InvalidRevealTimeout(RaidenError):
""" Raised when the channel's settle timeout is less than
double the user provided reveal timeout value.
condition: settle_timeout < reveal_timeout * 2
"""
class InvalidSignature(RaidenError):
"""Raised on invalid signature recover/verify"""
class InvalidPaymentIdentifier(RaidenError):
"""Raised on invalid payment identifier"""
class SamePeerAddress(RaidenError):
""" Raised when a user tries to perform an action that requires two different partners
"""
class UnknownTokenAddress(RaidenError):
""" Raised when the token address in unknown. """
class TokenNotRegistered(RaidenError):
""" Raised if there is no token network for token used when opening a channel """
class AlreadyRegisteredTokenAddress(RaidenError):
""" Raised when the token address in already registered with the given network. """
class InvalidToken(RaidenError):
""" Raised if the token does not follow the ERC20 standard. """
class MaxTokenNetworkNumberReached(RaidenError):
""" Raised if the maximum amount of token networks has been registered. """
class InvalidTokenAddress(RaidenError):
""" Raised if the token address is invalid. """
class InvalidTokenNetworkDepositLimit(RaidenError):
""" Raised when an invalid token network deposit
limit is passed to the token network registry proxy.
"""
class InvalidChannelParticipantDepositLimit(RaidenError):
""" Raised when an invalid channel participant
deposit limit is passed to the token network registry proxy.
"""
class EthNodeInterfaceError(RaidenError):
""" Raised when the underlying ETH node does not support an rpc interface"""
class AddressWithoutCode(RaidenError):
"""Raised on attempt to execute contract on address without a code."""
class DuplicatedChannelError(RaidenRecoverableError):
"""Raised if someone tries to create a channel that already exists."""
class UnexpectedChannelState(RaidenRecoverableError):
"""Raised if an operation is attempted on a channel while it is in an unexpected state."""
class ContractCodeMismatch(RaidenError):
"""Raised if the onchain code of the contract differs."""
class APIServerPortInUseError(RaidenError):
"""Raised when API server port is already in use"""
class InvalidDBData(RaidenUnrecoverableError):
"""Raised when the data of the WAL are in an unexpected format"""
class InvalidBlockNumberInput(RaidenError):
"""Raised when the user provided a block number that is < 0 or > UINT64_MAX"""
class NoStateForBlockIdentifier(RaidenError):
"""
Raised when we attempt to provide a block identifier older
than STATE_PRUNING_AFTER_BLOCKS blocks
"""
class InvalidNumberInput(RaidenError):
"""Raised when the user provided an invalid number"""
class TransportError(RaidenError):
""" Raised when a transport encounters an unexpected error """
class ReplacementTransactionUnderpriced(RaidenError):
"""Raised when a replacement transaction is rejected by the blockchain"""
class EthereumNonceTooLow(RaidenUnrecoverableError):
"""Raised when a new transaction is sent with a nonce that has been used already."""
class ChannelOutdatedError(RaidenError):
""" Raised when an action is invoked on a channel whose
identifier has been replaced with a new channel identifier
due to a close/re-open of current channel.
"""
class InsufficientGasReserve(RaidenError):
""" Raised when an action cannot be done because the available balance
is not sufficient for the lifecycles of all active channels.
"""
class InsufficientEth(RaidenError):
""" Raised when an on-chain action failed because we could not pay for
the gas. (The case we try to avoid with `InsufficientGasReserve`
exceptions.)
"""
class BrokenPreconditionError(RaidenError):
    """Raised when the chain does not satisfy transaction preconditions
    that proxies check at the specified block.
    This exception should be used when the proxy already sees that, on the
    specified block, due to the blockchain state, an assert or a revert in
    the smart contract would be hit for the triggering block.
    This exception should not be used for errors independent of the chain
    state. For example, when an argument needs to be always non-zero,
    violation of this condition is not a BrokenPreconditionError, but a
    RaidenValidationError.
    This exception can also be used when preconditions are not satisfied
    on another Raiden node.
    """
class ServiceRequestFailed(RaidenError):
    """Raised when a request to one of the Raiden services fails."""
class PFSReturnedError(ServiceRequestFailed):
    """The PFS (path finding service) responded with a json message
    containing an error."""
    def __init__(self, message: str, error_code: int, error_details: Dict[str, Any]) -> None:
        """Build the exception from the raw PFS error fields.
        :param message: Human readable error text returned by the PFS.
        :param error_code: Numeric PFS error code (see ``PFSError``).
        :param error_details: Additional error context; appended to the
            exception args only when non-empty.
        """
        args: List[Any] = [f"{message} (PFS error code: {error_code})"]
        if error_details:
            args.append(error_details)
        super().__init__(*args)
        # BUGFIX: previously the numeric code was stored here
        # (``self.message = error_code``), losing the human readable text.
        self.message = message
        self.error_code = error_code
        self.error_details = error_details
    @classmethod
    def from_response(cls, response_json: Dict[str, Any]) -> "PFSReturnedError":
        """Create the appropriate error instance from a PFS json response.
        Dispatches to ``ServiceRequestIOURejected`` for IOU-related error
        codes, otherwise returns an instance of ``cls``.
        """
        # TODO: Use marshmallow to deserialize the message fields. Otherwise we
        # can't guarantee that the variables have the right type, causing bad
        # error handling.
        error_params = dict(
            message=response_json.get("errors", ""),
            error_code=response_json.get("error_code", 0),
            error_details=response_json.get("error_details"),
        )
        if PFSError.is_iou_rejected(error_params["error_code"]):
            return ServiceRequestIOURejected(**error_params)
        return cls(**error_params)
class ServiceRequestIOURejected(PFSReturnedError):
    """Raised when a service request fails due to a problem with the IOU."""
class UndefinedMediationFee(RaidenError):
    """Raised when the fee schedule is not applicable, resulting in
    undefined fees.
    Either the Raiden node is not capable of mediating this payment, or
    the FeeSchedule is outdated/inconsistent."""
class TokenNetworkDeprecated(RaidenError):
    """Raised when the token network proxy safety switch is turned on
    (i.e. deprecated).
    """
class MintFailed(RaidenError):
    """Raised when an attempt to mint a testnet token fails."""
class SerializationError(RaidenError):
    """Raised when invalid data is to be (de-)serialized."""
class MatrixSyncMaxTimeoutReached(RaidenRecoverableError):
    """Raised if processing the matrix response takes longer than the poll
    timeout."""
class ConfigurationError(RaidenError):
    """Raised when there is something wrong with the provided Raiden
    configuration/arguments."""
| 31.882793 | 97 | 0.732264 | import enum
from typing import Any, Dict, List
@enum.unique
class PFSError(enum.IntEnum):
INVALID_REQUEST = 2000
INVALID_SIGNATURE = 2001
REQUEST_OUTDATED = 2002
BAD_IOU = 2100
MISSING_IOU = 2101
WRONG_IOU_RECIPIENT = 2102
IOU_EXPIRED_TOO_EARLY = 2103
INSUFFICIENT_SERVICE_PAYMENT = 2104
IOU_ALREADY_CLAIMED = 2105
USE_THIS_IOU = 2106
DEPOSIT_TOO_LOW = 2107
NO_ROUTE_FOUND = 2201
@staticmethod
def is_iou_rejected(error_code: int) -> bool:
return error_code >= 2100 and error_code < 2200
class RaidenError(Exception):
class RaidenRecoverableError(RaidenError):
class RaidenUnrecoverableError(RaidenError):
class RaidenValidationError(RaidenRecoverableError):
class PaymentConflict(RaidenRecoverableError):
class InsufficientFunds(RaidenError):
class DepositOverLimit(RaidenError):
class DepositMismatch(RaidenRecoverableError):
class InvalidChannelID(RaidenError):
class WithdrawMismatch(RaidenRecoverableError):
class InvalidChecksummedAddress(RaidenError):
class InvalidBinaryAddress(RaidenValidationError):
class InvalidSecret(RaidenError):
class InvalidSecretHash(RaidenError):
class InvalidAmount(RaidenError):
class InvalidSettleTimeout(RaidenError):
class InvalidRevealTimeout(RaidenError):
class InvalidSignature(RaidenError):
class InvalidPaymentIdentifier(RaidenError):
class SamePeerAddress(RaidenError):
class UnknownTokenAddress(RaidenError):
class TokenNotRegistered(RaidenError):
class AlreadyRegisteredTokenAddress(RaidenError):
class InvalidToken(RaidenError):
class MaxTokenNetworkNumberReached(RaidenError):
class InvalidTokenAddress(RaidenError):
class InvalidTokenNetworkDepositLimit(RaidenError):
class InvalidChannelParticipantDepositLimit(RaidenError):
class EthNodeInterfaceError(RaidenError):
class AddressWithoutCode(RaidenError):
class DuplicatedChannelError(RaidenRecoverableError):
class UnexpectedChannelState(RaidenRecoverableError):
class ContractCodeMismatch(RaidenError):
class APIServerPortInUseError(RaidenError):
class InvalidDBData(RaidenUnrecoverableError):
class InvalidBlockNumberInput(RaidenError):
class NoStateForBlockIdentifier(RaidenError):
class InvalidNumberInput(RaidenError):
class TransportError(RaidenError):
class ReplacementTransactionUnderpriced(RaidenError):
class EthereumNonceTooLow(RaidenUnrecoverableError):
class ChannelOutdatedError(RaidenError):
class InsufficientGasReserve(RaidenError):
class InsufficientEth(RaidenError):
class BrokenPreconditionError(RaidenError):
class ServiceRequestFailed(RaidenError):
class PFSReturnedError(ServiceRequestFailed):
def __init__(self, message: str, error_code: int, error_details: Dict[str, Any]) -> None:
args: List[Any] = [f"{message} (PFS error code: {error_code})"]
if error_details:
args.append(error_details)
super().__init__(*args)
self.message = error_code
self.error_code = error_code
self.error_details = error_details
@classmethod
def from_response(cls, response_json: Dict[str, Any]) -> "PFSReturnedError":
# error handling.
error_params = dict(
message=response_json.get("errors", ""),
error_code=response_json.get("error_code", 0),
error_details=response_json.get("error_details"),
)
if PFSError.is_iou_rejected(error_params["error_code"]):
return ServiceRequestIOURejected(**error_params)
return cls(**error_params)
class ServiceRequestIOURejected(PFSReturnedError):
class UndefinedMediationFee(RaidenError):
class TokenNetworkDeprecated(RaidenError):
class MintFailed(RaidenError):
class SerializationError(RaidenError):
class MatrixSyncMaxTimeoutReached(RaidenRecoverableError):
class ConfigurationError(RaidenError):
| true | true |
f7208908dd0f530934afcd736f133ab23168b854 | 1,172 | py | Python | setup.py | njdanielsen/aws-data-wrangler | 5cdb316224370e952dfb3a701825e1b1ab331105 | [
"Apache-2.0"
] | null | null | null | setup.py | njdanielsen/aws-data-wrangler | 5cdb316224370e952dfb3a701825e1b1ab331105 | [
"Apache-2.0"
] | null | null | null | setup.py | njdanielsen/aws-data-wrangler | 5cdb316224370e952dfb3a701825e1b1ab331105 | [
"Apache-2.0"
] | null | null | null | import os
from io import open
from typing import Dict
from setuptools import find_packages, setup
here = os.path.abspath(os.path.dirname(__file__))
about: Dict[str, str] = {}
path = os.path.join(here, "awswrangler", "__metadata__.py")
with open(file=path, mode="r", encoding="utf-8") as f:
exec(f.read(), about)
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
author="Igor Tavares",
url="https://github.com/awslabs/aws-data-wrangler",
name=about["__title__"],
version=about["__version__"],
description=about["__description__"],
long_description=long_description,
long_description_content_type="text/markdown",
license=about["__license__"],
packages=find_packages(exclude=["tests"]),
include_package_data=True,
python_requires=">=3.6, <3.10",
install_requires=open("requirements.txt").read().strip().split("\n"),
classifiers=[
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
extras_require={"sqlserver": ["pyodbc~=4.0.30"]},
)
| 31.675676 | 73 | 0.664676 | import os
from io import open
from typing import Dict
from setuptools import find_packages, setup
here = os.path.abspath(os.path.dirname(__file__))
about: Dict[str, str] = {}
path = os.path.join(here, "awswrangler", "__metadata__.py")
with open(file=path, mode="r", encoding="utf-8") as f:
exec(f.read(), about)
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
author="Igor Tavares",
url="https://github.com/awslabs/aws-data-wrangler",
name=about["__title__"],
version=about["__version__"],
description=about["__description__"],
long_description=long_description,
long_description_content_type="text/markdown",
license=about["__license__"],
packages=find_packages(exclude=["tests"]),
include_package_data=True,
python_requires=">=3.6, <3.10",
install_requires=open("requirements.txt").read().strip().split("\n"),
classifiers=[
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
extras_require={"sqlserver": ["pyodbc~=4.0.30"]},
)
| true | true |
f72089bdffb1bfe66db8bea55840dc0bef158c5f | 18,291 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_12_01/operations/_web_application_firewall_policies_operations.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 1 | 2020-05-12T23:29:15.000Z | 2020-05-12T23:29:15.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_12_01/operations/_web_application_firewall_policies_operations.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 226 | 2019-07-24T07:57:21.000Z | 2019-10-15T01:07:24.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_12_01/operations/_web_application_firewall_policies_operations.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class WebApplicationFirewallPoliciesOperations(object):
    """WebApplicationFirewallPoliciesOperations operations.
    You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute.
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Client API version. Constant value: "2019-12-01".
    """
    # Expose the generated models module on the class for convenient access.
    models = models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Fixed service API version sent as the 'api-version' query
        # parameter on every request made by this operations class.
        self.api_version = "2019-12-01"
        self.config = config
    def list(
            self, resource_group_name, custom_headers=None, raw=False, **operation_config):
        """Lists all of the protection policies within a resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of WebApplicationFirewallPolicy
        :rtype:
         ~azure.mgmt.network.v2019_12_01.models.WebApplicationFirewallPolicyPaged[~azure.mgmt.network.v2019_12_01.models.WebApplicationFirewallPolicy]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Builds the HTTP GET request for the first page (no next_link) or a
        # follow-up page (next_link is the absolute URL from the service).
        def prepare_request(next_link=None):
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Accept'] = 'application/json'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            return request
        # Fetches one page; the Paged deserializer below calls this again
        # with each page's next_link until the collection is exhausted.
        def internal_paging(next_link=None):
            request = prepare_request(next_link)
            response = self._client.send(request, stream=False, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        header_dict = None
        if raw:
            header_dict = {}
        deserialized = models.WebApplicationFirewallPolicyPaged(internal_paging, self._deserialize.dependencies, header_dict)
        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies'}
    def list_all(
            self, custom_headers=None, raw=False, **operation_config):
        """Gets all the WAF policies in a subscription.
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of WebApplicationFirewallPolicy
        :rtype:
         ~azure.mgmt.network.v2019_12_01.models.WebApplicationFirewallPolicyPaged[~azure.mgmt.network.v2019_12_01.models.WebApplicationFirewallPolicy]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Same paging pattern as ``list`` but scoped to the whole
        # subscription (no resource group in the URL).
        def prepare_request(next_link=None):
            if not next_link:
                # Construct URL
                url = self.list_all.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Accept'] = 'application/json'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            return request
        def internal_paging(next_link=None):
            request = prepare_request(next_link)
            response = self._client.send(request, stream=False, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        header_dict = None
        if raw:
            header_dict = {}
        deserialized = models.WebApplicationFirewallPolicyPaged(internal_paging, self._deserialize.dependencies, header_dict)
        return deserialized
    list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies'}
    def get(
            self, resource_group_name, policy_name, custom_headers=None, raw=False, **operation_config):
        """Retrieve protection policy with specified name within a resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param policy_name: The name of the policy.
        :type policy_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: WebApplicationFirewallPolicy or ClientRawResponse if raw=true
        :rtype:
         ~azure.mgmt.network.v2019_12_01.models.WebApplicationFirewallPolicy or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'policyName': self._serialize.url("policy_name", policy_name, 'str', max_length=128),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('WebApplicationFirewallPolicy', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}'}
    def create_or_update(
            self, resource_group_name, policy_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Creates or update policy with specified rule set name within a resource
        group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param policy_name: The name of the policy.
        :type policy_name: str
        :param parameters: Policy to be created.
        :type parameters:
         ~azure.mgmt.network.v2019_12_01.models.WebApplicationFirewallPolicy
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: WebApplicationFirewallPolicy or ClientRawResponse if raw=true
        :rtype:
         ~azure.mgmt.network.v2019_12_01.models.WebApplicationFirewallPolicy or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.create_or_update.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'policyName': self._serialize.url("policy_name", policy_name, 'str', max_length=128),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'WebApplicationFirewallPolicy')
        # Construct and send request
        request = self._client.put(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)
        # 200 = updated existing policy, 201 = created new policy.
        if response.status_code not in [200, 201]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('WebApplicationFirewallPolicy', response)
        if response.status_code == 201:
            deserialized = self._deserialize('WebApplicationFirewallPolicy', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}'}
    # Sends the initial DELETE request for the long-running operation driven
    # by the public ``delete`` method below.
    def _delete_initial(
            self, resource_group_name, policy_name, custom_headers=None, raw=False, **operation_config):
        # Construct URL
        # NOTE: reuses the URL template registered on the public delete method.
        url = self.delete.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'policyName': self._serialize.url("policy_name", policy_name, 'str', max_length=128),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.delete(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [200, 202, 204]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    def delete(
            self, resource_group_name, policy_name, custom_headers=None, raw=False, polling=True, **operation_config):
        """Deletes Policy.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param policy_name: The name of the policy.
        :type policy_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns None or
         ClientRawResponse<None> if raw==True
        :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            policy_name=policy_name,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        def get_long_running_output(response):
            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response
        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        # Select the polling strategy: ARM long-running-operation polling by
        # default, no polling, or a caller-supplied polling object.
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}'}
| 46.780051 | 199 | 0.668526 |
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class WebApplicationFirewallPoliciesOperations(object):
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2019-12-01"
self.config = config
def list(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
def prepare_request(next_link=None):
if not next_link:
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
header_dict = None
if raw:
header_dict = {}
deserialized = models.WebApplicationFirewallPolicyPaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies'}
def list_all(
self, custom_headers=None, raw=False, **operation_config):
def prepare_request(next_link=None):
if not next_link:
url = self.list_all.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
header_dict = None
if raw:
header_dict = {}
deserialized = models.WebApplicationFirewallPolicyPaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies'}
def get(
self, resource_group_name, policy_name, custom_headers=None, raw=False, **operation_config):
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'policyName': self._serialize.url("policy_name", policy_name, 'str', max_length=128),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('WebApplicationFirewallPolicy', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}'}
def create_or_update(
self, resource_group_name, policy_name, parameters, custom_headers=None, raw=False, **operation_config):
url = self.create_or_update.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'policyName': self._serialize.url("policy_name", policy_name, 'str', max_length=128),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
body_content = self._serialize.body(parameters, 'WebApplicationFirewallPolicy')
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('WebApplicationFirewallPolicy', response)
if response.status_code == 201:
deserialized = self._deserialize('WebApplicationFirewallPolicy', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}'}
def _delete_initial(
self, resource_group_name, policy_name, custom_headers=None, raw=False, **operation_config):
url = self.delete.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'policyName': self._serialize.url("policy_name", policy_name, 'str', max_length=128),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, policy_name, custom_headers=None, raw=False, polling=True, **operation_config):
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
policy_name=policy_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}'}
| true | true |
f7208a0bce6ed5a17f46fdcb513416605ec0135f | 3,249 | py | Python | cfgov/v1/models/browse_page.py | atuggle/cfgov-refresh | 5a9cfd92b460b9be7befb39f5845abf56857aeac | [
"CC0-1.0"
] | null | null | null | cfgov/v1/models/browse_page.py | atuggle/cfgov-refresh | 5a9cfd92b460b9be7befb39f5845abf56857aeac | [
"CC0-1.0"
] | 1 | 2016-09-14T21:11:19.000Z | 2016-09-14T21:11:19.000Z | cfgov/v1/models/browse_page.py | atuggle/cfgov-refresh | 5a9cfd92b460b9be7befb39f5845abf56857aeac | [
"CC0-1.0"
] | null | null | null | from django.db import models
from wagtail.wagtailadmin.edit_handlers import (
FieldPanel, ObjectList, StreamFieldPanel, TabbedInterface
)
from wagtail.wagtailcore import blocks
from wagtail.wagtailcore.fields import StreamField
from wagtail.wagtailcore.models import PageManager
from data_research.blocks import (
ConferenceRegistrationForm, MortgageDataDownloads
)
from jobmanager.models import JobListingTable
from v1 import blocks as v1_blocks
from v1.atomic_elements import molecules, organisms
from v1.models.base import CFGOVPage
from v1.util.util import get_secondary_nav_items
class BrowsePage(CFGOVPage):
    """Wagtail page type for generic "browse" pages.

    Provides a header region plus a flexible ``content`` StreamField, and
    renders with a secondary (sidebar) navigation.
    """
    # Top-of-page region: intro text and/or featured content.
    header = StreamField([
        ('text_introduction', molecules.TextIntroduction()),
        ('featured_content', molecules.FeaturedContent()),
    ], blank=True)
    # Main body region: editors may mix any of these block types.
    content = StreamField([
        ('bureau_structure', organisms.BureauStructure()),
        ('info_unit_group', organisms.InfoUnitGroup()),
        ('well', organisms.Well()),
        ('full_width_text', organisms.FullWidthText()),
        ('expandable', organisms.Expandable()),
        ('expandable_group', organisms.ExpandableGroup()),
        ('table_block', organisms.AtomicTableBlock(
            table_options={'renderer': 'html'})),
        ('job_listing_table', JobListingTable()),
        ('feedback', v1_blocks.Feedback()),
        ('conference_registration_form', ConferenceRegistrationForm()),
        ('raw_html_block', blocks.RawHTMLBlock(
            label='Raw HTML block')),
        ('html_block', organisms.HTMLBlock()),
        ('chart_block', organisms.ChartBlock()),
        ('mortgage_chart_block', organisms.MortgageChartBlock()),
        ('mortgage_map_block', organisms.MortgageMapBlock()),
        ('mortgage_downloads_block', MortgageDataDownloads()),
        ('snippet_list', organisms.SnippetList()),
        ('data_snapshot', organisms.DataSnapshot()),
        ('image_text_25_75_group', organisms.ImageText2575Group()),
        ('image_text_50_50_group', organisms.ImageText5050Group()),
        ('half_width_link_blob_group', organisms.HalfWidthLinkBlobGroup()),
        ('third_width_link_blob_group', organisms.ThirdWidthLinkBlobGroup()),
    ], blank=True)
    # When True, the sidebar navigation omits this page's sibling pages.
    secondary_nav_exclude_sibling_pages = models.BooleanField(default=False)
    # General content tab
    content_panels = CFGOVPage.content_panels + [
        StreamFieldPanel('header'),
        StreamFieldPanel('content'),
    ]
    # Sidebar tab
    sidefoot_panels = CFGOVPage.sidefoot_panels + [
        FieldPanel('secondary_nav_exclude_sibling_pages'),
    ]
    # Tab handler interface
    edit_handler = TabbedInterface([
        ObjectList(content_panels, heading='General Content'),
        ObjectList(sidefoot_panels, heading='Sidebar'),
        ObjectList(CFGOVPage.settings_panels, heading='Configuration'),
    ])
    template = 'browse-basic/index.html'
    objects = PageManager()
    @property
    def page_js(self):
        """Extend the base page's JS bundle with the secondary-nav script."""
        return (
            super(BrowsePage, self).page_js + ['secondary-navigation.js']
        )
    def get_context(self, request, *args, **kwargs):
        """Expose the secondary-nav helper to the template context."""
        context = super(BrowsePage, self).get_context(request, *args, **kwargs)
        context.update({'get_secondary_nav_items': get_secondary_nav_items})
        return context
| 37.77907 | 79 | 0.6996 | from django.db import models
from wagtail.wagtailadmin.edit_handlers import (
FieldPanel, ObjectList, StreamFieldPanel, TabbedInterface
)
from wagtail.wagtailcore import blocks
from wagtail.wagtailcore.fields import StreamField
from wagtail.wagtailcore.models import PageManager
from data_research.blocks import (
ConferenceRegistrationForm, MortgageDataDownloads
)
from jobmanager.models import JobListingTable
from v1 import blocks as v1_blocks
from v1.atomic_elements import molecules, organisms
from v1.models.base import CFGOVPage
from v1.util.util import get_secondary_nav_items
class BrowsePage(CFGOVPage):
header = StreamField([
('text_introduction', molecules.TextIntroduction()),
('featured_content', molecules.FeaturedContent()),
], blank=True)
content = StreamField([
('bureau_structure', organisms.BureauStructure()),
('info_unit_group', organisms.InfoUnitGroup()),
('well', organisms.Well()),
('full_width_text', organisms.FullWidthText()),
('expandable', organisms.Expandable()),
('expandable_group', organisms.ExpandableGroup()),
('table_block', organisms.AtomicTableBlock(
table_options={'renderer': 'html'})),
('job_listing_table', JobListingTable()),
('feedback', v1_blocks.Feedback()),
('conference_registration_form', ConferenceRegistrationForm()),
('raw_html_block', blocks.RawHTMLBlock(
label='Raw HTML block')),
('html_block', organisms.HTMLBlock()),
('chart_block', organisms.ChartBlock()),
('mortgage_chart_block', organisms.MortgageChartBlock()),
('mortgage_map_block', organisms.MortgageMapBlock()),
('mortgage_downloads_block', MortgageDataDownloads()),
('snippet_list', organisms.SnippetList()),
('data_snapshot', organisms.DataSnapshot()),
('image_text_25_75_group', organisms.ImageText2575Group()),
('image_text_50_50_group', organisms.ImageText5050Group()),
('half_width_link_blob_group', organisms.HalfWidthLinkBlobGroup()),
('third_width_link_blob_group', organisms.ThirdWidthLinkBlobGroup()),
], blank=True)
secondary_nav_exclude_sibling_pages = models.BooleanField(default=False)
content_panels = CFGOVPage.content_panels + [
StreamFieldPanel('header'),
StreamFieldPanel('content'),
]
sidefoot_panels = CFGOVPage.sidefoot_panels + [
FieldPanel('secondary_nav_exclude_sibling_pages'),
]
edit_handler = TabbedInterface([
ObjectList(content_panels, heading='General Content'),
ObjectList(sidefoot_panels, heading='Sidebar'),
ObjectList(CFGOVPage.settings_panels, heading='Configuration'),
])
template = 'browse-basic/index.html'
objects = PageManager()
@property
def page_js(self):
return (
super(BrowsePage, self).page_js + ['secondary-navigation.js']
)
def get_context(self, request, *args, **kwargs):
context = super(BrowsePage, self).get_context(request, *args, **kwargs)
context.update({'get_secondary_nav_items': get_secondary_nav_items})
return context
| true | true |
f7208bc906110bb2c8cae40a156edcf4c7547c8c | 7,727 | py | Python | plugins/opencv/src/opencv/__init__.py | IGx89/scrypted | 577b00a090393f31aaa81de67f5fd4555995921a | [
"MIT"
] | null | null | null | plugins/opencv/src/opencv/__init__.py | IGx89/scrypted | 577b00a090393f31aaa81de67f5fd4555995921a | [
"MIT"
] | null | null | null | plugins/opencv/src/opencv/__init__.py | IGx89/scrypted | 577b00a090393f31aaa81de67f5fd4555995921a | [
"MIT"
] | null | null | null | from __future__ import annotations
from time import sleep
from detect import DetectionSession, DetectPlugin
from typing import Any, List
import numpy as np
import cv2
import imutils
from gi.repository import GLib, Gst
from scrypted_sdk.types import ObjectDetectionModel, ObjectDetectionResult, ObjectsDetected
class OpenCVDetectionSession(DetectionSession):
    """Per-stream detection state for the OpenCV motion detector."""
    # Capture handle for the stream; None until opened, released in end_session.
    cap: cv2.VideoCapture
    # Previous blurred grayscale frame used for differencing
    # (None before the first frame has been seen).
    previous_frame: Any
    def __init__(self) -> None:
        super().__init__()
        self.previous_frame = None
        self.cap = None
# Default tuning values; overridable per camera via the ObjectDetection
# settings surfaced by OpenCVPlugin.getDetectionModel().
defaultThreshold = 25  # per-pixel change threshold (0-255)
defaultArea = 2000     # minimum bounding-box area (source pixels) to report motion
defaultInterval = 250  # milliseconds between analyzed frames
class OpenCVPlugin(DetectPlugin):
    """Motion detection using OpenCV frame differencing.

    Each analyzed frame is blurred, differenced against the previous frame,
    thresholded and dilated; contours of the surviving blobs whose bounding
    box (in source coordinates) exceeds the configured area are reported as
    'motion' detections.
    """

    def __init__(self, nativeId: str | None = None):
        super().__init__(nativeId=nativeId)
        # Input is the I420 Y (luma) plane: already single-channel grayscale,
        # so no color conversion is needed before differencing.
        # (An alternative BGRA path -- retainAspectRatio=True,
        # color2Gray=cv2.COLOR_BGRA2GRAY, pixelFormat="BGRA", 4 channels --
        # previously sat in a dead `if True:/else:` branch; it has been
        # removed, preserving the reachable behavior.)
        self.retainAspectRatio = False
        self.color2Gray = None
        self.pixelFormat = "I420"
        self.pixelFormatChannelCount = 1

    async def getDetectionModel(self) -> ObjectDetectionModel:
        """Describe this detector and its user-tunable settings."""
        d: ObjectDetectionModel = {
            'name': '@scrypted/opencv',
            'classes': ['motion'],
        }
        settings = [
            {
                'title': "Motion Area",
                'description': "The area size required to trigger motion. Higher values (larger areas) are less sensitive. Setting this to 0 will output all matches into the console.",
                'value': defaultArea,
                'key': 'area',
                'placeholder': defaultArea,
                'type': 'number',
            },
            {
                'title': "Motion Threshold",
                'description': "The threshold required to consider a pixel changed. Higher values (larger changes) are less sensitive.",
                'value': defaultThreshold,
                'key': 'threshold',
                'placeholder': defaultThreshold,
                'type': 'number',
            },
            {
                'title': "Frame Analysis Interval",
                'description': "The number of milliseconds to wait between motion analysis.",
                'value': defaultInterval,
                'key': 'interval',
                'placeholder': defaultInterval,
                'type': 'number',
            },
        ]
        d['settings'] = settings
        return d

    def get_pixel_format(self):
        """Pixel format requested from the video pipeline."""
        return self.pixelFormat

    def parse_settings(self, settings: Any):
        """Return (area, threshold, interval), falling back to module defaults."""
        area = defaultArea
        threshold = defaultThreshold
        interval = defaultInterval
        if settings:
            area = float(settings.get('area', area))
            threshold = int(settings.get('threshold', threshold))
            interval = float(settings.get('interval', interval))
        return area, threshold, interval

    def detect(self, detection_session: OpenCVDetectionSession, frame, settings: Any, src_size, convert_to_src_size) -> ObjectsDetected:
        """Diff *frame* against the session's previous frame and report motion.

        Returns None for the first frame of a session (nothing to diff against
        yet). Bounding boxes are converted to source-image coordinates before
        the area filter is applied.
        """
        area, threshold, interval = self.parse_settings(settings)

        # see get_detection_input_size on undocumented size requirements for GRAY8
        if self.color2Gray is not None:
            gray = cv2.cvtColor(frame, self.color2Gray)
        else:
            gray = frame

        # Blur to suppress sensor noise before differencing.
        curFrame = cv2.GaussianBlur(gray, (21, 21), 0)
        if detection_session.previous_frame is None:
            detection_session.previous_frame = curFrame
            return

        frameDelta = cv2.absdiff(detection_session.previous_frame, curFrame)
        detection_session.previous_frame = curFrame

        # Binarize the per-pixel differences, then dilate so nearby blobs
        # merge into contiguous contours.
        _, thresh = cv2.threshold(frameDelta, threshold, 255, cv2.THRESH_BINARY)
        dilated = cv2.dilate(thresh, None, iterations=2)
        fcontours = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        contours = imutils.grab_contours(fcontours)

        detections: List[ObjectDetectionResult] = []
        detection_result: ObjectsDetected = {}
        detection_result['detections'] = detections
        detection_result['inputDimensions'] = src_size

        for c in contours:
            x, y, w, h = cv2.boundingRect(c)
            # Map both box corners into source coordinates; the area filter
            # below therefore operates in source pixels.
            x2, y2 = convert_to_src_size((x + w, y + h))
            x, y = convert_to_src_size((x, y))
            w = x2 - x + 1
            h = y2 - y + 1
            contour_area = w * h
            if not area or contour_area > area:
                detection: ObjectDetectionResult = {}
                detection['boundingBox'] = (x, y, w, h)
                detection['className'] = 'motion'
                # With area filtering disabled (0), report the raw area as
                # the score so it can be inspected in the console.
                detection['score'] = 1 if area else contour_area
                detections.append(detection)
        return detection_result

    def run_detection_jpeg(self, detection_session: DetectionSession, image_bytes: bytes, min_score: float) -> ObjectsDetected:
        """Frame differencing needs a stream; single JPEGs are unsupported."""
        raise Exception('can not run motion detection on jpeg')

    def get_detection_input_size(self, src_size):
        """Return the (width, height) the pipeline should scale frames to."""
        # The initial implementation of this plugin used BGRA
        # because it seemed impossible to pull the Y frame out of I420 without corruption.
        # This is because while 318x174 is aspect ratio correct,
        # it seems to cause strange issues with stride and the image is skewed.
        # By using 300x300, this seems to avoid some undocumented minimum size
        # requirement in gst-videoscale or opencv. Unclear which.
        # This is the same input size as tensorflow-lite. Allows for better pipelining.
        if not self.retainAspectRatio:
            return (300, 300)
        width, height = src_size
        if (width > height):
            if (width > 318):
                height = height / width * 318
                width = 318
        else:
            if (height > 318):
                width = width / height * 318
                height = 318
        # Round both dimensions down to multiples of 6 to keep strides sane.
        width = int(np.floor(width / 6) * 6)
        height = int(np.floor(height / 6) * 6)
        return width, height

    def end_session(self, detection_session: OpenCVDetectionSession):
        """Release the session's capture handle, then defer to the base class."""
        if detection_session and detection_session.cap:
            detection_session.cap.release()
            detection_session.cap = None
        return super().end_session(detection_session)

    def run_detection_gstsample(self, detection_session: OpenCVDetectionSession, gst_sample, settings: Any, src_size, convert_to_src_size) -> ObjectsDetected:
        """Wrap a GStreamer sample's buffer in a numpy view and run detect()."""
        buf = gst_sample.get_buffer()
        caps = gst_sample.get_caps()
        # can't trust the width value, compute the stride
        height = caps.get_structure(0).get_value('height')
        width = caps.get_structure(0).get_value('width')
        result, info = buf.map(Gst.MapFlags.READ)
        if not result:
            return
        try:
            # Zero-copy view over the mapped buffer; detect() must complete
            # before the buffer is unmapped in the finally clause.
            mat = np.ndarray(
                (height,
                 width,
                 self.pixelFormatChannelCount),
                buffer=info.data,
                dtype=np.uint8)
            return self.detect(detection_session, mat, settings, src_size, convert_to_src_size)
        finally:
            buf.unmap(info)

    def create_detection_session(self):
        """One OpenCVDetectionSession per stream."""
        return OpenCVDetectionSession()

    def detection_event_notified(self, settings: Any):
        """Pace analysis by sleeping the configured interval between frames."""
        area, threshold, interval = self.parse_settings(settings)
        # it is safe to block here because gstreamer creates a queue thread
        sleep(interval / 1000)
        return super().detection_event_notified(settings)
| 38.442786 | 184 | 0.608645 | from __future__ import annotations
from time import sleep
from detect import DetectionSession, DetectPlugin
from typing import Any, List
import numpy as np
import cv2
import imutils
from gi.repository import GLib, Gst
from scrypted_sdk.types import ObjectDetectionModel, ObjectDetectionResult, ObjectsDetected
class OpenCVDetectionSession(DetectionSession):
cap: cv2.VideoCapture
previous_frame: Any
def __init__(self) -> None:
super().__init__()
self.previous_frame = None
self.cap = None
defaultThreshold = 25
defaultArea = 2000
defaultInterval = 250
class OpenCVPlugin(DetectPlugin):
def __init__(self, nativeId: str | None = None):
super().__init__(nativeId=nativeId)
self.color2Gray = None
self.pixelFormat = "I420"
self.pixelFormatChannelCount = 1
if True:
self.retainAspectRatio = False
self.color2Gray = None
self.pixelFormat = "I420"
self.pixelFormatChannelCount = 1
else:
self.retainAspectRatio = True
self.color2Gray = cv2.COLOR_BGRA2GRAY
self.pixelFormat = "BGRA"
self.pixelFormatChannelCount = 4
async def getDetectionModel(self) -> ObjectDetectionModel:
d: ObjectDetectionModel = {
'name': '@scrypted/opencv',
'classes': ['motion'],
}
settings = [
{
'title': "Motion Area",
'description': "The area size required to trigger motion. Higher values (larger areas) are less sensitive. Setting this to 0 will output all matches into the console.",
'value': defaultArea,
'key': 'area',
'placeholder': defaultArea,
'type': 'number',
},
{
'title': "Motion Threshold",
'description': "The threshold required to consider a pixel changed. Higher values (larger changes) are less sensitive.",
'value': defaultThreshold,
'key': 'threshold',
'placeholder': defaultThreshold,
'type': 'number',
},
{
'title': "Frame Analysis Interval",
'description': "The number of milliseconds to wait between motion analysis.",
'value': defaultInterval,
'key': 'interval',
'placeholder': defaultInterval,
'type': 'number',
},
]
d['settings'] = settings
return d
def get_pixel_format(self):
return self.pixelFormat
def parse_settings(self, settings: Any):
area = defaultArea
threshold = defaultThreshold
interval = defaultInterval
if settings:
area = float(settings.get('area', area))
threshold = int(settings.get('threshold', threshold))
interval = float(settings.get('interval', interval))
return area, threshold, interval
def detect(self, detection_session: OpenCVDetectionSession, frame, settings: Any, src_size, convert_to_src_size) -> ObjectsDetected:
area, threshold, interval = self.parse_settings(settings)
if self.color2Gray != None:
gray = cv2.cvtColor(frame, self.color2Gray)
else:
gray = frame
curFrame = cv2.GaussianBlur(gray, (21,21), 0)
if detection_session.previous_frame is None:
detection_session.previous_frame = curFrame
return
frameDelta = cv2.absdiff(detection_session.previous_frame, curFrame)
detection_session.previous_frame = curFrame
_, thresh = cv2.threshold(frameDelta, threshold, 255, cv2.THRESH_BINARY)
dilated = cv2.dilate(thresh, None, iterations=2)
fcontours = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = imutils.grab_contours(fcontours)
detections: List[ObjectDetectionResult] = []
detection_result: ObjectsDetected = {}
detection_result['detections'] = detections
detection_result['inputDimensions'] = src_size
for c in contours:
x, y, w, h = cv2.boundingRect(c)
x2, y2 = convert_to_src_size((x + w, y + h))
x, y = convert_to_src_size((x, y))
w = x2 - x + 1
h = y2 - y + 1
contour_area = w * h
if not area or contour_area > area:
detection: ObjectDetectionResult = {}
detection['boundingBox'] = (x, y, w, h)
detection['className'] = 'motion'
detection['score'] = 1 if area else contour_area
detections.append(detection)
return detection_result
def run_detection_jpeg(self, detection_session: DetectionSession, image_bytes: bytes, min_score: float) -> ObjectsDetected:
raise Exception('can not run motion detection on jpeg')
def get_detection_input_size(self, src_size):
if not self.retainAspectRatio:
return (300, 300)
width, height = src_size
if (width > height):
if (width > 318):
height = height / width * 318
width = 318
else:
if (height > 318):
width = width / height * 318
height = 318
width = int(np.floor(width / 6) * 6)
height = int(np.floor(height / 6) * 6)
return width, height
def end_session(self, detection_session: OpenCVDetectionSession):
if detection_session and detection_session.cap:
detection_session.cap.release()
detection_session.cap = None
return super().end_session(detection_session)
def run_detection_gstsample(self, detection_session: OpenCVDetectionSession, gst_sample, settings: Any, src_size, convert_to_src_size)-> ObjectsDetected:
buf = gst_sample.get_buffer()
caps = gst_sample.get_caps()
height = caps.get_structure(0).get_value('height')
width = caps.get_structure(0).get_value('width')
result, info = buf.map(Gst.MapFlags.READ)
if not result:
return
try:
mat = np.ndarray(
(height,
width,
self.pixelFormatChannelCount),
buffer=info.data,
dtype= np.uint8)
return self.detect(detection_session, mat, settings, src_size, convert_to_src_size)
finally:
buf.unmap(info)
def create_detection_session(self):
return OpenCVDetectionSession()
def detection_event_notified(self, settings: Any):
area, threshold, interval = self.parse_settings(settings)
# it is safe to block here because gstreamer creates a queue thread
sleep(interval / 1000)
return super().detection_event_notified(settings)
| true | true |
f7208be42fc87aa1346f3bb00981d1c67e5429aa | 1,457 | py | Python | manager.py | Hugh-wong/hydra | 5f2c4770e655d41d3c535f6e3c29ec4848d5d60e | [
"MIT"
] | 3 | 2017-02-03T01:44:30.000Z | 2019-02-27T12:00:00.000Z | manager.py | Hugh-wong/hydra | 5f2c4770e655d41d3c535f6e3c29ec4848d5d60e | [
"MIT"
] | null | null | null | manager.py | Hugh-wong/hydra | 5f2c4770e655d41d3c535f6e3c29ec4848d5d60e | [
"MIT"
] | 1 | 2021-07-12T07:41:07.000Z | 2021-07-12T07:41:07.000Z | # coding=utf-8
import sys
import signal
import time
from multiprocessing import Process
from allocator import Allocator, Event
class Manager(object):
    """Supervise a group of Allocator processes.

    Each allocator runs in its own child process and watches a shared Event
    ("poison pill"); setting the event tells the allocator to shut down.
    """
    def __init__(self, cfg_list):
        """Build one Allocator (and its stop Event) per config dict."""
        self.allocator_list = []
        self.event_list = []
        for cfg in cfg_list:
            event = Event()
            cfg.update({'poison': event})
            self.allocator_list.append(Allocator(**cfg))
            self.event_list.append(event)

    def start_all(self):
        """Start every allocator in its own child process."""
        self.process_list = []
        for allocator in self.allocator_list:
            process = Process(target=allocator.start)
            process.start()
            self.process_list.append(process)

    def stop_all(self, signum, frame):
        """Signal-handler entry point: stop all allocators and exit.

        Parameters follow the signal-handler convention (signum, frame);
        the previous parameter name ``signal`` shadowed the imported module.
        """
        for event in self.event_list:
            event.set()
        for process in self.process_list:
            process.join()
        sys.exit()

    @classmethod
    def trigger(cls, cfg_list):
        """Outer interface: run the allocators until SIGINT/SIGTERM arrives."""
        manager = cls(cfg_list)
        manager.start_all()
        signal.signal(signal.SIGINT, manager.stop_all)
        signal.signal(signal.SIGTERM, manager.stop_all)
        # Idle loop: stop_all (invoked from the signal handler) calls
        # sys.exit(), so this loop only ends via SystemExit. The previous
        # trailing manager.stop_all(None, None) call was unreachable and
        # has been removed.
        while True:
            time.sleep(2)
| 28.019231 | 105 | 0.617021 |
import sys
import signal
import time
from multiprocessing import Process
from allocator import Allocator, Event
class Manager(object):
def __init__(self, cfg_list):
self.allocator_list = []
self.event_list = []
for cfg in cfg_list:
event = Event()
cfg.update({'poison': event})
self.allocator_list.append(Allocator(**cfg))
self.event_list.append(event)
def start_all(self):
self.process_list = []
for allocator in self.allocator_list:
process = Process(target=allocator.start)
process.start()
self.process_list.append(process)
def stop_all(self, signal, frame):
for event in self.event_list:
event.set()
for process in self.process_list:
process.join()
sys.exit()
@classmethod
def trigger(cls, cfg_list):
manager = cls(cfg_list)
manager.start_all()
signal.signal(signal.SIGINT, manager.stop_all)
signal.signal(signal.SIGTERM, manager.stop_all)
while True:
time.sleep(2)
manager.stop_all(None, None)
| true | true |
f7208c7f6012bde16b47b8b7a1531f00d2196076 | 1,084 | py | Python | tests/test_creational/test_prototype.py | smartlegionlab/python-patterns | be898272e4358fa2e60ed9f61ce5ed10aa367e77 | [
"BSD-3-Clause"
] | 2 | 2021-11-17T21:35:49.000Z | 2022-02-09T16:47:20.000Z | tests/test_creational/test_prototype.py | smartlegionlab/python-patterns | be898272e4358fa2e60ed9f61ce5ed10aa367e77 | [
"BSD-3-Clause"
] | null | null | null | tests/test_creational/test_prototype.py | smartlegionlab/python-patterns | be898272e4358fa2e60ed9f61ce5ed10aa367e77 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# --------------------------------------------------------
# Licensed under the terms of the BSD 3-Clause License
# (see LICENSE for details).
# Copyright © 2018-2021, A.A Suvorov
# All rights reserved.
# --------------------------------------------------------
"""Tests prototype.py"""
from patterns.creational.prototype import Bird
class TestPrototype:
    """Unit tests for the Prototype registry (pytest fixtures: prototype, bird)."""
    def test_register(self, prototype, bird):
        """register() should store the object under the given identifier."""
        prototype.register('Bird', bird)
        assert 'Bird' in prototype._objects
    def test_unregister(self, prototype, bird):
        """unregister() should remove a previously registered identifier."""
        prototype.register('Bird', bird)
        prototype.unregister('Bird')
        assert 'Bird' not in prototype._objects
    def test_clone(self, prototype, bird):
        """clone() should produce an instance of the registered class."""
        prototype.register('Bird', bird)
        duck = prototype.clone('Bird', {'name': 'Duck'})
        assert isinstance(duck, Bird)
    def test_get_attr(self, prototype, bird):
        """clone() should apply the attribute overrides to the copy."""
        prototype.register('Bird', bird)
        duck = prototype.clone('Bird', {'name': 'Duck'})
        assert getattr(duck, 'name')
        assert duck.name == 'Duck'
| 33.875 | 58 | 0.580258 |
from patterns.creational.prototype import Bird
class TestPrototype:
def test_register(self, prototype, bird):
prototype.register('Bird', bird)
assert 'Bird' in prototype._objects
def test_unregister(self, prototype, bird):
prototype.register('Bird', bird)
prototype.unregister('Bird')
assert 'Bird' not in prototype._objects
def test_clone(self, prototype, bird):
prototype.register('Bird', bird)
duck = prototype.clone('Bird', {'name': 'Duck'})
assert isinstance(duck, Bird)
def test_get_attr(self, prototype, bird):
prototype.register('Bird', bird)
duck = prototype.clone('Bird', {'name': 'Duck'})
assert getattr(duck, 'name')
assert duck.name == 'Duck'
| true | true |
f7208ce7bbb8661f4bd13d02bdc81b7510d9775a | 195 | py | Python | Demo/log/example_log_error_file.py | quecpython/EC100Y-SDK | 712c7eb7b54a3971009d94f6d6b21a6011d56f68 | [
"MIT"
] | 4 | 2021-01-28T01:30:59.000Z | 2021-06-15T07:13:41.000Z | Demo/log/example_log_error_file.py | QuePython/EC100Y-SDK | 712c7eb7b54a3971009d94f6d6b21a6011d56f68 | [
"MIT"
] | null | null | null | Demo/log/example_log_error_file.py | QuePython/EC100Y-SDK | 712c7eb7b54a3971009d94f6d6b21a6011d56f68 | [
"MIT"
] | 3 | 2021-04-07T09:55:59.000Z | 2022-01-08T15:15:23.000Z | import log
log.basicConfig(level=log.ERROR) # Set the minimum log output level to ERROR
# Get a logger object. If no name is given, the root logger is returned;
# repeated calls to getLogger with the same name return the same logger object.
log = log.getLogger("error")
log.error("Test error message!!")
| 21.666667 | 69 | 0.784615 | import log
log.basicConfig(level=log.ERROR)
log = log.getLogger("error")
log.error("Test error message!!")
| true | true |
f7208d45b1bd9c79cf4bba8f873334a67415881d | 2,026 | py | Python | Python3_Data_Structure/32_Python_Amortized_Analysis/00_Python_PAA.py | jmmedel/Python3-Data-Structure-References | 3a607da2b67b5b80810d7084339e0602288c4f6b | [
"MIT"
] | null | null | null | Python3_Data_Structure/32_Python_Amortized_Analysis/00_Python_PAA.py | jmmedel/Python3-Data-Structure-References | 3a607da2b67b5b80810d7084339e0602288c4f6b | [
"MIT"
] | null | null | null | Python3_Data_Structure/32_Python_Amortized_Analysis/00_Python_PAA.py | jmmedel/Python3-Data-Structure-References | 3a607da2b67b5b80810d7084339e0602288c4f6b | [
"MIT"
] | null | null | null |
"""
Python - Amortized Analysis
Amortized analysis involves estimating the run time for the sequence of operations in a program without taking into consideration the span of the data distribution in the input values. A simple example is finding a value in a sorted list is quicker than in an unsorted list. If the list is already sorted, it does not matter how distributed the data is. But of course the length of the list has an impact as it decides the number of steps the algorithm has to go through to get the final result.
So we see that if the initial cost of a single step of obtaining a sorted list is high, then the cost of subsequent steps of finding an element becomes considerably low. So Amortized analysis helps us find a bound on the worst-case running time for a sequence of operations. There are three approaches to amortized analysis.
Accounting Method − This involves assigning a cost to each operation performed. If the actual operation finishes quicker than the assigned time then some positive credit is accumulated in the analysis. In the reverse scenario it will be negative credit. To keep track of these accumulated credits, we use a stack or tree data structure. The operations which are carried out early ( like sorting the list) have high amortized cost but the operations that are late in sequence have lower amortized cost as the accumulated credit is utilized. So the amortized cost is an upper bound of actual cost.
Potential Method − In this method the saved credit is utilized for future operations as a mathematical function of the state of the data structure. The evaluation of the mathematical function and the amortized cost should be equal. So when the actual cost is greater than the amortized cost, there is a decrease in potential, and it is utilized for future operations which are expensive.
Aggregate analysis − In this method we estimate the upper bound on the total cost of n steps. The amortized cost is a simple division of the total cost by the number of steps (n).
"""
| 106.631579 | 595 | 0.80306 | true | true | |
f7208d73df1d4b1e754b10c36ce77d84bdc0b130 | 7,009 | py | Python | loadgen/generate_load.py | hythloda/ecommerce-demo | 83d23475677d546db59879452f3e388581ab88de | [
"Apache-2.0"
] | null | null | null | loadgen/generate_load.py | hythloda/ecommerce-demo | 83d23475677d546db59879452f3e388581ab88de | [
"Apache-2.0"
] | null | null | null | loadgen/generate_load.py | hythloda/ecommerce-demo | 83d23475677d546db59879452f3e388581ab88de | [
"Apache-2.0"
] | null | null | null | import barnum, random, time, json, requests, math, os
from mysql.connector import connect, Error
from kafka import KafkaProducer
# CONFIG
# Seed sizes for the initial users/items inserts.
userSeedCount = 10000
itemSeedCount = 1000
# Total purchases to generate and the pacing between them.
purchaseGenCount = 500000
purchaseGenEveryMS = 100
pageviewMultiplier = 75 # Translates to 75x purchases, currently 750/sec or 65M/day
# Bounds for randomly generated item inventory and price.
itemInventoryMin = 1000
itemInventoryMax = 5000
itemPriceMin = 5
itemPriceMax = 500
# MySQL connection settings (Debezium streams this server's binlog).
mysqlHost = 'mysql'
mysqlPort = '3306'
mysqlUser = 'root'
mysqlPass = 'debezium'
# Kafka / Debezium endpoints and the topic pageviews are published to.
kafkaHostPort = os.getenv('KAFKA_ADDR', 'kafka:9092')
kafkaTopic = 'pageviews'
debeziumHostPort = 'debezium:8083'
# Value pools for randomly generated events.
channels = ['organic search', 'paid search', 'referral', 'social', 'display']
categories = ['widgets', 'gadgets', 'doodads', 'clearance']
# INSERT TEMPLATES
# Parameterized SQL statements; placeholders are bound by mysql.connector.
item_insert = "INSERT INTO shop.items (name, category, price, inventory) VALUES ( %s, %s, %s, %s )"
user_insert = "INSERT INTO shop.users (email, is_vip) VALUES ( %s, %s )"
purchase_insert = "INSERT INTO shop.purchases (user_id, item_id, quantity, purchase_price) VALUES ( %s, %s, %s, %s )"
# Initialize Debezium (Kafka Connect component): register a MySQL source
# connector so database changes are streamed into Kafka.
# NOTE(review): the response status is not checked, so a failed connector
# registration is silent -- confirm whether that is intentional.
requests.post(('http://%s/connectors' % debeziumHostPort),
    json={
        "name": "mysql-connector",
        "config": {
            "connector.class": "io.debezium.connector.mysql.MySqlConnector",
            "database.hostname": mysqlHost,
            "database.port": mysqlPort,
            "database.user": mysqlUser,
            "database.password": mysqlPass,
            "database.server.name": mysqlHost,
            "database.server.id": '1234',
            "database.history.kafka.bootstrap.servers": kafkaHostPort,
            "database.history.kafka.topic": "mysql-history",
            "time.precision.mode": "connect"
        }
    }
)
# Initialize the Kafka producer used to publish JSON-encoded pageview events.
producer = KafkaProducer(bootstrap_servers=[kafkaHostPort],
    value_serializer=lambda x:
    json.dumps(x).encode('utf-8'))
def generatePageview(viewer_id, target_id, page_type):
return {
"user_id": viewer_id,
"url": f'/{page_type}/{target_id}',
"channel": random.choice(channels),
"received_at": int(time.time())
}
try:
with connect(
host=mysqlHost,
user=mysqlUser,
password=mysqlPass,
) as connection:
with connection.cursor() as cursor:
print("Initializing shop database...")
cursor.execute('CREATE DATABASE IF NOT EXISTS shop;')
cursor.execute(
"""CREATE TABLE IF NOT EXISTS shop.users
(
id SERIAL PRIMARY KEY,
email VARCHAR(255),
is_vip BOOLEAN DEFAULT FALSE,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
);"""
)
cursor.execute(
"""CREATE TABLE IF NOT EXISTS shop.items
(
id SERIAL PRIMARY KEY,
name VARCHAR(100),
category VARCHAR(100),
price DECIMAL(7,2),
inventory INT,
inventory_updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
);"""
)
cursor.execute(
"""CREATE TABLE IF NOT EXISTS shop.purchases
(
id SERIAL PRIMARY KEY,
user_id BIGINT UNSIGNED REFERENCES user(id),
item_id BIGINT UNSIGNED REFERENCES item(id),
status TINYINT UNSIGNED DEFAULT 1,
quantity INT UNSIGNED DEFAULT 1,
purchase_price DECIMAL(12,2),
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
);"""
)
connection.commit()
print("Seeding data...")
cursor.executemany(
item_insert,
[
(
barnum.create_nouns(),
random.choice(categories),
random.randint(itemPriceMin*100,itemPriceMax*100)/100,
random.randint(itemInventoryMin,itemInventoryMax)
) for i in range(itemSeedCount)
]
)
cursor.executemany(
user_insert,
[
(
barnum.create_email(),
(random.randint(0,10) > 8)
) for i in range(userSeedCount)
]
)
connection.commit()
print("Getting item ID and PRICEs...")
cursor.execute("SELECT id, price FROM shop.items")
item_prices = [(row[0], row[1]) for row in cursor]
print("Preparing to loop + seed kafka pageviews and purchases")
for i in range(purchaseGenCount):
# Get a user and item to purchase
purchase_item = random.choice(item_prices)
purchase_user = random.randint(0,userSeedCount-1)
purchase_quantity = random.randint(1,5)
# Write purchaser pageview
producer.send(kafkaTopic, key=str(purchase_user).encode('ascii'), value=generatePageview(purchase_user, purchase_item[0], 'products'))
# Write random pageviews to products or profiles
pageviewOscillator = int(pageviewMultiplier + (math.sin(time.time()/1000)*50))
for i in range(pageviewOscillator):
rand_user = random.randint(0,userSeedCount)
rand_page_type = random.choice(['products', 'profiles'])
target_id_max_range = itemSeedCount if rand_page_type == 'products' else userSeedCount
producer.send(kafkaTopic, key=str(rand_user).encode('ascii'), value=generatePageview(rand_user, random.randint(0,target_id_max_range), rand_page_type))
# Write purchase row
cursor.execute(
purchase_insert,
(
purchase_user,
purchase_item[0],
purchase_quantity,
purchase_item[1] * purchase_quantity
)
)
connection.commit()
#Pause
time.sleep(purchaseGenEveryMS/1000)
connection.close()
except Error as e:
print(e)
| 40.75 | 171 | 0.541304 | import barnum, random, time, json, requests, math, os
from mysql.connector import connect, Error
from kafka import KafkaProducer
userSeedCount = 10000
itemSeedCount = 1000
purchaseGenCount = 500000
purchaseGenEveryMS = 100
pageviewMultiplier = 75
itemInventoryMin = 1000
itemInventoryMax = 5000
itemPriceMin = 5
itemPriceMax = 500
mysqlHost = 'mysql'
mysqlPort = '3306'
mysqlUser = 'root'
mysqlPass = 'debezium'
kafkaHostPort = os.getenv('KAFKA_ADDR', 'kafka:9092')
kafkaTopic = 'pageviews'
debeziumHostPort = 'debezium:8083'
channels = ['organic search', 'paid search', 'referral', 'social', 'display']
categories = ['widgets', 'gadgets', 'doodads', 'clearance']
item_insert = "INSERT INTO shop.items (name, category, price, inventory) VALUES ( %s, %s, %s, %s )"
user_insert = "INSERT INTO shop.users (email, is_vip) VALUES ( %s, %s )"
purchase_insert = "INSERT INTO shop.purchases (user_id, item_id, quantity, purchase_price) VALUES ( %s, %s, %s, %s )"
requests.post(('http://%s/connectors' % debeziumHostPort),
json={
"name": "mysql-connector",
"config": {
"connector.class": "io.debezium.connector.mysql.MySqlConnector",
"database.hostname": mysqlHost,
"database.port": mysqlPort,
"database.user": mysqlUser,
"database.password": mysqlPass,
"database.server.name": mysqlHost,
"database.server.id": '1234',
"database.history.kafka.bootstrap.servers": kafkaHostPort,
"database.history.kafka.topic": "mysql-history",
"time.precision.mode": "connect"
}
}
)
producer = KafkaProducer(bootstrap_servers=[kafkaHostPort],
value_serializer=lambda x:
json.dumps(x).encode('utf-8'))
def generatePageview(viewer_id, target_id, page_type):
return {
"user_id": viewer_id,
"url": f'/{page_type}/{target_id}',
"channel": random.choice(channels),
"received_at": int(time.time())
}
try:
with connect(
host=mysqlHost,
user=mysqlUser,
password=mysqlPass,
) as connection:
with connection.cursor() as cursor:
print("Initializing shop database...")
cursor.execute('CREATE DATABASE IF NOT EXISTS shop;')
cursor.execute(
"""CREATE TABLE IF NOT EXISTS shop.users
(
id SERIAL PRIMARY KEY,
email VARCHAR(255),
is_vip BOOLEAN DEFAULT FALSE,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
);"""
)
cursor.execute(
"""CREATE TABLE IF NOT EXISTS shop.items
(
id SERIAL PRIMARY KEY,
name VARCHAR(100),
category VARCHAR(100),
price DECIMAL(7,2),
inventory INT,
inventory_updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
);"""
)
cursor.execute(
"""CREATE TABLE IF NOT EXISTS shop.purchases
(
id SERIAL PRIMARY KEY,
user_id BIGINT UNSIGNED REFERENCES user(id),
item_id BIGINT UNSIGNED REFERENCES item(id),
status TINYINT UNSIGNED DEFAULT 1,
quantity INT UNSIGNED DEFAULT 1,
purchase_price DECIMAL(12,2),
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
);"""
)
connection.commit()
print("Seeding data...")
cursor.executemany(
item_insert,
[
(
barnum.create_nouns(),
random.choice(categories),
random.randint(itemPriceMin*100,itemPriceMax*100)/100,
random.randint(itemInventoryMin,itemInventoryMax)
) for i in range(itemSeedCount)
]
)
cursor.executemany(
user_insert,
[
(
barnum.create_email(),
(random.randint(0,10) > 8)
) for i in range(userSeedCount)
]
)
connection.commit()
print("Getting item ID and PRICEs...")
cursor.execute("SELECT id, price FROM shop.items")
item_prices = [(row[0], row[1]) for row in cursor]
print("Preparing to loop + seed kafka pageviews and purchases")
for i in range(purchaseGenCount):
purchase_item = random.choice(item_prices)
purchase_user = random.randint(0,userSeedCount-1)
purchase_quantity = random.randint(1,5)
producer.send(kafkaTopic, key=str(purchase_user).encode('ascii'), value=generatePageview(purchase_user, purchase_item[0], 'products'))
pageviewOscillator = int(pageviewMultiplier + (math.sin(time.time()/1000)*50))
for i in range(pageviewOscillator):
rand_user = random.randint(0,userSeedCount)
rand_page_type = random.choice(['products', 'profiles'])
target_id_max_range = itemSeedCount if rand_page_type == 'products' else userSeedCount
producer.send(kafkaTopic, key=str(rand_user).encode('ascii'), value=generatePageview(rand_user, random.randint(0,target_id_max_range), rand_page_type))
cursor.execute(
purchase_insert,
(
purchase_user,
purchase_item[0],
purchase_quantity,
purchase_item[1] * purchase_quantity
)
)
connection.commit()
time.sleep(purchaseGenEveryMS/1000)
connection.close()
except Error as e:
print(e)
| true | true |
f7208e835090cc50acccc601a70a34153f65abaf | 549 | py | Python | gcn/lp.py | liqimai/GraphConvForSSL | ef94a897292275680b1058685f2de9d4a8a6449c | [
"MIT"
] | 74 | 2019-04-09T11:53:27.000Z | 2022-03-24T09:22:30.000Z | gcn/lp.py | liqimai/GraphConvForSSL | ef94a897292275680b1058685f2de9d4a8a6449c | [
"MIT"
] | 4 | 2019-07-11T08:47:29.000Z | 2020-06-15T03:19:31.000Z | gcn/lp.py | liqimai/GraphConvForSSL | ef94a897292275680b1058685f2de9d4a8a6449c | [
"MIT"
] | 16 | 2019-04-15T16:20:07.000Z | 2022-03-07T08:42:26.000Z | import numpy as np
from gcn.graphconv import ap_approximate
def Model17(adj, alpha, y_train, y_test):
k = int(np.ceil(4 * alpha))
prediction, time = ap_approximate(adj, y_train, alpha, k)
predicted_labels = np.argmax(prediction, axis=1)
prediction = np.zeros(prediction.shape)
prediction[np.arange(prediction.shape[0]), predicted_labels] = 1
test_acc = np.sum(prediction * y_test) / np.sum(y_test)
test_acc_of_class = np.sum(prediction * y_test, axis=0) / np.sum(y_test, axis=0)
return test_acc, test_acc_of_class
| 36.6 | 84 | 0.717668 | import numpy as np
from gcn.graphconv import ap_approximate
def Model17(adj, alpha, y_train, y_test):
k = int(np.ceil(4 * alpha))
prediction, time = ap_approximate(adj, y_train, alpha, k)
predicted_labels = np.argmax(prediction, axis=1)
prediction = np.zeros(prediction.shape)
prediction[np.arange(prediction.shape[0]), predicted_labels] = 1
test_acc = np.sum(prediction * y_test) / np.sum(y_test)
test_acc_of_class = np.sum(prediction * y_test, axis=0) / np.sum(y_test, axis=0)
return test_acc, test_acc_of_class
| true | true |
f7208e8a4a26e682a31e9e84d47b5e97601f74d8 | 173 | py | Python | learning_greek/signals.py | lucafavatella/learning-greek | b29a96668992823e2ec89547b6c82fbbbb9af9f3 | [
"MIT"
] | 10 | 2015-04-10T06:35:01.000Z | 2021-07-19T01:40:22.000Z | learning_greek/signals.py | lucafavatella/learning-greek | b29a96668992823e2ec89547b6c82fbbbb9af9f3 | [
"MIT"
] | 16 | 2015-02-08T16:39:01.000Z | 2018-06-10T16:14:44.000Z | learning_greek/signals.py | lucafavatella/learning-greek | b29a96668992823e2ec89547b6c82fbbbb9af9f3 | [
"MIT"
] | 6 | 2015-02-12T18:56:40.000Z | 2020-10-11T18:59:37.000Z | import django.dispatch
adoption_level_change = django.dispatch.Signal(providing_args=["level", "request"])
blurb_read = django.dispatch.Signal(providing_args=["request"])
| 28.833333 | 83 | 0.797688 | import django.dispatch
adoption_level_change = django.dispatch.Signal(providing_args=["level", "request"])
blurb_read = django.dispatch.Signal(providing_args=["request"])
| true | true |
f7208edc797411262aa09c8538bfe7878909fc92 | 1,273 | py | Python | test/test_sync_reports_rotate.py | Atomicology/isilon_sdk_python | 91039da803ae37ed4abf8d2a3f59c333f3ef1866 | [
"MIT"
] | null | null | null | test/test_sync_reports_rotate.py | Atomicology/isilon_sdk_python | 91039da803ae37ed4abf8d2a3f59c333f3ef1866 | [
"MIT"
] | null | null | null | test/test_sync_reports_rotate.py | Atomicology/isilon_sdk_python | 91039da803ae37ed4abf8d2a3f59c333f3ef1866 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
ref: https://github.com/swagger-api/swagger-codegen
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.sync_reports_rotate import SyncReportsRotate
class TestSyncReportsRotate(unittest.TestCase):
""" SyncReportsRotate unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testSyncReportsRotate(self):
"""
Test SyncReportsRotate
"""
model = swagger_client.models.sync_reports_rotate.SyncReportsRotate()
if __name__ == '__main__':
unittest.main() | 25.979592 | 77 | 0.735271 |
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.sync_reports_rotate import SyncReportsRotate
class TestSyncReportsRotate(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testSyncReportsRotate(self):
model = swagger_client.models.sync_reports_rotate.SyncReportsRotate()
if __name__ == '__main__':
unittest.main() | true | true |
f7208fe4ecb7d3a8e41c130280425906b33dd803 | 47,072 | py | Python | BioSTEAM 2.x.x/biorefineries/TAL/system_TAL_glucose.py | yoelcortes/Bioindustrial-Complex | d39edfec88e443ef7a62218ca0215e3b105f4b96 | [
"MIT"
] | 2 | 2020-01-03T21:04:41.000Z | 2020-01-09T01:15:48.000Z | BioSTEAM 2.x.x/biorefineries/TAL/system_TAL_glucose.py | yoelcortes/Bioindustrial-Complex | d39edfec88e443ef7a62218ca0215e3b105f4b96 | [
"MIT"
] | 6 | 2020-01-03T21:31:27.000Z | 2020-02-28T13:53:56.000Z | BioSTEAM 2.x.x/biorefineries/TAL/system_TAL_glucose.py | yoelcortes/Bioindustrial-Complex | d39edfec88e443ef7a62218ca0215e3b105f4b96 | [
"MIT"
] | 2 | 2020-01-07T14:04:06.000Z | 2020-01-08T23:05:25.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Bioindustrial-Park: BioSTEAM's Premier Biorefinery Models and Results
# Copyright (C) 2022-2023, Sarang Bhagwat <sarangb2@illinois.edu> (this biorefinery)
#
# This module is under the UIUC open-source license. See
# github.com/BioSTEAMDevelopmentGroup/biosteam/blob/master/LICENSE.txt
# for license details.
"""
@author: sarangbhagwat
Created on Sun Aug 23 12:11:15 2020
This module is a modified implementation of modules from the following:
[1] Bhagwat et al., Sustainable Production of Acrylic Acid via 3-Hydroxypropionic Acid from Lignocellulosic Biomass. ACS Sustainable Chem. Eng. 2021, 9 (49), 16659–16669. https://doi.org/10.1021/acssuschemeng.1c05441
[2] Li et al., Sustainable Lactic Acid Production from Lignocellulosic Biomass. ACS Sustainable Chem. Eng. 2021, 9 (3), 1341–1351. https://doi.org/10.1021/acssuschemeng.0c08055
[3] Cortes-Peña et al., BioSTEAM: A Fast and Flexible Platform for the Design, Simulation, and Techno-Economic Analysis of Biorefineries under Uncertainty. ACS Sustainable Chem. Eng. 2020, 8 (8), 3302–3310. https://doi.org/10.1021/acssuschemeng.9b07040
All units are explicitly defined here for transparency and easy reference.
Naming conventions:
D = Distillation column
AC = Adsorption column
F = Flash tank or multiple-effect evaporator
H = Heat exchange
M = Mixer
P = Pump (including conveying belt)
R = Reactor
S = Splitter (including solid/liquid separator)
T = Tank or bin for storage
U = Other units
Processes:
100: Feedstock preprocessing
200: Pretreatment
300: Conversion
400: Separation
500: Wastewater treatment
600: Storage
700: Co-heat and power
800: Cooling utility generation
900: Miscellaneous facilities
1000: Heat exchanger network
"""
# %% Setup
import biosteam as bst
import thermosteam as tmo
import flexsolve as flx
import numpy as np
from math import exp as math_exp
# from biosteam import main_flowsheet as F
# from copy import deepcopy
# from biosteam import System
from thermosteam import Stream
# from biorefineries.cornstover import CellulosicEthanolTEA
from biorefineries.TAL import units, facilities
from biorefineries.TAL._process_specification import ProcessSpecification
from biorefineries.TAL.process_settings import price, CFs
from biorefineries.TAL.utils import find_split, splits_df, baseline_feedflow
from biorefineries.TAL.chemicals_data import TAL_chemicals, chemical_groups, \
soluble_organics, combustibles
# from biorefineries.TAL.tea import TALTEA
from biorefineries.cornstover import CellulosicEthanolTEA as TALTEA
from biosteam import SystemFactory
from warnings import filterwarnings
filterwarnings('ignore')
Rxn = tmo.reaction.Reaction
ParallelRxn = tmo.reaction.ParallelReaction
# from lactic.hx_network import HX_Network
# # Do this to be able to show more streams in a diagram
# bst.units.Mixer._graphics.edge_in *= 2
bst.speed_up()
flowsheet = bst.Flowsheet('TAL')
bst.main_flowsheet.set_flowsheet(flowsheet)
# Speeds up ShortcutDistillation
bst.units.ShortcutColumn.minimum_guess_distillate_recovery = 0
# Baseline cost year is 2016
bst.CE = 541.7
# _labor_2007to2016 = 22.71 / 19.55
# Set default thermo object for the system
tmo.settings.set_thermo(TAL_chemicals)
# %% Utils
R = 8.314
TAL_Hm = 30883.66976 # by Dannenfelser-Yalkowsky method
TAL_Tm = TAL_chemicals['TAL'].Tm # 458.15 K
TAL_c = 6056.69421768496 # fitted parameter
TAL_c_by_R = TAL_c/R
TAL_Hm_by_R = TAL_Hm/R
def get_TAL_solubility_in_water(T): # mol TAL : mol (TAL+water)
return math_exp(-(TAL_Hm_by_R) * (1/T - 1/TAL_Tm))/math_exp(TAL_c_by_R/T)
def get_mol_TAL_dissolved(T, mol_water):
TAL_x = get_TAL_solubility_in_water(T)
return mol_water*TAL_x/(1-TAL_x)
def get_TAL_solubility_in_water_gpL(T):
return get_mol_TAL_dissolved(T, 1000./18.)*TAL_chemicals['TAL'].MW
def get_K(chem_ID, stream, phase_1, phase_2):
return (stream[phase_1].imol[chem_ID]/stream[phase_1].F_mol)/max(1e-6, (stream[phase_2].imol[chem_ID]/stream[phase_2].F_mol))
def get_TAL_solublity_in_solvent_very_rough(T, solvent_ID='Hexanol', units='g/L'):
temp_stream =\
tmo.Stream('temp_stream_get_TAL_solublity_in_solvent_very_rough')
mol_water = mol_solvent = 1000
mol_TAL = get_mol_TAL_dissolved(T, mol_water)
temp_stream.imol['Water'] = mol_water
temp_stream.imol[solvent_ID] = mol_solvent
temp_stream.imol['TAL'] = mol_TAL
temp_stream.lle(T=T, P=temp_stream.P)
# temp_stream.show(N=100)
phase_1 = 'l' if temp_stream.imol['l', solvent_ID] > temp_stream.imol['L', solvent_ID] else 'L'
phase_2 = 'L' if phase_1=='l' else 'l'
K_TAL_in_extract = get_K('TAL', temp_stream, phase_1, phase_2)
# print(K_TAL_in_extract)
if units=='g/L':
temp_stream_2 = tmo.Stream('temp_stream_2_get_TAL_solublity_in_solvent_very_rough')
temp_stream_2.imol['TAL'] = K_TAL_in_extract*mol_TAL
temp_stream_2.imol[solvent_ID] = mol_solvent
return temp_stream_2.imass['TAL']/temp_stream_2.F_vol
elif units=='mol/mol':
return K_TAL_in_extract*mol_TAL/(mol_TAL+mol_solvent) #
def get_TAL_solubility_in_hexanol():
return 2.*0.0222/(2.*0.0222+0.951) # mol/mol; 2 * Marco's initial experimental solubility of 2.8 wt% at 21 C
def get_TAL_solubility_in_ethanol_ww():
return 0.167682 # solubility of 157.425 g-TAL per L-ethanol
# %%
@SystemFactory(ID = 'TAL_sys')
def create_TAL_sys(ins, outs):
# %%
# =============================================================================
# Feedstock
# =============================================================================
# feedstock = Stream('feedstock',
# baseline_feedflow.copy(),
# units='kg/hr',
# price=price['Feedstock'])
feedstock = Stream('feedstock')
feedstock.imass['Glucose'] = 29000.
feedstock.imass['H2O'] = 500.
feedstock.price = price['Glucose']*feedstock.imass['Glucose']/feedstock.F_mass
feedstock.F_mass = 25802.9 # at the baseline, the amount of TAL produced would exactly satisfy the US demand for sorbic acid with a hypothetical 100% TAL->sorbic acid conversion.
U101 = units.FeedstockPreprocessing('U101', ins=feedstock)
# Handling costs/utilities included in feedstock cost thus not considered here
U101.cost_items['System'].cost = 0
U101.cost_items['System'].kW = 0
# %%
# =============================================================================
# Conversion streams
# =============================================================================
# Flow and price will be updated in EnzymeHydrolysateMixer
enzyme = Stream('enzyme', units='kg/hr', price=price['Enzyme'])
# Used to adjust enzymatic hydrolysis solid loading, will be updated in EnzymeHydrolysateMixer
enzyme_water = Stream('enzyme_water', units='kg/hr')
# Corn steep liquor as nitrogen nutrient for microbes, flow updated in R301
CSL = Stream('CSL', units='kg/hr')
# Lime for neutralization of produced acid
# fermentation_lime = Stream('fermentation_lime', units='kg/hr')
# For diluting concentrated, inhibitor-reduced hydrolysate
dilution_water = Stream('dilution_water', units='kg/hr')
# =============================================================================
# Conversion units
# =============================================================================
# Cool hydrolysate down to fermentation temperature at 50°C
H301 = bst.units.HXutility('H301', ins=U101-0, T=50+273.15)
M304 = bst.units.Mixer('M304', ins=(H301-0, dilution_water))
M304.water_to_sugar_mol_ratio = 5.
@M304.add_specification()
def adjust_M304_water():
M304_ins_1 = M304.ins[1]
M304_ins_1.imol['Water'] = M304.water_to_sugar_mol_ratio * M304.ins[0].imol['Glucose', 'Xylose'].sum()
M304._run()
# M304.specification = adjust_M304_water()
M304_H = bst.units.HXutility('M304_H', ins=M304-0, T=30+273.15, rigorous=True)
# Mix pretreatment hydrolysate/enzyme mixture with fermentation seed
S302 = bst.Splitter('S302', ins=M304_H-0,
outs = ('to_seedtrain', 'to_cofermentation'),
split = 0.07) # split = inoculum ratio
# Cofermentation
R302 = units.CoFermentation('R302',
ins=(S302-1, '', CSL),
outs=('fermentation_effluent', 'CO2_fermentation'))
def include_seed_CSL_in_cofermentation(): # note: effluent always has 0 CSL
R302._run()
R302.ins[2].F_mass*=1./(1-S302.split[0])
R302.specification = include_seed_CSL_in_cofermentation
# ferm_ratio is the ratio of conversion relative to the fermenter
R303 = units.SeedTrain('R303', ins=S302-0, outs=('seed', 'CO2_seedtrain'), ferm_ratio=0.95)
T301 = units.SeedHoldTank('T301', ins=R303-0, outs=1-R302)
# %%
# =============================================================================
# Separation streams
# =============================================================================
# This flow will be automatically updated in CellMassFilter
# separation_sulfuric_acid = Stream('separation_sulfuric_acid', units='kg/hr')
# # # To be mixed with sulfuric acid, will be updated in SulfuricAdditionTank
# # separation_acid_water = Stream('separation_acid_water', units='kg/hr')
# separation_DPHP = Stream('DPHP', DPHP =feedstock_dry_mass*22.1/1000*0.93,
# H2O=feedstock_dry_mass*22.1/1000*0.07, units='kg/hr')
# # Ethanol for esterification reaction, will be updated in the EsterificationReactor
# separation_ethanol = Stream('separation_ethanol', Ethanol=feedstock_dry_mass*22.1/1000*0.93,
# H2O=feedstock_dry_mass*22.1/1000*0.07, units='kg/hr')
# For ester hydrolysis
# separation_hydrolysis_water = Stream('separation_hydrolysis_water', units='kg/hr')
Hexanol_minimal = Stream('Hexanol_minimal', units = 'kg/hr')
# Heptane = Stream('Heptane', units = 'kg/hr')
# Toluene = Stream('Toluene', units = 'kg/hr')
# Hexanol_s = Stream('Hexanol_s', units = 'kg/hr')
Heptane_s = Stream('Heptane_s', units = 'kg/hr')
Toluene_s = Stream('Toluene_s', units = 'kg/hr')
Hydrogen = Stream('Hydrogen', units = 'kg/hr')
KOH = Stream('KOH', units = 'kg/hr')
HCl = Stream('HCl', units = 'kg/hr')
# =============================================================================
# Separation units
# =============================================================================
# Fake unit to enable solid-liquid equilibrium for fermentation broth
U401 = bst.Unit('U401', ins=R302-0, outs=('fermentation_broth_first_sle'))
def U401_spec():
U401_ins_0 = U401.ins[0]
tot_TAL = U401_ins_0.imol['TAL']
U401_outs_0 = U401.outs[0]
U401_outs_0.copy_like(U401_ins_0)
mol_TAL_dissolved = get_mol_TAL_dissolved(U401_outs_0.T, U401_outs_0.imol['Water'])
# U401_outs_0.sle('TAL', U401_outs_0.T) #!!! TODO: use computationally cheaper way of changing from Stream to MultiStream
U401_outs_0.phases=['s', 'l']
U401_outs_0.imol['l', 'TAL'] = min(mol_TAL_dissolved, tot_TAL)
U401_outs_0.imol['s', 'TAL'] = tot_TAL - min(mol_TAL_dissolved, tot_TAL)
U401.specification = U401_spec
# Change broth temperature to adjust TAL solubility
H401 = bst.HXutility('H401', ins=U401-0, outs=('H401_0'), T=273.15+70.)
def H401_spec():
H401_ins_0 = H401.ins[0]
H401_ins_0_water=H401_ins_0.imol['Water']
tot_TAL = H401_ins_0.imol['TAL']
# H401_spec_obj_fn = lambda T: get_TAL_solubility_in_water_gpL(T) -\
# H401_ins_0.imass['TAL']/H401_ins_0.F_vol
H401_spec_obj_fn = lambda T: get_mol_TAL_dissolved(T, H401_ins_0_water) - tot_TAL
H401.T = flx.IQ_interpolation(H401_spec_obj_fn, H401.ins[0].T, 99.+273.15)
H401._run()
H401_outs_0 = H401.outs[0]
mol_TAL_dissolved = get_mol_TAL_dissolved(H401_outs_0.T, H401_outs_0.imol['Water'])
# H401_outs_0.sle('TAL', H401_outs_0.T) #!!! TODO: use computationally cheaper way of changing from Stream to MultiStream
H401_outs_0.phases = ('l', 's')
H401_outs_0.imol['l', 'TAL'] = min(mol_TAL_dissolved, tot_TAL)
H401_outs_0.imol['s', 'TAL'] = max(0., round(tot_TAL - min(mol_TAL_dissolved, tot_TAL), 5))
# H401_outs_0.imol['s', 'TAL'] = max(0.0001, tot_TAL - min(mol_TAL_dissolved, tot_TAL))
# if H401_outs_0.imol['s', 'TAL'] == 0.0001:
# H401_ins_0.imol['s', 'TAL'] += 0.0001
H401.specification = H401_spec
U402 = bst.FakeSplitter('U402', ins=H401-0, outs = ('thermally_decarboxylated_broth','vented_CO2'))
U402.decarboxylation_rxns = ParallelRxn([
Rxn('TAL + H2O -> PD + CO2', 'TAL', 0.25),
])
def get_TAL_decarboxylation_conversion(T=273.15+80.):
return (0.2*(T-273.15) + 8.)/100. # temporaury
def U402_spec():
U402_outs_0 = U402.outs[0]
U402_outs_0.copy_like(U402.ins[0])
U402_outs_0.phases = ('l', 's')
U402.decarboxylation_rxns[0].X = get_TAL_decarboxylation_conversion(T=U402_outs_0.T)
U402.decarboxylation_rxns[0](U402_outs_0['l'])
U402.outs[1].imol['CO2'] = U402_outs_0.imol['l', 'CO2']
U402.outs[1].phase = 'g'
U402_outs_0.imol['l', 'CO2'] = 0.
U402.specification = U402_spec
# H401_design_og = H401._design
# def H401_design_modified():
# H401.ins[0].copy_like(U401.ins[0])
# H401.outs[0].copy_like(U401.ins[0])
# H401_design_og()
# # Remove solids from fermentation broth, modified from the pressure filter in Humbird et al.
S401_index = [splits_df.index[0]] + splits_df.index[2:].to_list()
S401_cell_mass_split = [splits_df['stream_571'][0]] + splits_df['stream_571'][2:].to_list()
S401_filtrate_split = [splits_df['stream_535'][0]] + splits_df['stream_535'][2:].to_list()
S401 = bst.units.SolidsCentrifuge('S401', ins=U402-0, outs=('S401_solid_fraction', 'S401_liquid_fraction'),
# moisture_content=0.50,
split=find_split(S401_index,
S401_cell_mass_split,
S401_filtrate_split,
chemical_groups),
solids =\
['Xylan', 'Glucan', 'Lignin', 'FermMicrobe',\
'Ash', 'Arabinan', 'Galactan', 'Mannan'])
# def S401_TAL_split_spec():
# S401._run()
# S401_ins_0 = S401.ins[0]
# TOT_TAL = S401_ins_0.imol['TAL']
# dissolved_TAL = get_mol_TAL_dissolved(S401_ins_0.T, S401_ins_0.imol['Water'])
# S401.outs[0].imol['TAL'] = TOT_TAL - dissolved_TAL # crystallized TAL
# S401.outs[1].imol['TAL'] = dissolved_TAL
def S401_TAL_split_spec():
S401._run()
S401_ins_0 = S401.ins[0]
S401.outs[0].imol['TAL'] = S401_ins_0.imol['s', 'TAL']
S401.outs[1].imol['TAL'] = S401_ins_0.imol['l', 'TAL']
S401.specification = S401_TAL_split_spec
H402 = bst.HXutility('H402', ins=S401-1, outs=('H402_0'), T=273.15+1.)
# def HXcrystalize(stream, T=None, H=None, P=None, V=None):
# tot_TAL = stream.imol['TAL']
# mol_TAL_dissolved = get_mol_TAL_dissolved(stream.T, stream.imol['Water'])
# stream.phases = ('s', 'l')
# stream.T = H402.T
# tal_dissolved = min(mol_TAL_dissolved, tot_TAL)
# stream.imol['l', 'TAL'] =
# stream.imol['s', 'TAL'] = max(0.0001, tot_TAL - min(mol_TAL_dissolved, tot_TAL))
def H402_spec():
H402._run()
H402_ins_0 = H402.ins[0]
tot_TAL = H402_ins_0.imol['TAL']
H402_outs_0 = H402.outs[0]
TAL_solubility = get_mol_TAL_dissolved(H402_outs_0.T, H402_outs_0.imol['Water'])
H402_outs_0.phases = ('s', 'l')
H402_outs_0.T = H402.T
TAL_dissolved = min(TAL_solubility, tot_TAL)
H402_outs_0.imol['l', 'TAL'] = TAL_dissolved
H402_outs_0.imol['s', 'TAL'] = max(0, tot_TAL - TAL_dissolved)
# H402_outs_0.imol['s', 'TAL'] = max(0.0001, tot_TAL - TAL_dissolved)
# if H402_outs_0.imol['s', 'TAL'] == 0.0001:
# H402_ins_0.imol['s', 'TAL'] += 0.0001
H402.specification = H402_spec
S402 = bst.units.SolidsCentrifuge('S402', ins=H402-0, outs=('S402_solid_fraction', 'S402_liquid_fraction'),
# moisture_content=0.50,
split=find_split(S401_index,
S401_cell_mass_split,
S401_filtrate_split,
chemical_groups), solids =\
['Xylan', 'Glucan', 'Lignin', 'FermMicrobe',\
'Ash', 'Arabinan', 'Galactan', 'Mannan'])
def S402_TAL_split_spec():
# S402._run()
# S402_ins_0 = S402.ins[0]
# S402_outs_0 = S402.outs[0]
# S402_outs_0.imol['TAL'] = 1.
# S402_outs_0.sle('TAL', S402_outs_0.T) #!!! TODO: use computationally cheaper way of changing from Stream to MultiStream
# S402_outs_0.imol['s', 'TAL'] = S402_ins_0.imol['s', 'TAL']
# S402_outs_0.imol['l', 'TAL'] = 0.
# S402.outs[1].imol['TAL'] = S402_ins_0.imol['l', 'TAL']
S402_ins_0 = S402.ins[0]
solid_TAL = float(S402_ins_0.imol['s', 'TAL'])
S402_ins_0.imol['s', 'TAL'] = 0.
S402._run()
S402.outs[0].imol['TAL'] = solid_TAL
S402.outs[1].imol['TAL'] = S402_ins_0.imol['l', 'TAL']
S402_ins_0.imol['s', 'TAL'] = solid_TAL
S402.specification = S402_TAL_split_spec
H403 = bst.HXutility('H403', ins=S402-0, outs=('heated_TAL'), T=273.15+40.)
F401 = bst.Flash('F401', ins=H403-0, outs = ('volatiles', 'pure_TAL_product'), V = 0.99, P=101325.)
def F401_spec():
    """Set the F401 flash vapor fraction to the combined mole fraction of
    the volatile feed components (water, acetic acid, furfural, HMF),
    then simulate the flash."""
    F401_ins_0 = F401.ins[0]
    F401.V = sum(F401_ins_0.imol['H2O',
                                 'AceticAcid',
                                 'Furfural',
                                 'HMF',]) / F401_ins_0.F_mol
    F401._run()
# F401.outs[1].imol['TAL'] = F401.ins[0].imol['TAL']
# F401.outs[0].imol['TAL'] = 0.
F401.specification = F401_spec
# %%
# =============================================================================
# Wastewater treatment streams
# =============================================================================
# For aerobic digestion, flow will be updated in AerobicDigestion
air_lagoon = Stream('air_lagoon', phase='g', units='kg/hr')
# To neutralize nitric acid formed by nitrification in aerobic digestion
# flow will be updated in AerobicDigestion
# The active chemical is modeled as NaOH, but the price is cheaper than that of NaOH
aerobic_caustic = Stream('aerobic_caustic', units='kg/hr', T=20+273.15, P=2*101325,
price=price['Caustics'])
# =============================================================================
# Wastewater treatment units
# =============================================================================
# Mix waste liquids for treatment
M501 = bst.units.Mixer('M501', ins=(
# F301-1,
S402-1,
F401-0,
# r_S402_s-1, r_S403_s-1, r_S404_s-1,
# X401-1, S408-0,
))
# This represents the total cost of wastewater treatment system
WWT_cost = units.WastewaterSystemCost('WWTcost501', ins=M501-0)
R501 = units.AnaerobicDigestion('R501', ins=WWT_cost-0,
outs=('biogas', 'anaerobic_treated_water',
'anaerobic_sludge'),
reactants=soluble_organics,
split=find_split(splits_df.index,
splits_df['stream_611'],
splits_df['stream_612'],
chemical_groups),
T=35+273.15)
get_flow_tpd = lambda: (feedstock.F_mass-feedstock.imass['H2O'])*24/907.185
# Mix recycled stream and wastewater after R501
M502 = bst.units.Mixer('M502', ins=(R501-1, ''))
R502 = units.AerobicDigestion('R502', ins=(M502-0, air_lagoon, aerobic_caustic),
outs=('aerobic_vent', 'aerobic_treated_water'),
reactants=soluble_organics,
ratio=get_flow_tpd()/2205)
# Membrane bioreactor to split treated wastewater from R502
S501 = bst.units.Splitter('S501', ins=R502-1, outs=('membrane_treated_water',
'membrane_sludge'),
split=find_split(splits_df.index,
splits_df['stream_624'],
splits_df['stream_625'],
chemical_groups))
S501.line = 'Membrane bioreactor'
# Recycled sludge stream of memberane bioreactor, the majority of it (96%)
# goes to aerobic digestion and the rest to sludge holding tank then to BT
S502 = bst.units.Splitter('S502', ins=S501-1, outs=('to_aerobic_digestion',
'to_boiler_turbogenerator'),
split=0.96)
M503 = bst.units.Mixer('M503', ins=(S502-0, 'centrate'), outs=1-M502)
# Mix anaerobic and 4% of membrane bioreactor sludge
M504 = bst.units.Mixer('M504', ins=(R501-2, S502-1))
# Sludge centrifuge to separate water (centrate) from sludge
S503 = bst.units.Splitter('S503', ins=M504-0, outs=(1-M503, 'sludge'),
split=find_split(splits_df.index,
splits_df['stream_616'],
splits_df['stream_623'],
chemical_groups))
S503.line = 'Sludge centrifuge'
# Reverse osmosis to treat membrane separated water
S504 = bst.units.Splitter('S504', ins=S501-0, outs=('discharged_water', 'waste_brine'),
split=find_split(splits_df.index,
splits_df['stream_626'],
splits_df['stream_627'],
chemical_groups))
S504.line = 'Reverse osmosis'
# Mix solid wastes to boiler turbogenerator
M505 = bst.units.Mixer('M505', ins=(S503-1,
# S301-0,
S401-0,
# F401-0, D401-0,
),
outs='wastes_to_boiler_turbogenerator')
# %%
# =============================================================================
# Facilities streams
# =============================================================================
sulfuric_acid_fresh = Stream('sulfuric_acid_fresh', price=price['Sulfuric acid'])
# TCP_fresh = Stream('TCP_fresh', price=price['TCP'])
ammonia_fresh = Stream('ammonia_fresh', price=price['AmmoniumHydroxide'])
CSL_fresh = Stream('CSL_fresh', price=price['CSL'])
# lime_fresh = Stream('lime_fresh', price=price['Lime'])
HCl_fresh = Stream('HCl_fresh', price=price['HCl'])
hexanol_fresh = Stream('hexanol_fresh', price=price['Hexanol'])
# heptane_fresh = Stream('heptane_fresh', price=price['Heptane'])
# toluene_fresh = Stream('toluene_fresh', price=price['Toluene'])
# hexanol_fresh_s = Stream('hexanol_fresh_s', price=price['Hexanol'])
heptane_fresh_s = Stream('heptane_fresh_s', price=price['Heptane'])
toluene_fresh_s = Stream('toluene_fresh_s', price=price['Toluene'])
hydrogen_fresh = Stream('hydrogen_fresh', price=price['Hydrogen'])
KOH_fresh = Stream('KOH_fresh', price=price['KOH'])
# S401_out1_F_mass = S401.outs[1].F_mass
# if not (S401_out1_F_mass == 0):
# ethanol_fresh = Stream('ethanol_fresh', Ethanol = 0.24 * S401_out1_F_mass, units='kg/hr', price=price['Ethanol']) - M401.ins[3].imass['Ethanol']
# DPHP_fresh = Stream('DPHP_fresh', DPHP = 0.25 * S401_out1_F_mass, units='kg/hr', price=price['DPHP']) - M401.ins[3].imass['Dipotassium hydrogen phosphate']
# else:
# ethanol_fresh = Stream('ethanol_fresh', Ethanol = get_feedstock_dry_mass()*48*22.1/1000*0.93, units='kg/hr', price=price['Ethanol'])
# DPHP_fresh = Stream('DPHP_fresh', DPHP = get_feedstock_dry_mass()*50*22.1/1000*0.93, units='kg/hr', price=price['DPHP'])
# Water used to keep system water usage balanced
system_makeup_water = Stream('system_makeup_water', price=price['Makeup water'])
# TAL stream
# TAL = Stream('TAL', units='kg/hr', price=price['TAL'])
# SA product
SA = Stream('SA', units='kg/hr', price=price['SA'])
# Acetoin product
# Acetoin = Stream('Acetoin', units='kg/hr', price=price['Acetoin'])
# # Isobutyraldehyde product
# IBA = Stream('IBA', units='kg/hr', price=price['IBA'])
# Chemicals used/generated in BT
# FGD_lime = Stream('FGD_lime')
ash = Stream('ash', price=price['Ash disposal'])
# boiler_chems = Stream('boiler_chems', price=price['Boiler chems'])
# baghouse_bag = Stream('baghouse_bag', price=price['Baghouse bag'])
# Supplementary natural gas for BT if produced steam not enough for regenerating
# all steam streams required by the system
# natural_gas = Stream('natural_gas', price=price['Natural gas'])
# Cooling tower chemicals
cooling_tower_chems = Stream('cooling_tower_chems', price=price['Cooling tower chems'])
# 145 based on equipment M-910 (clean-in-place system) in Humbird et al.
CIP_chems_in = Stream('CIP_chems_in', Water=145*get_flow_tpd()/2205, units='kg/hr')
# 1372608 based on stream 950 in Humbird et al.
# Air needed for multiple processes (including enzyme production that was not included here),
# not rigorously modeled, only scaled based on plant size
plant_air_in = Stream('plant_air_in', phase='g', units='kg/hr',
N2=0.79*1372608*get_flow_tpd()/2205,
O2=0.21*1372608*get_flow_tpd()/2205)
# 8021 based on stream 713 in Humbird et al.
fire_water_in = Stream('fire_water_in',
Water=8021*get_flow_tpd()/2205, units='kg/hr')
# =============================================================================
# Facilities units
# =============================================================================
# T601 = units.SulfuricAcidStorageTank('T601', ins=sulfuric_acid_fresh,
# outs=sulfuric_acid_T201)
# T601.line = 'Sulfuric acid storage tank'
# S601 = bst.units.ReversedSplitter('S601', ins=T601-0,
# outs=(pretreatment_sulfuric_acid,
# ''))
# T608 = units.TCPStorageTank('T608', ins=TCP_fresh,
# outs='TCP_catalyst')
# T608-0-3-R401
# T608.line = 'Tricalcium diphosphate storage tank'
#
# T602 = units.AmmoniaStorageTank('T602', ins=ammonia_fresh, outs=ammonia_M205)
# T602.line = 'Ammonia storage tank'
T603 = units.CSLstorageTank('T603', ins=CSL_fresh, outs=CSL)
T603.line = 'CSL storage tank'
# DPHP storage
#!!! Yalin suggests to use BioSTEAM's storage tank, and maybe we don't need the ConveyingBelt
# (Yalin removed that from lactic acid biorefinery)
T604 = units.DPHPStorageTank('T604', ins=hexanol_fresh)
T604.line = 'Hexanol storage tank'
T604_P = units.TALPump('T604_P', ins=T604-0, outs = Hexanol_minimal)
# T604_P = bst.units.ConveyingBelt('T604_P', ins=T604-0, outs = Hexanol)
# # 7-day storage time, similar to ethanol's in Humbird et al.
# T605 = units.DPHPStorageTank('T605', ins=heptane_fresh)
# T605.line = 'Heptane storage tank'
# T605_P = units.TALPump('T605_P', ins=T605-0, outs = Heptane)
# T606 = units.DPHPStorageTank('T606', ins=toluene_fresh)
# T606.line = 'Toluene storage tank'
# T606_P = units.TALPump('T606_P', ins=T606-0, outs = Toluene)
T607 = units.DPHPStorageTank('T607', ins=hydrogen_fresh, outs = Hydrogen)
T607.line = 'Hydrogen storage tank'
T608 = units.DPHPStorageTank('T608', ins=HCl_fresh, outs = HCl,
vessel_material = 'Stainless steel')
T608.line = 'HCl storage tank'
T609 = units.DPHPStorageTank('T609', ins=KOH_fresh, outs = KOH,
vessel_material = 'Stainless steel')
T609.line = 'KOH storage tank'
# T604_s = units.DPHPStorageTank('T604_s', ins=hexanol_fresh_s)
# T604_s.line = 'Hexanol storage tank s'
# T604_s_P = units.TALPump('T604_s_P', ins=T604_s-0, outs = Hexanol_s)
# 7-day storage time, similar to ethanol's in Humbird et al.
T605_s = units.DPHPStorageTank('T605_s', ins=heptane_fresh_s)
T605_s.line = 'Heptane storage tank s'
T605_s_P = units.TALPump('T605_s_P', ins=T605_s-0, outs = Heptane_s)
T606_s = units.DPHPStorageTank('T606_s', ins=toluene_fresh_s)
T606_s.line = 'Toluene storage tank s'
T606_s_P = units.TALPump('T606_s_P', ins=T606_s-0, outs = Toluene_s)
# T607_P = units.TALPump('T607_P', ins=T607-0, outs = Hydrogen)
# Connections to ATPE Mixer
# T604_P-0-1-M401
# T605_P-0-2-M401
# 7-day storage time, similar to ethanol's in Humbird et al.
T620 = units.TALStorageTank('T620', ins=F401-1, tau=7*24, V_wf=0.9,
vessel_type='Floating roof',
vessel_material='Stainless steel')
T620.line = 'SAStorageTank'
T620_P = units.TALPump('T620_P', ins=T620-0, outs=SA)
# # 7-day storage time, similar to ethanol's in Humbird et al.
# T607 = units.TALStorageTank('T607', ins=D402_H-0, tau=7*24, V_wf=0.9,
# vessel_type='Floating roof',
# vessel_material='Stainless steel')
# T607.line = 'AcetoinStorageTank'
# T607_P = units.TALPump('T607_P', ins=T607-0, outs=Acetoin)
# # 7-day storage time, similar to ethanol's in Humbird et al.
# T608 = units.TALStorageTank('T608', ins=D403_H-0, tau=7*24, V_wf=0.9,
# vessel_type='Floating roof',
# vessel_material='Stainless steel')
# T608.line = 'IBAStorageTank'
# T608_P = units.TALPump('T608_P', ins=T608-0, outs=IBA)
CIP = facilities.CIP('CIP901', ins=CIP_chems_in, outs='CIP_chems_out')
ADP = facilities.ADP('ADP902', ins=plant_air_in, outs='plant_air_out',
ratio=get_flow_tpd()/2205)
FWT = units.FireWaterTank('FWT903', ins=fire_water_in, outs='fire_water_out')
CWP = facilities.CWP('CWP802', ins='return_chilled_water',
outs='process_chilled_water')
# M505-0 is the liquid/solid mixture, R501-0 is the biogas, blowdown is discharged
# BT = facilities.BT('BT', ins=(M505-0, R501-0,
# FGD_lime, boiler_chems,
# baghouse_bag, natural_gas,
# 'BT_makeup_water'),
# B_eff=0.8, TG_eff=0.85,
# combustibles=combustibles,
# side_streams_to_heat=(water_M201, water_M202, steam_M203),
# outs=('gas_emission', ash, 'boiler_blowdown_water'))
BT = bst.facilities.BoilerTurbogenerator('BT701',
ins=(M505-0,
R501-0,
'boiler_makeup_water',
'natural_gas',
'lime',
'boilerchems'),
outs=('gas_emission', 'boiler_blowdown_water', ash,),
turbogenerator_efficiency=0.85)
# BT = bst.BDunits.BoilerTurbogenerator('BT',
# ins=(M505-0, R501-0, 'boiler_makeup_water', 'natural_gas', FGD_lime, boiler_chems),
# boiler_efficiency=0.80,
# turbogenerator_efficiency=0.85)
# Blowdown is discharged
CT = facilities.CT('CT801', ins=('return_cooling_water', cooling_tower_chems,
'CT_makeup_water'),
outs=('process_cooling_water', 'cooling_tower_blowdown'))
# All water used in the system, here only consider water usage,
# if heating is needed, the heating duty required is considered in BT
process_water_streams = (enzyme_water,
aerobic_caustic,
CIP.ins[-1], BT.ins[-1], CT.ins[-1])
PWC = facilities.PWC('PWC904', ins=(system_makeup_water, S504-0),
process_water_streams=process_water_streams,
recycled_blowdown_streams=None,
outs=('process_water', 'discharged_water'))
# Heat exchanger network
from hxn._heat_exchanger_network import HeatExchangerNetwork
# from biosteam import HeatExchangerNetwork
HXN = HeatExchangerNetwork('HXN1001',
# ignored=[H401, H402],
)
def HXN_no_run_cost():
    """Zero out the heat exchanger network's utilities and installed cost.

    Intended to replace HXN._cost when simulating the system without
    heat-integration savings.
    """
    HXN.heat_utilities = tuple()
    HXN._installed_cost = 0.
# To simulate without HXN, uncomment the following 3 lines:
# HXN._cost = HXN_no_run_cost
# HXN.energy_balance_percent_error = 0.
# HXN.new_HXs = HXN.new_HX_utils = []
# HXN = HX_Network('HXN')
# %%
# =============================================================================
# Complete system
# =============================================================================
TAL_sys = create_TAL_sys()
f = bst.main_flowsheet
u = f.unit
s = f.stream
feedstock = s.feedstock
SA = s.SA
get_flow_tpd = lambda: (feedstock.F_mass-feedstock.imass['H2O'])*24/907.185
# Streams with a nonzero price enter the TEA as feed costs / product
# revenues; SA is always counted as a product.
# (The original built each set from the identical comprehension twice and
# unioned it with itself -- pure redundancy, removed here.)
TEA_feeds = set(i for i in TAL_sys.feeds if i.price)
TEA_products = set([i for i in TAL_sys.products if i.price] + [SA])
for ui in u:
globals().update({ui.ID: ui})
# %%
# =============================================================================
# TEA
# =============================================================================
# TAL_tea = CellulosicEthanolTEA(system=TAL_sys, IRR=0.10, duration=(2016, 2046),
# depreciation='MACRS7', income_tax=0.21, operating_days=0.9*365,
# lang_factor=None, construction_schedule=(0.08, 0.60, 0.32),
# startup_months=3, startup_FOCfrac=1, startup_salesfrac=0.5,
# startup_VOCfrac=0.75, WC_over_FCI=0.05,
# finance_interest=0.08, finance_years=10, finance_fraction=0.4,
# # biosteam Splitters and Mixers have no cost,
# # cost of all wastewater treatment units are included in WWT_cost,
# # BT is not included in this TEA
# OSBL_units=(u.U101, u.WWT_cost,
# u.T601, u.T602, u.T603, u.T606, u.T606_P,
# u.CWP, u.CT, u.PWC, u.CIP, u.ADP, u.FWT, u.BT),
# warehouse=0.04, site_development=0.09, additional_piping=0.045,
# proratable_costs=0.10, field_expenses=0.10, construction=0.20,
# contingency=0.10, other_indirect_costs=0.10,
# labor_cost=3212962*get_flow_tpd()/2205,
# labor_burden=0.90, property_insurance=0.007, maintenance=0.03,
# steam_power_depreciation='MACRS20', boiler_turbogenerator=u.BT)
# TAL_no_BT_tea = TAL_tea
TAL_tea = TALTEA(system=TAL_sys, IRR=0.10, duration=(2016, 2046),
depreciation='MACRS7', income_tax=0.21, operating_days=0.9*365,
lang_factor=None, construction_schedule=(0.08, 0.60, 0.32),
startup_months=3, startup_FOCfrac=1, startup_salesfrac=0.5,
startup_VOCfrac=0.75, WC_over_FCI=0.05,
finance_interest=0.08, finance_years=10, finance_fraction=0.4,
# biosteam Splitters and Mixers have no cost,
# cost of all wastewater treatment units are included in WWT_cost,
# BT is not included in this TEA
OSBL_units=(u.U101, u.WWTcost501,
# u.T601, u.T602,
u.T603, u.T604, u.T620,
# u.T606, u.T606_P,
u.CWP802, u.CT801, u.PWC904, u.CIP901, u.ADP902, u.FWT903, u.BT701),
warehouse=0.04, site_development=0.09, additional_piping=0.045,
proratable_costs=0.10, field_expenses=0.10, construction=0.20,
contingency=0.10, other_indirect_costs=0.10,
labor_cost=3212962*get_flow_tpd()/2205,
labor_burden=0.90, property_insurance=0.007, maintenance=0.03,
steam_power_depreciation='MACRS20', boiler_turbogenerator=u.BT701)
TAL_no_BT_tea = TAL_tea
# # Removed because there is not double counting anyways.
# # Removes feeds/products of BT_sys from TAL_sys to avoid double-counting
# for i in BT_sys.feeds:
# TAL_sys.feeds.remove(i)
# for i in BT_sys.products:
# TAL_sys.products.remove(i)
# Boiler turbogenerator potentially has different depreciation schedule
# BT_tea = bst.TEA.like(BT_sys, TAL_no_BT_tea)
# BT_tea.labor_cost = 0
# Changed to MACRS 20 to be consistent with Humbird
# BT_tea.depreciation = 'MACRS20'
# BT_tea.OSBL_units = (BT,)
# %%
# =============================================================================
# Simulate system and get results
# =============================================================================
# def get_TAL_MPSP():
# TAL_sys.simulate()
# for i in range(3):
# TAL.price = TAL_tea.solve_price(TAL, TAL_no_BT_tea)
# return TAL.price
def get_SA_MPSP():
    """Simulate the system to convergence and solve for the minimum
    product selling price of the SA stream, reported per kg of its TAL
    content (F_mass/imass['TAL'] rescales from stream mass to TAL mass).
    """
    # Repeat simulation and price-solving a few times so recycle loops
    # and the TEA price converge.
    for _ in range(3):
        TAL_sys.simulate()
    for _ in range(3):
        SA.price = TAL_tea.solve_price(SA)
    return SA.price * SA.F_mass / SA.imass['TAL']
def get_titer():
    """Return the TAL titer in the R302 fermentation effluent
    (mass flow / volumetric flow, i.e. kg/m^3 == g/L)."""
    broth = R302.outs[0]
    return broth.imass['TAL'] / broth.F_vol
def set_titer(titer):
    """Scale the fermentation dilution water to target *titer*, re-solve
    MPSP, and return the titer actually achieved.

    NOTE(review): this scales M304.water_multiplier, but the M304
    specification elsewhere in this file drives dilution through
    water_to_sugar_mol_ratio -- confirm which attribute is actually used.
    """
    M304.water_multiplier *= get_titer()/titer
    get_SA_MPSP()
    return get_titer()
# get_SA_MPSP()
# R301 = F('R301') # Fermentor
# yearly_production = 125000 # ton/yr
spec = ProcessSpecification(
evaporator = None,
pump = None,
mixer = u.M304,
heat_exchanger = u.M304_H,
seed_train_system = [],
reactor= u.R302,
reaction_name='fermentation_reaction',
substrates=('Xylose', 'Glucose'),
products=('TAL',),
spec_1=0.19,
spec_2=28.,
spec_3=0.19,
xylose_utilization_fraction = 0.80,
feedstock = feedstock,
dehydration_reactor = None,
byproduct_streams = [],
HXN = u.HXN1001,
maximum_inhibitor_concentration = 1.,
# pre_conversion_units = process_groups_dict['feedstock_group'].units + process_groups_dict['pretreatment_group'].units + [u.H301], # if the line below does not work (depends on BioSTEAM version)
pre_conversion_units = TAL_sys.split(u.M304.ins[0])[0],
# set baseline fermentation performance here
baseline_yield = 0.19,
baseline_titer = 28.,
baseline_productivity = 0.19,
# baseline_yield = 0.30,
# baseline_titer = 25.,
# baseline_productivity = 0.19,
feedstock_mass = feedstock.F_mass,
pretreatment_reactor = None)
spec.load_spec_1 = spec.load_yield
# spec.load_spec_2 = spec.load_titer
spec.load_spec_3 = spec.load_productivity
def M304_titer_obj_fn(water_to_sugar_mol_ratio):
    """Root-finding residual: fermentation titer minus the target.

    Sets the dilution-water-to-sugar mole ratio, then re-runs only the
    units on the path M304 -> ... -> R302 (cheaper than a full system
    simulation) and returns the titer error at that ratio.
    """
    M304, R302 = u.M304, u.R302
    M304.water_to_sugar_mol_ratio = water_to_sugar_mol_ratio
    # Run M304's own specification, then each downstream unit in order.
    M304.specification[0][0]()
    u.M304_H._run()
    u.S302._run()
    u.R303._run()
    u.T301._run()
    R302.specification[0][0]()
    return R302.effluent_titer - R302.titer_to_load
def load_titer_with_glucose(titer_to_load):
    """Load a target fermentation titer by solving for the dilution
    water-to-sugar mole ratio (bracketed in [1e-3, 20000]) that
    achieves it."""
    spec.spec_2 = titer_to_load
    u.R302.titer_to_load = titer_to_load
    flx.IQ_interpolation(M304_titer_obj_fn, 1e-3, 20000.)
# u.AC401.regeneration_velocity = min(14.4, 3.1158 + ((14.4-3.1158)/(30.-3.))*(titer_to_load-3.)) # heuristic to obtain regeneration velocity at which MPSP is minimum fitted to results from simulations at target_recovery=0.99
# u.AC401.regeneration_velocity = 14.4
spec.load_spec_2 = load_titer_with_glucose
# path = (F301, R302)
# @np.vectorize
# def calculate_titer(V):
# F301.V = V
# for i in path: i._run()
# return spec._calculate_titer()
# @np.vectorize
# def calculate_MPSP(V):
# F301.V = V
# TAL_sys.simulate()
# MPSP = SA.price = TAL_tea.solve_price(SA, TAL_no_BT_tea)
# return MPSP
# vapor_fractions = np.linspace(0.20, 0.80)
# titers = calculate_titer(vapor_fractions)
# MPSPs = calculate_MPSP(vapor_fractions)
# import matplotlib.pyplot as plt
# plt.plot(vapor_fractions, titers)
# plt.show()
# plt.plot(titers, MPSPs)
# plt.show()
# %%
# =============================================================================
# Life cycle analysis (LCA), waste disposal emission not included
# =============================================================================
# 100-year global warming potential (GWP) from material flows
LCA_streams = TEA_feeds.copy()
LCA_stream = Stream('LCA_stream', units='kg/hr')
def get_material_GWP():
    """Return the material-flow GWP normalized by the SA product mass flow.

    Lumps all LCA feed streams into the module-level LCA_stream (mutated
    in place) and applies the per-chemical characterization factors from
    CFs['GWP_CF_stream'].
    """
    LCA_stream.mass = sum(i.mass for i in LCA_streams)
    chemical_GWP = LCA_stream.mass*CFs['GWP_CF_stream'].mass
    return chemical_GWP.sum()/SA.F_mass
# GWP from combustion of non-biogenic carbons
get_non_bio_GWP = lambda: (natural_gas.get_atomic_flow('C'))* TAL_chemicals.CO2.MW / SA.F_mass
# +ethanol_fresh.get_atomic_flow('C')) \
# GWP from electricity
get_electricity_use = lambda: sum(i.power_utility.rate for i in TAL_sys.units)
get_electricity_GWP = lambda: get_electricity_use()*CFs['GWP_CFs']['Electricity'] \
/ SA.F_mass
# CO2 fixed in lactic acid product
get_fixed_GWP = lambda: \
SA.get_atomic_flow('C')*TAL_chemicals.CO2.MW/SA.F_mass
# carbon_content_of_feedstock = 0
get_GWP = lambda: get_material_GWP()+get_non_bio_GWP()+get_electricity_GWP()
# Fossil energy consumption (FEC) from materials
def get_material_FEC():
    """Return the material-flow fossil energy consumption normalized by
    the SA product mass flow.

    Mirrors get_material_GWP: lumps all LCA feed streams into the
    module-level LCA_stream (mutated in place) and applies the factors
    from CFs['FEC_CF_stream'].
    """
    LCA_stream.mass = sum(i.mass for i in LCA_streams)
    chemical_FEC = LCA_stream.mass*CFs['FEC_CF_stream'].mass
    return chemical_FEC.sum()/SA.F_mass
# FEC from electricity
get_electricity_FEC = lambda: \
get_electricity_use()*CFs['FEC_CFs']['Electricity']/SA.F_mass
# Total FEC
get_FEC = lambda: get_material_FEC()+get_electricity_FEC()
# get_SPED = lambda: BT.system_heating_demand*0.001/SA.F_mass
SA_LHV = 31.45 # MJ/kg SA
# %% Full analysis
def simulate_and_print():
    """Simulate the full system and print the key results (MPSP)."""
    # Solve once and reuse the value: each get_SA_MPSP() call re-simulates
    # the whole flowsheet three times, so the original's second call inside
    # the f-string wasted a full round of simulations.
    MPSP = get_SA_MPSP()
    print('\n---------- Simulation Results ----------')
    print(f'MPSP is ${MPSP:.3f}/kg')
# print(f'GWP is {get_GWP():.3f} kg CO2-eq/kg SA')
# print(f'FEC is {get_FEC():.2f} MJ/kg SA or {get_FEC()/SA_LHV:.2f} MJ/MJ SA')
# print(f'SPED is {get_SPED():.2f} MJ/kg SA or {get_SPED()/SA_LHV:.2f} MJ/MJ SA')
# print('--------------------\n')
# simulate_and_print()
# TAL_sys.simulate()
get_SA_MPSP()
spec.load_specifications(0.203, 35.9, 0.21)
simulate_and_print()
# %%
# =============================================================================
# For Monte Carlo and analyses
# =============================================================================
TAL_sub_sys = {
# 'feedstock_sys': (U101,),
# 'pretreatment_sys': (T201, M201, M202, M203,
# R201, R201_H, T202, T203,
# F201, F201_H,
# M204, T204, T204_P,
# M205, M205_P),
# 'conversion_sys': (H301, M301, M302, R301, R302, T301),
# 'separation_sys': (S401, M401, M401_P,
# S402,
# # F401, F401_H, X401,
# D401, D401_H, D401_P, S403,
# M402_P, S403,
# D403, D403_H, D403_P,
# M501,
# T606, T606_P, T607, T607_P)
# F402, F402_H, F402_P,
# D405, D405_H1, D405_H2, D405_P,
# M401, M401_P)
# 'wastewater_sys': (M501, WWT_cost, R501,
# M502, R502, S501, S502, M503,
# M504, S503, S504, M505),
# 'HXN': (HXN,),
# 'BT': (BT,),
# 'CT': (CT,),
# 'other_facilities': (T601, S601,
# T602, T603,
# T604, T604_P,
# T605, T605_P,
# T606, T606_P,
# PWC, CIP, ADP, FWT)
}
# for unit in sum(TAL_sub_sys.values(), ()):
# if not unit in TAL_sys.units:
# print(f'{unit.ID} not in TAL_sys.units')
# for unit in TAL_sys.units:
# if not unit in sum(TAL_sub_sys.values(), ()):
# print(f'{unit.ID} not in TAL_sub_sys') | 42.560579 | 252 | 0.574312 |
# Copyright (C) 2022-2023, Sarang Bhagwat <sarangb2@illinois.edu> (this biorefinery)
#
# This module is under the UIUC open-source license. See
# github.com/BioSTEAMDevelopmentGroup/biosteam/blob/master/LICENSE.txt
# for license details.
# %% Setup
import biosteam as bst
import thermosteam as tmo
import flexsolve as flx
import numpy as np
from math import exp as math_exp
# from biosteam import main_flowsheet as F
# from copy import deepcopy
# from biosteam import System
from thermosteam import Stream
# from biorefineries.cornstover import CellulosicEthanolTEA
from biorefineries.TAL import units, facilities
from biorefineries.TAL._process_specification import ProcessSpecification
from biorefineries.TAL.process_settings import price, CFs
from biorefineries.TAL.utils import find_split, splits_df, baseline_feedflow
from biorefineries.TAL.chemicals_data import TAL_chemicals, chemical_groups, \
soluble_organics, combustibles
# from biorefineries.TAL.tea import TALTEA
from biorefineries.cornstover import CellulosicEthanolTEA as TALTEA
from biosteam import SystemFactory
from warnings import filterwarnings
filterwarnings('ignore')
Rxn = tmo.reaction.Reaction
ParallelRxn = tmo.reaction.ParallelReaction
# from lactic.hx_network import HX_Network
# # Do this to be able to show more streams in a diagram
# bst.units.Mixer._graphics.edge_in *= 2
bst.speed_up()
flowsheet = bst.Flowsheet('TAL')
bst.main_flowsheet.set_flowsheet(flowsheet)
# Speeds up ShortcutDistillation
bst.units.ShortcutColumn.minimum_guess_distillate_recovery = 0
# Baseline cost year is 2016
bst.CE = 541.7
# _labor_2007to2016 = 22.71 / 19.55
# Set default thermo object for the system
tmo.settings.set_thermo(TAL_chemicals)
# %% Utils
R = 8.314
TAL_Hm = 30883.66976 # by Dannenfelser-Yalkowsky method
TAL_Tm = TAL_chemicals['TAL'].Tm # 458.15 K
TAL_c = 6056.69421768496 # fitted parameter
TAL_c_by_R = TAL_c/R
TAL_Hm_by_R = TAL_Hm/R
def get_TAL_solubility_in_water(T): # mol TAL : mol (TAL+water)
return math_exp(-(TAL_Hm_by_R) * (1/T - 1/TAL_Tm))/math_exp(TAL_c_by_R/T)
def get_mol_TAL_dissolved(T, mol_water):
TAL_x = get_TAL_solubility_in_water(T)
return mol_water*TAL_x/(1-TAL_x)
def get_TAL_solubility_in_water_gpL(T):
return get_mol_TAL_dissolved(T, 1000./18.)*TAL_chemicals['TAL'].MW
def get_K(chem_ID, stream, phase_1, phase_2):
return (stream[phase_1].imol[chem_ID]/stream[phase_1].F_mol)/max(1e-6, (stream[phase_2].imol[chem_ID]/stream[phase_2].F_mol))
def get_TAL_solublity_in_solvent_very_rough(T, solvent_ID='Hexanol', units='g/L'):
temp_stream =\
tmo.Stream('temp_stream_get_TAL_solublity_in_solvent_very_rough')
mol_water = mol_solvent = 1000
mol_TAL = get_mol_TAL_dissolved(T, mol_water)
temp_stream.imol['Water'] = mol_water
temp_stream.imol[solvent_ID] = mol_solvent
temp_stream.imol['TAL'] = mol_TAL
temp_stream.lle(T=T, P=temp_stream.P)
# temp_stream.show(N=100)
phase_1 = 'l' if temp_stream.imol['l', solvent_ID] > temp_stream.imol['L', solvent_ID] else 'L'
phase_2 = 'L' if phase_1=='l' else 'l'
K_TAL_in_extract = get_K('TAL', temp_stream, phase_1, phase_2)
# print(K_TAL_in_extract)
if units=='g/L':
temp_stream_2 = tmo.Stream('temp_stream_2_get_TAL_solublity_in_solvent_very_rough')
temp_stream_2.imol['TAL'] = K_TAL_in_extract*mol_TAL
temp_stream_2.imol[solvent_ID] = mol_solvent
return temp_stream_2.imass['TAL']/temp_stream_2.F_vol
elif units=='mol/mol':
return K_TAL_in_extract*mol_TAL/(mol_TAL+mol_solvent) #
def get_TAL_solubility_in_hexanol():
return 2.*0.0222/(2.*0.0222+0.951) # mol/mol; 2 * Marco's initial experimental solubility of 2.8 wt% at 21 C
def get_TAL_solubility_in_ethanol_ww():
return 0.167682
@SystemFactory(ID = 'TAL_sys')
def create_TAL_sys(ins, outs):
feedstock = Stream('feedstock')
feedstock.imass['Glucose'] = 29000.
feedstock.imass['H2O'] = 500.
feedstock.price = price['Glucose']*feedstock.imass['Glucose']/feedstock.F_mass
feedstock.F_mass = 25802.9
U101 = units.FeedstockPreprocessing('U101', ins=feedstock)
U101.cost_items['System'].cost = 0
U101.cost_items['System'].kW = 0
enzyme = Stream('enzyme', units='kg/hr', price=price['Enzyme'])
enzyme_water = Stream('enzyme_water', units='kg/hr')
CSL = Stream('CSL', units='kg/hr')
dilution_water = Stream('dilution_water', units='kg/hr')
H301 = bst.units.HXutility('H301', ins=U101-0, T=50+273.15)
M304 = bst.units.Mixer('M304', ins=(H301-0, dilution_water))
M304.water_to_sugar_mol_ratio = 5.
@M304.add_specification()
def adjust_M304_water():
M304_ins_1 = M304.ins[1]
M304_ins_1.imol['Water'] = M304.water_to_sugar_mol_ratio * M304.ins[0].imol['Glucose', 'Xylose'].sum()
M304._run()
M304_H = bst.units.HXutility('M304_H', ins=M304-0, T=30+273.15, rigorous=True)
S302 = bst.Splitter('S302', ins=M304_H-0,
outs = ('to_seedtrain', 'to_cofermentation'),
split = 0.07)
R302 = units.CoFermentation('R302',
ins=(S302-1, '', CSL),
outs=('fermentation_effluent', 'CO2_fermentation'))
def include_seed_CSL_in_cofermentation():
R302._run()
R302.ins[2].F_mass*=1./(1-S302.split[0])
R302.specification = include_seed_CSL_in_cofermentation
R303 = units.SeedTrain('R303', ins=S302-0, outs=('seed', 'CO2_seedtrain'), ferm_ratio=0.95)
T301 = units.SeedHoldTank('T301', ins=R303-0, outs=1-R302)
hr')
KOH = Stream('KOH', units = 'kg/hr')
HCl = Stream('HCl', units = 'kg/hr')
U401 = bst.Unit('U401', ins=R302-0, outs=('fermentation_broth_first_sle'))
def U401_spec():
U401_ins_0 = U401.ins[0]
tot_TAL = U401_ins_0.imol['TAL']
U401_outs_0 = U401.outs[0]
U401_outs_0.copy_like(U401_ins_0)
mol_TAL_dissolved = get_mol_TAL_dissolved(U401_outs_0.T, U401_outs_0.imol['Water'])
(mol_TAL_dissolved, tot_TAL)
U401_outs_0.imol['s', 'TAL'] = tot_TAL - min(mol_TAL_dissolved, tot_TAL)
U401.specification = U401_spec
H401 = bst.HXutility('H401', ins=U401-0, outs=('H401_0'), T=273.15+70.)
def H401_spec():
H401_ins_0 = H401.ins[0]
H401_ins_0_water=H401_ins_0.imol['Water']
tot_TAL = H401_ins_0.imol['TAL']
H401_spec_obj_fn = lambda T: get_mol_TAL_dissolved(T, H401_ins_0_water) - tot_TAL
H401.T = flx.IQ_interpolation(H401_spec_obj_fn, H401.ins[0].T, 99.+273.15)
H401._run()
H401_outs_0 = H401.outs[0]
mol_TAL_dissolved = get_mol_TAL_dissolved(H401_outs_0.T, H401_outs_0.imol['Water'])
in(mol_TAL_dissolved, tot_TAL)
H401_outs_0.imol['s', 'TAL'] = max(0., round(tot_TAL - min(mol_TAL_dissolved, tot_TAL), 5))
H401.specification = H401_spec
U402 = bst.FakeSplitter('U402', ins=H401-0, outs = ('thermally_decarboxylated_broth','vented_CO2'))
U402.decarboxylation_rxns = ParallelRxn([
Rxn('TAL + H2O -> PD + CO2', 'TAL', 0.25),
])
def get_TAL_decarboxylation_conversion(T=273.15+80.):
return (0.2*(T-273.15) + 8.)/100.
def U402_spec():
U402_outs_0 = U402.outs[0]
U402_outs_0.copy_like(U402.ins[0])
U402_outs_0.phases = ('l', 's')
U402.decarboxylation_rxns[0].X = get_TAL_decarboxylation_conversion(T=U402_outs_0.T)
U402.decarboxylation_rxns[0](U402_outs_0['l'])
U402.outs[1].imol['CO2'] = U402_outs_0.imol['l', 'CO2']
U402.outs[1].phase = 'g'
U402_outs_0.imol['l', 'CO2'] = 0.
U402.specification = U402_spec
lit = [splits_df['stream_571'][0]] + splits_df['stream_571'][2:].to_list()
S401_filtrate_split = [splits_df['stream_535'][0]] + splits_df['stream_535'][2:].to_list()
S401 = bst.units.SolidsCentrifuge('S401', ins=U402-0, outs=('S401_solid_fraction', 'S401_liquid_fraction'),
split=find_split(S401_index,
S401_cell_mass_split,
S401_filtrate_split,
chemical_groups),
solids =\
['Xylan', 'Glucan', 'Lignin', 'FermMicrobe',\
'Ash', 'Arabinan', 'Galactan', 'Mannan'])
S401_TAL_split_spec():
S401._run()
S401_ins_0 = S401.ins[0]
S401.outs[0].imol['TAL'] = S401_ins_0.imol['s', 'TAL']
S401.outs[1].imol['TAL'] = S401_ins_0.imol['l', 'TAL']
S401.specification = S401_TAL_split_spec
H402 = bst.HXutility('H402', ins=S401-1, outs=('H402_0'), T=273.15+1.)
def H402_spec():
H402._run()
H402_ins_0 = H402.ins[0]
tot_TAL = H402_ins_0.imol['TAL']
H402_outs_0 = H402.outs[0]
TAL_solubility = get_mol_TAL_dissolved(H402_outs_0.T, H402_outs_0.imol['Water'])
H402_outs_0.phases = ('s', 'l')
H402_outs_0.T = H402.T
TAL_dissolved = min(TAL_solubility, tot_TAL)
H402_outs_0.imol['l', 'TAL'] = TAL_dissolved
H402_outs_0.imol['s', 'TAL'] = max(0, tot_TAL - TAL_dissolved)
H402.specification = H402_spec
S402 = bst.units.SolidsCentrifuge('S402', ins=H402-0, outs=('S402_solid_fraction', 'S402_liquid_fraction'),
split=find_split(S401_index,
S401_cell_mass_split,
S401_filtrate_split,
chemical_groups), solids =\
['Xylan', 'Glucan', 'Lignin', 'FermMicrobe',\
'Ash', 'Arabinan', 'Galactan', 'Mannan'])
def S402_TAL_split_spec():
float(S402_ins_0.imol['s', 'TAL'])
S402_ins_0.imol['s', 'TAL'] = 0.
S402._run()
S402.outs[0].imol['TAL'] = solid_TAL
S402.outs[1].imol['TAL'] = S402_ins_0.imol['l', 'TAL']
S402_ins_0.imol['s', 'TAL'] = solid_TAL
S402.specification = S402_TAL_split_spec
# Reheat the recovered TAL solids before flashing off residual volatiles.
H403 = bst.HXutility('H403', ins=S402-0, outs=('heated_TAL'), T=273.15+40.)
# Flash unit producing the purified TAL/SA product; V is set by F401_spec.
F401 = bst.Flash('F401', ins=H403-0, outs=('volatiles', 'pure_TAL_product'), V=0.99, P=101325.)
def F401_spec():
    # Vaporize exactly the volatile (water/acids/aldehydes) fraction of the feed.
    F401_ins_0 = F401.ins[0]
    F401.V = sum(F401_ins_0.imol['H2O',
                                 'AceticAcid',
                                 'Furfural',
                                 'HMF',]) / F401_ins_0.F_mol
    F401._run()
F401.specification = F401_spec
# --- Wastewater treatment: streams, digestion train, and membrane/RO units ---
air_lagoon = Stream('air_lagoon', phase='g', units='kg/hr')
aerobic_caustic = Stream('aerobic_caustic', units='kg/hr', T=20+273.15, P=2*101325,
                              price=price['Caustics'])
M501 = bst.units.Mixer('M501', ins=(
                                    S402-1,
                                    F401-0,
                                    ))
WWT_cost = units.WastewaterSystemCost('WWTcost501', ins=M501-0)
# Anaerobic digestion of soluble organics to biogas (35 degC).
R501 = units.AnaerobicDigestion('R501', ins=WWT_cost-0,
                                outs=('biogas', 'anaerobic_treated_water',
                                      'anaerobic_sludge'),
                                reactants=soluble_organics,
                                split=find_split(splits_df.index,
                                                 splits_df['stream_611'],
                                                 splits_df['stream_612'],
                                                 chemical_groups),
                                T=35+273.15)
# Dry feedstock flow in short tons per day (907.185 kg per short ton).
get_flow_tpd = lambda: (feedstock.F_mass-feedstock.imass['H2O'])*24/907.185
M502 = bst.units.Mixer('M502', ins=(R501-1, ''))
R502 = units.AerobicDigestion('R502', ins=(M502-0, air_lagoon, aerobic_caustic),
                              outs=('aerobic_vent', 'aerobic_treated_water'),
                              reactants=soluble_organics,
                              ratio=get_flow_tpd()/2205)
S501 = bst.units.Splitter('S501', ins=R502-1, outs=('membrane_treated_water',
                                                    'membrane_sludge'),
                          split=find_split(splits_df.index,
                                           splits_df['stream_624'],
                                           splits_df['stream_625'],
                                           chemical_groups))
S501.line = 'Membrane bioreactor'
# 96% of the membrane sludge is recycled back to aerobic digestion.
S502 = bst.units.Splitter('S502', ins=S501-1, outs=('to_aerobic_digestion',
                                                    'to_boiler_turbogenerator'),
                          split=0.96)
M503 = bst.units.Mixer('M503', ins=(S502-0, 'centrate'), outs=1-M502)
M504 = bst.units.Mixer('M504', ins=(R501-2, S502-1))
S503 = bst.units.Splitter('S503', ins=M504-0, outs=(1-M503, 'sludge'),
                          split=find_split(splits_df.index,
                                           splits_df['stream_616'],
                                           splits_df['stream_623'],
                                           chemical_groups))
S503.line = 'Sludge centrifuge'
S504 = bst.units.Splitter('S504', ins=S501-0, outs=('discharged_water', 'waste_brine'),
                          split=find_split(splits_df.index,
                                           splits_df['stream_626'],
                                           splits_df['stream_627'],
                                           chemical_groups))
S504.line = 'Reverse osmosis'
# Combustible wastes (sludge + S401 solids) routed to the boiler/turbogenerator.
M505 = bst.units.Mixer('M505', ins=(S503-1,
                                    S401-0,
                                    ),
                       outs='wastes_to_boiler_turbogenerator')
# --- Fresh feed streams (with purchase prices) ---
sulfuric_acid_fresh = Stream('sulfuric_acid_fresh', price=price['Sulfuric acid'])
ammonia_fresh = Stream('ammonia_fresh', price=price['AmmoniumHydroxide'])
CSL_fresh = Stream('CSL_fresh', price=price['CSL'])
HCl_fresh = Stream('HCl_fresh', price=price['HCl'])
hexanol_fresh = Stream('hexanol_fresh', price=price['Hexanol'])
heptane_fresh_s = Stream('heptane_fresh_s', price=price['Heptane'])
toluene_fresh_s = Stream('toluene_fresh_s', price=price['Toluene'])
hydrogen_fresh = Stream('hydrogen_fresh', price=price['Hydrogen'])
KOH_fresh = Stream('KOH_fresh', price=price['KOH'])
system_makeup_water = Stream('system_makeup_water', price=price['Makeup water'])
# Main product stream (sorbic acid / TAL derivative).
SA = Stream('SA', units='kg/hr', price=price['SA'])
# Reconstructed: the dump truncated the "ash = " prefix; `ash` is referenced
# below in the BoilerTurbogenerator outs, so the Stream must be bound.
ash = Stream('ash', price=price['Ash disposal'])
# --- Facility utility streams (flows scaled by dry feedstock tons/day) ---
cooling_tower_chems = Stream('cooling_tower_chems', price=price['Cooling tower chems'])
CIP_chems_in = Stream('CIP_chems_in', Water=145*get_flow_tpd()/2205, units='kg/hr')
# Plant air at 79/21 N2/O2 by mole.
plant_air_in = Stream('plant_air_in', phase='g', units='kg/hr',
                      N2=0.79*1372608*get_flow_tpd()/2205,
                      O2=0.21*1372608*get_flow_tpd()/2205)
fire_water_in = Stream('fire_water_in',
                       Water=8021*get_flow_tpd()/2205, units='kg/hr')
# --- Storage tanks and transfer pumps ---
T603 = units.CSLstorageTank('T603', ins=CSL_fresh, outs=CSL)
T603.line = 'CSL storage tank'
T604 = units.DPHPStorageTank('T604', ins=hexanol_fresh)
T604.line = 'Hexanol storage tank'
T604_P = units.TALPump('T604_P', ins=T604-0, outs = Hexanol_minimal)
# T605 = units.DPHPStorageTank('T605', ins=heptane_fresh)  # (reconstructed: line was truncated to "h)" in the dump)
# T605.line = 'Heptane storage tank'
# T605_P = units.TALPump('T605_P', ins=T605-0, outs = Heptane)
# T606 = units.DPHPStorageTank('T606', ins=toluene_fresh)
# T606.line = 'Toluene storage tank'
# T606_P = units.TALPump('T606_P', ins=T606-0, outs = Toluene)
T607 = units.DPHPStorageTank('T607', ins=hydrogen_fresh, outs=Hydrogen)
T607.line = 'Hydrogen storage tank'
T608 = units.DPHPStorageTank('T608', ins=HCl_fresh, outs=HCl,
                             vessel_material='Stainless steel')
T608.line = 'HCl storage tank'
T609 = units.DPHPStorageTank('T609', ins=KOH_fresh, outs=KOH,
                             vessel_material='Stainless steel')
T609.line = 'KOH storage tank'
# T604_s = units.DPHPStorageTank('T604_s', ins=hexanol_fresh_s)
# T604_s.line = 'Hexanol storage tank s'
# T604_s_P = units.TALPump('T604_s_P', ins=T604_s-0, outs = Hexanol_s)
# 7-day storage time, similar to ethanol's in Humbird et al.
T605_s = units.DPHPStorageTank('T605_s', ins=heptane_fresh_s)
T605_s.line = 'Heptane storage tank s'
T605_s_P = units.TALPump('T605_s_P', ins=T605_s-0, outs=Heptane_s)
T606_s = units.DPHPStorageTank('T606_s', ins=toluene_fresh_s)
T606_s.line = 'Toluene storage tank s'
T606_s_P = units.TALPump('T606_s_P', ins=T606_s-0, outs=Toluene_s)
# Product storage: 7-day residence, floating-roof stainless tank.
T620 = units.TALStorageTank('T620', ins=F401-1, tau=7*24, V_wf=0.9,
                            vessel_type='Floating roof',
                            vessel_material='Stainless steel')
T620.line = 'SAStorageTank'
T620_P = units.TALPump('T620_P', ins=T620-0, outs=SA)
# # 7-day storage time, similar to ethanol's in Humbird et al.
# T608 = units.TALStorageTank('T608', ins=..., tau=7*24, V_wf=0.9,  # (reconstructed: truncated to "=7*24, V_wf=0.9," in the dump)
#                             vessel_type='Floating roof',
#                             vessel_material='Stainless steel')
# T608.line = 'IBAStorageTank'
# T608_P = units.TALPump('T608_P', ins=T608-0, outs=IBA)
# --- Facilities: CIP, air, fire water, chilled water, boiler, cooling tower ---
CIP = facilities.CIP('CIP901', ins=CIP_chems_in, outs='CIP_chems_out')
ADP = facilities.ADP('ADP902', ins=plant_air_in, outs='plant_air_out',
                     ratio=get_flow_tpd()/2205)
FWT = units.FireWaterTank('FWT903', ins=fire_water_in, outs='fire_water_out')
CWP = facilities.CWP('CWP802', ins='return_chilled_water',
                     outs='process_chilled_water')
# M505-0 is the liquid/solid mixture, R501-0 is the biogas, blowdown is discharged
# (A legacy facilities.BT construction with explicit FGD lime/boiler chems and
# side streams was previously kept here commented out; see version control.)
BT = bst.facilities.BoilerTurbogenerator('BT701',
                                         ins=(M505-0,
                                              R501-0,
                                              'boiler_makeup_water',
                                              'natural_gas',
                                              'lime',
                                              'boilerchems'),
                                         outs=('gas_emission', 'boiler_blowdown_water', ash,),
                                         turbogenerator_efficiency=0.85)
# (A legacy bst.BDunits.BoilerTurbogenerator construction, boiler_efficiency=0.80,
# turbogenerator_efficiency=0.85, was previously kept here commented out.)
# Blowdown is discharged
CT = facilities.CT('CT801', ins=('return_cooling_water', cooling_tower_chems,
                                 'CT_makeup_water'),
                   outs=('process_cooling_water', 'cooling_tower_blowdown'))
# All water used in the system; here only water usage is considered — any
# heating duty required is accounted for in BT.
process_water_streams = (enzyme_water,
                         aerobic_caustic,
                         CIP.ins[-1], BT.ins[-1], CT.ins[-1])
PWC = facilities.PWC('PWC904', ins=(system_makeup_water, S504-0),
                     process_water_streams=process_water_streams,
                     recycled_blowdown_streams=None,
                     outs=('process_water', 'discharged_water'))
# Heat exchanger network
from hxn._heat_exchanger_network import HeatExchangerNetwork
# from biosteam import HeatExchangerNetwork
HXN = HeatExchangerNetwork('HXN1001',
                           # ignored=[H401, H402],
                           )
def HXN_no_run_cost():
    """Zero out the HXN's utilities and installed cost (stand-in for _cost)."""
    HXN.heat_utilities = tuple()
    HXN._installed_cost = 0.
# To simulate without HXN, uncomment the following 3 lines:
# HXN._cost = HXN_no_run_cost
# HXN.energy_balance_percent_error = 0.
# HXN.new_HXs = HXN.new_HX_utils = []
# %%
# =============================================================================
# Complete system
# =============================================================================
TAL_sys = create_TAL_sys()
# Convenience handles into the main flowsheet registries.
f = bst.main_flowsheet
u = f.unit
s = f.stream
feedstock = s.feedstock
SA = s.SA
# Dry feedstock flow in short tons per day.
get_flow_tpd = lambda: (feedstock.F_mass-feedstock.imass['H2O'])*24/907.185
# NOTE(review): both comprehensions in each set below are identical, so the
# sets are simply the priced feeds/products; confirm whether a second
# predicate was intended in the duplicated comprehension.
TEA_feeds = set([i for i in TAL_sys.feeds if i.price]+ \
[i for i in TAL_sys.feeds if i.price])
TEA_products = set([i for i in TAL_sys.products if i.price]+ \
[i for i in TAL_sys.products if i.price]+[SA])
# Expose every unit operation (e.g., R302, M304) as a module-level name.
for ui in u:
    globals().update({ui.ID: ui})
# %%
# =============================================================================
# TEA
# =============================================================================
# (A legacy CellulosicEthanolTEA construction with the same parameter values
# and pre-renumbered unit IDs was previously kept here commented out; see
# version control for the original text.)
# Techno-economic analysis: 30-year project (2016-2046), 10% IRR, 21% income
# tax, 90% on-stream factor, 40% debt financing at 8% over 10 years.
TAL_tea = TALTEA(system=TAL_sys, IRR=0.10, duration=(2016, 2046),
        depreciation='MACRS7', income_tax=0.21, operating_days=0.9*365,
        lang_factor=None, construction_schedule=(0.08, 0.60, 0.32),
        startup_months=3, startup_FOCfrac=1, startup_salesfrac=0.5,
        startup_VOCfrac=0.75, WC_over_FCI=0.05,
        finance_interest=0.08, finance_years=10, finance_fraction=0.4,
        # biosteam Splitters and Mixers have no cost,
        # cost of all wastewater treatment units are included in WWT_cost,
        # BT is not included in this TEA
        OSBL_units=(u.U101, u.WWTcost501,
                    # u.T601, u.T602,
                    u.T603, u.T604, u.T620,
                    # u.T606, u.T606_P,
                    u.CWP802, u.CT801, u.PWC904, u.CIP901, u.ADP902, u.FWT903, u.BT701),
        warehouse=0.04, site_development=0.09, additional_piping=0.045,
        proratable_costs=0.10, field_expenses=0.10, construction=0.20,
        contingency=0.10, other_indirect_costs=0.10,
        labor_cost=3212962*get_flow_tpd()/2205,
        labor_burden=0.90, property_insurance=0.007, maintenance=0.03,
        steam_power_depreciation='MACRS20', boiler_turbogenerator=u.BT701)
TAL_no_BT_tea = TAL_tea
# (Legacy, commented-out code removing BT_sys feeds/products from TAL_sys to
# avoid double counting — not needed — and a separate BT_tea with MACRS20
# depreciation, kept in version control.)
# %%
# =============================================================================
# Simulate system and get results
# =============================================================================
# (Legacy get_TAL_MPSP helper kept in version control.)
def get_SA_MPSP():
    """Converge the flowsheet (3 passes), solve the SA minimum product
    selling price (3 passes), and return it in $/kg of contained TAL."""
    for i in range(3):
        TAL_sys.simulate()
    for i in range(3):
        SA.price = TAL_tea.solve_price(SA)
    return SA.price*SA.F_mass/SA.imass['TAL']
def get_titer():
    """Return the TAL titer (mass per volume) in the R302 fermentor effluent."""
    return R302.outs[0].imass['TAL']/R302.outs[0].F_vol
def set_titer(titer):
    """Scale the fermentation dilution water toward *titer*, re-solve the
    MPSP, and return the titer actually achieved."""
    M304.water_multiplier *= get_titer()/titer
    get_SA_MPSP()
    return get_titer()
# get_SA_MPSP()
# R301 = F('R301') # Fermentor
# yearly_production = 125000 # ton/yr
# Process specification object used to load fermentation yield (spec_1),
# titer (spec_2), and productivity (spec_3) set-points onto the flowsheet.
spec = ProcessSpecification(
    evaporator=None,
    pump=None,
    mixer=u.M304,
    heat_exchanger=u.M304_H,
    seed_train_system=[],
    reactor=u.R302,
    reaction_name='fermentation_reaction',
    substrates=('Xylose', 'Glucose'),
    products=('TAL',),
    spec_1=0.19,
    spec_2=28.,
    spec_3=0.19,
    xylose_utilization_fraction=0.80,
    feedstock=feedstock,
    dehydration_reactor=None,
    byproduct_streams=[],
    HXN=u.HXN1001,
    maximum_inhibitor_concentration=1.,
    # pre_conversion_units = process_groups_dict['feedstock_group'].units + process_groups_dict['pretreatment_group'].units + [u.H301], # if the line below does not work (depends on BioSTEAM version)
    pre_conversion_units=TAL_sys.split(u.M304.ins[0])[0],
    # set baseline fermentation performance here
    baseline_yield=0.19,
    baseline_titer=28.,
    baseline_productivity=0.19,
    # baseline_yield = 0.30,
    # baseline_titer = 25.,
    # baseline_productivity = 0.19,
    feedstock_mass=feedstock.F_mass,
    pretreatment_reactor=None)
spec.load_spec_1 = spec.load_yield
# spec.load_spec_2 = spec.load_titer  # replaced by load_titer_with_glucose below
spec.load_spec_3 = spec.load_productivity
def M304_titer_obj_fn(water_to_sugar_mol_ratio):
    """Root-finding objective: residual between the fermentor effluent titer
    and the loaded target at the given dilution-water/sugar mole ratio.

    Re-runs the fixed unit path M304 -> M304_H -> S302 -> R303 -> T301 -> R302
    in order; the sequence matters, so it is reproduced exactly."""
    M304, R302 = u.M304, u.R302
    M304.water_to_sugar_mol_ratio = water_to_sugar_mol_ratio
    M304.specification[0][0]()
    u.M304_H._run()
    u.S302._run()
    u.R303._run()
    u.T301._run()
    R302.specification[0][0]()
    # broth = R302.outs[0]
    # return broth.imass['TAL']/broth.F_vol - R302.titer_to_load
    return R302.effluent_titer - R302.titer_to_load
def load_titer_with_glucose(titer_to_load):
    """Load a fermentation titer set-point by solving for the dilution-water
    ratio that achieves it (interpolation search on M304_titer_obj_fn)."""
    spec.spec_2 = titer_to_load
    u.R302.titer_to_load = titer_to_load
    flx.IQ_interpolation(M304_titer_obj_fn, 1e-3, 20000.)
    # u.AC401.regeneration_velocity = min(14.4, 3.1158 + ((14.4-3.1158)/(30.-3.))*(titer_to_load-3.)) # heuristic to obtain regeneration velocity at which MPSP is minimum fitted to results from simulations at target_recovery=0.99
    # u.AC401.regeneration_velocity = 14.4
spec.load_spec_2 = load_titer_with_glucose
# (Disabled exploration helpers — vectorized calculate_titer / calculate_MPSP
# over flash vapor fractions plus matplotlib plotting — kept in version
# control.)
# %%
# =============================================================================
# Life cycle analysis (LCA), waste disposal emission not included
# =============================================================================
# 100-year global warming potential (GWP) from material flows.
# LCA_stream aggregates the mass of all LCA-relevant feed streams so that
# characterization factors can be applied element-wise.
LCA_streams = TEA_feeds.copy()
LCA_stream = Stream('LCA_stream', units='kg/hr')
def get_material_GWP():
    """Return the GWP contribution of material inflows, per kg of SA product."""
    LCA_stream.mass = sum(i.mass for i in LCA_streams)
    chemical_GWP = LCA_stream.mass*CFs['GWP_CF_stream'].mass
    # feedstock_GWP = feedstock.F_mass*CFs['GWP_CFs']['Corn stover']
    return chemical_GWP.sum()/SA.F_mass
# GWP from combustion of non-biogenic carbons
get_non_bio_GWP = lambda: (natural_gas.get_atomic_flow('C'))* TAL_chemicals.CO2.MW / SA.F_mass
# +ethanol_fresh.get_atomic_flow('C')) \
# GWP from electricity
get_electricity_use = lambda: sum(i.power_utility.rate for i in TAL_sys.units)
get_electricity_GWP = lambda: get_electricity_use()*CFs['GWP_CFs']['Electricity'] \
/ SA.F_mass
# CO2 fixed in lactic acid product
get_fixed_GWP = lambda: \
SA.get_atomic_flow('C')*TAL_chemicals.CO2.MW/SA.F_mass
# carbon_content_of_feedstock = 0
# Total GWP per kg SA (fixed-CO2 credit deliberately not subtracted here).
get_GWP = lambda: get_material_GWP()+get_non_bio_GWP()+get_electricity_GWP()
def get_material_FEC():
    """Return the fossil energy consumption of material inflows, per kg SA."""
    LCA_stream.mass = sum(i.mass for i in LCA_streams)
    chemical_FEC = LCA_stream.mass*CFs['FEC_CF_stream'].mass
    # feedstock_FEC = feedstock.F_mass*CFs['FEC_CFs']['Corn stover']
    return chemical_FEC.sum()/SA.F_mass
# FEC from electricity
get_electricity_FEC = lambda: \
get_electricity_use()*CFs['FEC_CFs']['Electricity']/SA.F_mass
# Total FEC
get_FEC = lambda: get_material_FEC()+get_electricity_FEC()
# get_SPED = lambda: BT.system_heating_demand*0.001/SA.F_mass
SA_LHV = 31.45 # MJ/kg SA (lower heating value, used to normalize FEC/SPED)
# %% Full analysis
def simulate_and_print():
    """Converge the system, solve the SA MPSP, and print a short summary."""
    # Solve once and reuse the result: the original re-invoked get_SA_MPSP()
    # inside the f-string, repeating the full (expensive) solve a second time
    # for an already-converged system.
    MPSP = get_SA_MPSP()
    print('\n---------- Simulation Results ----------')
    print(f'MPSP is ${MPSP:.3f}/kg')
    # print(f'GWP is {get_GWP():.3f} kg CO2-eq/kg SA')
    # print(f'FEC is {get_FEC():.2f} MJ/kg SA or {get_FEC()/SA_LHV:.2f} MJ/MJ SA')
    # print(f'SPED is {get_SPED():.2f} MJ/kg SA or {get_SPED()/SA_LHV:.2f} MJ/MJ SA')
    # print('--------------------\n')
# simulate_and_print()
# TAL_sys.simulate()
get_SA_MPSP()
# Load baseline yield/titer/productivity, then report.
spec.load_specifications(0.203, 35.9, 0.21)
simulate_and_print()
# %%
# =============================================================================
# For Monte Carlo and analyses
# =============================================================================
# Unit groupings for Monte Carlo bookkeeping. All entries are currently
# disabled; the original commented-out groupings (feedstock, pretreatment,
# conversion, separation, wastewater, HXN/BT/CT, other facilities) are kept
# in version control and can be restored as needed.
TAL_sub_sys = {
}
# (Disabled sanity checks that compare TAL_sub_sys coverage against
# TAL_sys.units are likewise kept in version control.)
f72090137ed4a55b46aada64a6912f716757178f | 11,993 | py | Python | qusp/target.py | dmargala/qusp | 3b08e8bea321f7083f00756f558c1d0af0eddd07 | [
"MIT"
] | null | null | null | qusp/target.py | dmargala/qusp | 3b08e8bea321f7083f00756f558c1d0af0eddd07 | [
"MIT"
] | null | null | null | qusp/target.py | dmargala/qusp | 3b08e8bea321f7083f00756f558c1d0af0eddd07 | [
"MIT"
] | null | null | null | """
Provides support for working with BOSS targets.
In qusp, a target is identified by a unique plate-mjd-fiber. They are implemented as dictionaries and
must have at least 'plate', 'mjd', and 'fiber' keys specified. The Target model is designed to be flexible,
in that other attributes can be added to targets as needed.
Examples
--------
Construct a target from a string identifier::
target = qusp.target.Target.from_string('plate-mjd-fiber')
Construct a target from a dictionary::
target = qusp.target.Target({'target':'plate-mjd-fiber'})
Read a target list along with **ra**, **dec**, and **z** columns::
targets = qusp.target.load_target_list(filename,
fields=[('ra', float, 1), ('dec', float, 2), ('z', float, 3)])
Save a target list along with **z** and **sn** fields::
qusp.target.save_target_list(filename, targets, fields=['z', 'sn'])
Iterate over combined spectra for a list targets::
for target, spectrum in qusp.target.get_combined_spectra(targets):
...
Iterate over plates for a list of targets::
for target, spplate in qusp.target.get_target_plates(targets):
spectrum = qusp.read_combined_spectrum(spplate, target)
...
-------------
"""
import numpy as np
from astropy.io import fits
import os
import qusp
class Target(dict):
    """
    A BOSS target, stored as a dictionary with at least 'plate', 'mjd',
    and 'fiber' keys.

    On construction the canonical 'target' entry (the plate-mjd-fiber
    string) is rebuilt from those three keys, and the three keys are
    normalized to integers.
    """
    def __init__(self, *args, **kwargs):
        super(Target, self).__init__(*args, **kwargs)
        # Rebuild the canonical identifier before coercing the id fields,
        # so string inputs keep their original textual form in 'target'.
        self['target'] = '{}-{}-{}'.format(self['plate'], self['mjd'], self['fiber'])
        for key in ('plate', 'mjd', 'fiber'):
            self[key] = int(self[key])

    def to_string(self):
        """Return the canonical plate-mjd-fiber identifier string."""
        return self['target']

    @classmethod
    def from_string(cls, target_string):
        """Build a :class:`Target` from a 'plate-mjd-fiber' identifier string."""
        plate, mjd, fiber = target_string.split('-')
        return cls({'plate': plate, 'mjd': mjd, 'fiber': fiber})

    @classmethod
    def from_plate_mjd_fiber(cls, plate, mjd, fiber):
        """Build a :class:`Target` from separate plate, mjd, and fiber values."""
        return cls({'plate': plate, 'mjd': mjd, 'fiber': fiber})
def load_target_list(filename, fields=None, verbose=False):
    """
    Load target data from a whitespace-delimited text file.

    The first three columns must be the plate, mjd, and fiber identifiers
    (one header line is skipped). Use ``fields`` to read additional
    columns; each entry is a (name, dtype, column index) tuple.

    Args:
        filename (str): The filename to load.
        fields (list, optional): A list of extra columns to read.
            Defaults to None.
        verbose (bool, optional): Whether or not to print verbose output.
            Defaults to False.

    Returns:
        list of :class:`Target` objects.
    """
    if fields is None:
        fields = []
    fields = [('plate', 'S4', 0), ('mjd', 'S5', 1), ('fiber', 'S4', 2)] + fields
    names, formats, cols = zip(*fields)
    if verbose:
        # Parenthesized single-argument prints are valid in both Python 2 and 3.
        print('Target list: %s' % filename)
        print('Reading fields: %s' % (', '.join(names)))
    targets = np.genfromtxt(
        filename, dtype={'names': names, 'formats': formats}, usecols=cols, skip_header=1)
    return [Target(dict(zip(targets.dtype.names, t))) for t in targets]
def add_args(parser):
    """
    Register the standard target-list command-line options on *parser*.

    Args:
        parser (argparse.ArgumentParser): an argument parser
    """
    parser.add_argument('--targets', type=str, default=None,
                        help='text file containing target list')
    parser.add_argument('--ntargets', type=int, default=0,
                        help='number of targets to use, 0 for all')
def load_target_list_from_args(args, fields=None):
    """
    Load a target list using the options registered by :func:`add_args`.

    Args:
        args (argparse.Namespace): parsed namespace providing ``targets``
            (filename) and ``ntargets`` attributes.
        fields (list, optional): extra (name, dtype, column) tuples
            forwarded to :func:`load_target_list`. Defaults to None.

    Returns:
        list of :class:`Target` objects, truncated to ``args.ntargets``
        entries when that option is positive (0 means "use all").
    """
    target_list = load_target_list(args.targets, fields=fields)
    # trim target list if requested
    ntargets = args.ntargets if args.ntargets > 0 else len(target_list)
    # Parenthesized so the statement is valid in both Python 2 and 3.
    print('Using %d targets (out of %d in file)' % (ntargets, len(target_list)))
    return target_list[:ntargets]
def save_target_list(filename, targets, fields=None, verbose=False):
    """
    Write a list of targets to a text file, one target per line.

    The first column is always the plate-mjd-fiber identifier; use
    ``fields`` to append additional target keys as columns.

    Args:
        filename (str): The filename of the output text file to create.
        targets (:class:`Target`): A list of :class:`Target` objects to save.
        fields (list or str, optional): extra key(s) to annotate the
            output list with. Defaults to None.
        verbose (bool, optional): Whether or not to print verbose output.
            Defaults to False.
    """
    keys = ['target']
    if fields is not None:
        # Accept either a single key or a list of keys.
        keys.extend(fields if type(fields) is list else [fields])
    if verbose:
        # Parenthesized so the statement is valid in both Python 2 and 3.
        print('Saving targets to %s w/ fields: %s' % (filename, ', '.join(keys)))
    with open(filename, 'w') as outfile:
        for target in targets:
            outfile.write(' '.join([str(target[key]) for key in keys]) + '\n')
def get_target_plates(targets, boss_path=None, sort=True, verbose=False):
    """
    Generator yielding (target, spplate) tuples for the provided targets.

    With sort=True, the targets are sorted by plate-mjd-fiber so that each
    spPlate FITS file is opened only once per group of targets.

    Args:
        targets (:class:`Target`): list of :class:`Target` objects
            to iterate through.
        boss_path (str, optional): path to the BOSS data directory. Default
            is to look this up via :class:`qusp.paths.Paths`.
        sort (bool, optional): sort targets by plate-mjd-fiber first.
            Defaults to True.
        verbose (bool, optional): Whether or not to print verbose output.
            Defaults to False.

    Yields:
        (target, spplate) tuples, where spplate is the opened FITS file
        containing the target's coadded spectrum.
    """
    if boss_path is None:
        boss_path = qusp.paths.Paths().boss_path
    if sort:
        targets = sorted(
            targets, key=lambda t: (t['plate'], t['mjd'], t['fiber']))
    currently_opened_filename = None
    spplate = None
    for target in targets:
        plate_filename = 'spPlate-%s-%s.fits' % (target['plate'], target['mjd'])
        if plate_filename != currently_opened_filename:
            # Close the previously opened plate file before opening the next.
            if spplate is not None:
                spplate.close()
            full_path_to_spplate = os.path.join(
                boss_path, str(target['plate']), plate_filename)
            if verbose:
                # Parenthesized so the statement is valid in both Python 2 and 3.
                print('Opening plate file %s...' % full_path_to_spplate)
            spplate = fits.open(full_path_to_spplate)
            currently_opened_filename = plate_filename
        yield target, spplate
    # NOTE(review): as in the original, the last opened spplate is left open
    # when the generator is exhausted; callers must close it if required.
def get_combined_spectra(targets, paths=None, sort=True, verbose=False, tpcorr=None):
    """
    Generator yielding (target, spectrum) tuples for the provided targets.

    With sort=True, the targets are sorted by plate-mjd-fiber to reduce the
    number of spPlate file opens. When *tpcorr* is given, throughput-corrected
    spectra are yielded instead; targets without a correction are skipped.

    Args:
        targets (:class:`Target`): list of :class:`Target` objects
            to iterate through.
        paths (:class:`qusp.paths.Paths`, optional): BOSS data locator.
            A default Paths() is constructed when omitted.
        sort (bool, optional): sort targets by plate-mjd-fiber first.
            Defaults to True.
        verbose (bool, optional): Whether or not to print verbose output.
            Defaults to False.
        tpcorr (optional): hdf5 file of throughput corrections.

    Yields:
        (target, spectrum) tuples, where spectrum is the target's coadded
        (optionally corrected) spectrum.
    """
    # Bug fix: the original dereferenced paths.boss_path even when the
    # default paths=None was used, raising AttributeError.
    if paths is None:
        paths = qusp.paths.Paths()
    for target, spplate in get_target_plates(targets, boss_path=paths.boss_path, sort=sort, verbose=verbose):
        if tpcorr is not None:
            try:
                corrected = get_corrected_spectrum(target, tpcorr, paths)
            except KeyError:
                # No correction entry for this target: report and skip it.
                if verbose:
                    print('get_combined_spectra: Error reading correction for %s' % target.to_string())
                continue
            yield target, corrected
        else:
            yield target, qusp.spectrum.read_combined_spectrum(spplate, target)
def get_combined_spectrum(target, paths=None):
    """
    Return the coadded spectrum of the specified target.

    Args:
        target (:class:`Target`): a target
        paths (:class:`qusp.paths.Paths`, optional): locator for the BOSS
            data directory; a default Paths() is constructed when omitted.

    Returns:
        Coadded spectrum of the specified target.
    """
    resolved_paths = qusp.paths.Paths() if paths is None else paths
    spplate = fits.open(resolved_paths.get_spplate_filename(target))
    return qusp.spectrum.read_combined_spectrum(spplate, target)
def get_lite_spectrum(target, paths=None):
    """
    Return the spectrum of *target* read from its 'lite' spec file.

    Args:
        target (:class:`Target`): a target
        paths (:class:`qusp.paths.Paths`, optional): locator for the BOSS
            data directory; a default Paths() is constructed when omitted.
    """
    resolved_paths = qusp.paths.Paths() if paths is None else paths
    spec_file = fits.open(resolved_paths.get_spec_filename(target, lite=True))
    return qusp.spectrum.read_lite_spectrum(spec_file)
def get_lite_spectra(targets):
    """
    Generator yielding (target, spectrum) tuples read from mirrored 'lite'
    spec files via the bossdata package.

    Yields nothing if the bossdata finder/mirror cannot be constructed
    (e.g. missing environment configuration).
    """
    import bossdata.path
    import bossdata.remote
    try:
        finder = bossdata.path.Finder()
        mirror = bossdata.remote.Manager()
    except ValueError as e:
        print(e)
        # Bug fix: the original did `yield StopIteration`, which yielded the
        # StopIteration *class* as a bogus item instead of ending iteration.
        return
    for target in targets:
        remote_path = finder.get_spec_path(plate=target['plate'], mjd=target['mjd'],
                                           fiber=target['fiber'], lite=True)
        local_path = mirror.get(remote_path)
        spec = fits.open(local_path)
        yield target, qusp.spectrum.read_lite_spectrum(spec)
def get_corrected_spectrum(target, tpcorr, paths=None):
    """
    Return the throughput-corrected coadded spectrum of *target*.

    Args:
        target (:class:`Target`): a target
        tpcorr: open hdf5 file with a 'wave' dataset and per-target
            'plate/mjd/fiber' correction datasets
        paths (:class:`qusp.paths.Paths`, optional): BOSS data locator

    Returns:
        Corrected coadded spectrum of the specified target.

    Raises:
        KeyError: if *tpcorr* has no entry for this target.
    """
    from scipy.interpolate import interp1d
    combined = get_combined_spectrum(target, paths)
    correction_key = '%s/%s/%s' % (target['plate'], target['mjd'], target['fiber'])
    interpolator = interp1d(tpcorr['wave'].value, tpcorr[correction_key].value,
                            kind='linear', copy=False)
    return combined.create_corrected(interpolator)
| 35.482249 | 118 | 0.636455 | """
Provides support for working with BOSS targets.
In qusp, a target is identified by a unique plate-mjd-fiber. They are implemented as dictionaries and
must have at least 'plate', 'mjd', and 'fiber' keys specified. The Target model is designed to be flexible,
in that other attributes can be added to targets as needed.
Examples
--------
Construct a target from a string identifier::
target = qusp.target.Target.from_string('plate-mjd-fiber')
Construct a target from a dictionary::
target = qusp.target.Target({'target':'plate-mjd-fiber'})
Read a target list along with **ra**, **dec**, and **z** columns::
targets = qusp.target.load_target_list(filename,
fields=[('ra', float, 1), ('dec', float, 2), ('z', float, 3)])
Save a target list along with **z** and **sn** fields::
qusp.target.save_target_list(filename, targets, fields=['z', 'sn'])
Iterate over combined spectra for a list targets::
for target, spectrum in qusp.target.get_combined_spectra(targets):
...
Iterate over plates for a list of targets::
for target, spplate in qusp.target.get_target_plates(targets):
spectrum = qusp.read_combined_spectrum(spplate, target)
...
-------------
"""
import numpy as np
from astropy.io import fits
import os
import qusp
class Target(dict):
"""
Represents a BOSS target.
Args:
args: Variable length argument list.
kwargs: Arbitrary keyword arguments.
Raises:
AssertionError: if 'target' key is not specified
"""
def __init__(self, *args, **kwargs):
super(Target, self).__init__(*args, **kwargs)
self['target'] = '{}-{}-{}'.format(self['plate'], self['mjd'], self['fiber'])
self['plate'] = int(self['plate'])
self['mjd'] = int(self['mjd'])
self['fiber'] = int(self['fiber'])
def to_string(self):
"""
Returns the standard plate-mjd-fiber string represntation of the target.
Returns:
plate-mjd-fiber string represntation of target
"""
return self['target']
@classmethod
def from_string(cls, target_string):
"""
Returns a Target object constructed from a target string identifier.
Args:
target_string (str): a target string identifier.
Returns:
:class:`Target` object
"""
plate, mjd, fiber = target_string.split('-')
return cls({'plate':plate, 'mjd':mjd, 'fiber':fiber})
@classmethod
def from_plate_mjd_fiber(cls, plate, mjd, fiber):
"""
Returns a Target object constructed from plate, mjd, fiber.
Args:
plate (int): target's plate id
mjd (int): mjd observation
fiber (int): target's fiber id
Returns:
:class:`Target` object
"""
return cls({'plate':plate, 'mjd':mjd, 'fiber':fiber})
def load_target_list(filename, fields=None, verbose=False):
"""
Loads a target data from a text file.
The first column must be plate-mjd-fiber target identifier.
Use the fields argument to specify additional columns to
read. Must specify a (name, type, column index) tuple for each field.
Args:
filename (str): The filename to load.
fields (list, optional): A list of columns to read, see example.
Defaults to None.
verbose (bool, optional): Whether or not to print verbose output.
Defaults to False.
Returns:
list of :class:`Target` objects.
"""
if fields is None:
fields = []
fields = [('plate', 'S4', 0), ('mjd', 'S5', 1), ('fiber', 'S4', 2)] + fields
names, formats, cols = zip(*fields)
if verbose:
print 'Target list: %s' % filename
print 'Reading fields: %s' % (', '.join(names))
targets = np.genfromtxt(
filename, dtype={'names':names, 'formats':formats}, usecols=cols, skip_header=1)
return [Target(dict(zip(targets.dtype.names, t))) for t in targets]
def add_args(parser):
"""
Adds arguments to the provided command-line parser.
Args:
parser (argparse.ArgumentParser): an argument parser
"""
parser.add_argument(
"--targets", type=str, default=None,
help="text file containing target list")
parser.add_argument(
"--ntargets", type=int, default=0,
help="number of targets to use, 0 for all")
def load_target_list_from_args(args, fields=None):
    """
    Load a target list using the standard command-line arguments registered
    by :func:`add_args`.

    Args:
        args (argparse.Namespace): argparse argument namespace; must provide
            ``targets`` (filename) and ``ntargets`` attributes.
        fields (list, optional): A list of additional (name, format, column)
            tuples to read. Defaults to None.

    Returns:
        list of :class:`Target` objects, truncated to ``args.ntargets``
        entries when that value is positive.
    """
    target_list = load_target_list(args.targets, fields=fields)
    # ntargets == 0 means "use everything in the file".
    ntargets = args.ntargets if args.ntargets > 0 else len(target_list)
    # Py2/Py3-compatible print() call (was a Python 2 print statement).
    print('Using %d targets (out of %d in file)' % (ntargets, len(target_list)))
    return target_list[:ntargets]
def save_target_list(filename, targets, fields=None, verbose=False):
    """
    Write a list of targets to the provided file, one target per line.

    By default only the target plate-mjd-fiber identifier (the 'target' key)
    is written. Use the *fields* argument to append additional target keys
    as space-separated columns.

    Args:
        filename (str): The filename of the output text file to create
            (overwritten if it exists).
        targets (:class:`Target`): A list of :class:`Target` objects to save.
        fields (str or list, optional): Additional :class:`Target` keys to
            annotate the output list with. Defaults to None.
        verbose (bool, optional): Whether or not to print verbose output.
            Defaults to False.
    """
    keys = ['target']
    if fields is not None:
        # Accept either a single key or a list of keys.
        keys.extend(fields if isinstance(fields, list) else [fields])
    if verbose:
        # Py2/Py3-compatible print() call (was a Python 2 print statement).
        print('Saving targets to %s w/ fields: %s' % (filename, ', '.join(keys)))
    with open(filename, 'w') as outfile:
        for target in targets:
            outfile.write(' '.join([str(target[key]) for key in keys])+'\n')
def get_target_plates(targets, boss_path=None, sort=True, verbose=False):
    """
    A generator that yields (target, spplate) tuples for the provided list of
    targets. With sort=True, the targets will be sorted by plate-mjd-fiber to
    reduce the number of io operations.

    Args:
        targets (:class:`Target`): list of :class:`Target` objects
            to iterate through.
        boss_path (str, optional): path to boss data directory. Default is to
            look this up using env var.
        sort (bool, optional): Whether or not to sort the provided targets
            by plate-mjd-fiber. Defaults to True.
        verbose (bool, optional): Whether or not to print verbose output.
            Defaults to False.

    Yields:
        The next tuple ``(target, spplate)``, where ``target`` is a :class:`Target` and \
        ``spplate`` is the corresponding FITS file containing its coadded spectrum from the list of *targets*.
    """
    if boss_path is None:
        paths = qusp.paths.Paths()
        boss_path = paths.boss_path
    if sort:
        targets = sorted(
            targets, key=lambda t: (t['plate'], t['mjd'], t['fiber']))
    # Keep the current plate file open across consecutive targets that share
    # it; only reopen when the plate/mjd changes.
    currently_opened_filename = None
    for target in targets:
        plate_filename = 'spPlate-%s-%s.fits' % (target['plate'], target['mjd'])
        if plate_filename != currently_opened_filename:
            if currently_opened_filename is not None:
                spplate.close()
            full_path_to_spplate = os.path.join(
                boss_path, str(target['plate']), plate_filename)
            if verbose:
                # Py2/Py3-compatible print() (was a Python 2 print statement).
                print('Opening plate file %s...' % full_path_to_spplate)
            spplate = fits.open(full_path_to_spplate)
            currently_opened_filename = plate_filename
        # NOTE(review): the last opened plate file is never closed when the
        # generator is exhausted -- confirm callers rely on GC for cleanup.
        yield target, spplate
def get_combined_spectra(targets, paths=None, sort=True, verbose=False, tpcorr=None):
    """
    A generator that yields (target, spectrum) tuples for the provided list of
    targets. With sort=True, the targets will be sorted by plate-mjd-fiber to
    reduce the number of io operations.

    Args:
        targets (:class:`Target`): list of :class:`Target` objects
            to iterate through.
        paths (:class:`qusp.paths.Paths`, optional): paths object that knows
            the location of the boss data dir. A default one is constructed
            when omitted.
        sort (bool, optional): Whether or not to sort the provided targets
            by plate-mjd-fiber. Defaults to True.
        verbose (bool, optional): Whether or not to print verbose output.
            Defaults to False.
        tpcorr (hdf5 File object, optional): throughput corrections; when
            given, corrected spectra are yielded instead and targets with no
            correction entry are skipped.

    Yields:
        The next tuple ``(target, spectrum)``, where ``target`` is a :class:`Target` and \
        ``spectrum`` is a :class:`qusp.spectrum.Spectrum` that corresponds to the \
        target's coadded spectrum.
    """
    if paths is None:
        # BUG FIX: paths defaulted to None but was dereferenced below
        # (paths.boss_path), which raised AttributeError when omitted.
        paths = qusp.paths.Paths()
    for target, spplate in get_target_plates(targets, boss_path=paths.boss_path, sort=sort, verbose=verbose):
        if tpcorr is not None:
            try:
                corrected = get_corrected_spectrum(target, tpcorr, paths)
                yield target, corrected
            except KeyError:
                # No throughput correction recorded for this target.
                if verbose:
                    # Py2/Py3-compatible print() (was a Py2 print statement).
                    print('get_combined_spectra: Error reading correction for %s' % target.to_string())
                continue
        else:
            yield target, qusp.spectrum.read_combined_spectrum(spplate, target)
def get_combined_spectrum(target, paths=None):
    """
    Return the coadded spectrum of the specified target.

    Args:
        target (:class:`Target`): a target
        paths (:class:`qusp.paths.Paths`, optional): paths object that knows
            the location of the boss data dir; a default one is constructed
            when omitted.

    Returns:
        Coadded spectrum of the specified target.
    """
    path_helper = qusp.paths.Paths() if paths is None else paths
    spplate = fits.open(path_helper.get_spplate_filename(target))
    return qusp.spectrum.read_combined_spectrum(spplate, target)
def get_lite_spectrum(target, paths=None):
    """
    Return the "lite" spectrum of the specified target.

    Args:
        target (:class:`Target`): a target
        paths (:class:`qusp.paths.Paths`, optional): paths object used to
            locate the spec file; a default one is constructed when omitted.

    Returns:
        The target's lite spectrum.
    """
    path_helper = qusp.paths.Paths() if paths is None else paths
    hdulist = fits.open(path_helper.get_spec_filename(target, lite=True))
    return qusp.spectrum.read_lite_spectrum(hdulist)
def get_lite_spectra(targets):
    """
    Generate (target, spectrum) pairs for *targets* from "lite" spec files
    fetched through bossdata.

    Args:
        targets (:class:`Target`): list of :class:`Target` objects.

    Yields:
        (target, spectrum) tuples. If the bossdata finder/mirror cannot be
        constructed, the error is printed and the generator ends immediately.
    """
    import bossdata.path
    import bossdata.remote
    try:
        finder = bossdata.path.Finder()
        mirror = bossdata.remote.Manager()
    except ValueError as e:
        print(e)
        # BUG FIX: this was `yield StopIteration`, which yielded the
        # StopIteration *class* as a bogus item; `return` ends the generator.
        return
    for target in targets:
        remote_path = finder.get_spec_path(plate=target['plate'], mjd=target['mjd'], fiber=target['fiber'], lite=True)
        local_path = mirror.get(remote_path)
        spec = fits.open(local_path)
        yield target, qusp.spectrum.read_lite_spectrum(spec)
def get_corrected_spectrum(target, tpcorr, paths=None):
    """
    Return the throughput-corrected coadded spectrum of the specified target.

    Args:
        target (:class:`Target`): a target
        tpcorr (hdf5 File object): hdf5 file with throughput corrections;
            must contain a 'wave' dataset and one dataset per
            plate/mjd/fiber entry.
        paths (:class:`qusp.paths.Paths`, optional): paths object that knows
            the location of the boss data dir.

    Returns:
        Corrected coadded spectrum of the specified target.

    Raises:
        KeyError: if *tpcorr* has no entry for this target.
    """
    from scipy.interpolate import interp1d
    combined = get_combined_spectrum(target, paths)
    correction_key = '%s/%s/%s' % (target['plate'], target['mjd'], target['fiber'])
    correction = interp1d(tpcorr['wave'].value, tpcorr[correction_key].value,
                          kind='linear', copy=False)
    return combined.create_corrected(correction)
| false | true |
f720915b2ad6e84b10d393f7c627605ab0f69c7f | 2,018 | py | Python | tools/Polygraphy/polygraphy/config.py | spradius/TensorRT | eb5de99b523c76c2f3ae997855ad86d3a1e86a31 | [
"Apache-2.0"
] | 1 | 2021-08-23T01:15:16.000Z | 2021-08-23T01:15:16.000Z | tools/Polygraphy/polygraphy/config.py | spradius/TensorRT | eb5de99b523c76c2f3ae997855ad86d3a1e86a31 | [
"Apache-2.0"
] | null | null | null | tools/Polygraphy/polygraphy/config.py | spradius/TensorRT | eb5de99b523c76c2f3ae997855ad86d3a1e86a31 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
# Comparisons already produce bool, so the redundant bool() wrappers around
# the `!= "0"` expressions have been removed.
INTERNAL_CORRECTNESS_CHECKS = os.environ.get("POLYGRAPHY_INTERNAL_CORRECTNESS_CHECKS", "0") != "0"
"""
Whether internal correctness checks are enabled.
This can be configured by setting the 'POLYGRAPHY_INTERNAL_CORRECTNESS_CHECKS' environment variable.
"""

AUTOINSTALL_DEPS = os.environ.get("POLYGRAPHY_AUTOINSTALL_DEPS", "0") != "0"
"""
Whether Polygraphy will automatically install required Python packages at runtime.
This can be configured by setting the 'POLYGRAPHY_AUTOINSTALL_DEPS' environment variable.
"""

INSTALL_CMD = os.environ.get("POLYGRAPHY_INSTALL_CMD", "{:} -m pip install".format(sys.executable)).split()
"""
The command to use to automatically install dependencies. Only relevant when AUTOINSTALL_DEPS
is enabled. Defaults to ``["python3", "-m", "pip", "install"]``.
This can be configured by setting the 'POLYGRAPHY_INSTALL_CMD' environment variable to a
string containing the command; for example: ``python3 -m pip install``.
"""

ARRAY_SWAP_THRESHOLD_MB = int(os.environ.get("POLYGRAPHY_ARRAY_SWAP_THRESHOLD_MB", "-1"))
"""
The threshold, in megabytes, above which Polygraphy will evict a NumPy array from memory and swap it to disk.
A negative value disables swapping and a value of 0 causes all arrays to be saved to disk.
Disabled by default.
This can be configured by setting the 'POLYGRAPHY_ARRAY_SWAP_THRESHOLD_MB' environment variable.
"""
| 43.869565 | 109 | 0.77552 |
import os
import sys
INTERNAL_CORRECTNESS_CHECKS = bool(os.environ.get("POLYGRAPHY_INTERNAL_CORRECTNESS_CHECKS", "0") != "0")
AUTOINSTALL_DEPS = bool(os.environ.get("POLYGRAPHY_AUTOINSTALL_DEPS", "0") != "0")
INSTALL_CMD = os.environ.get("POLYGRAPHY_INSTALL_CMD", "{:} -m pip install".format(sys.executable)).split()
ARRAY_SWAP_THRESHOLD_MB = int(os.environ.get("POLYGRAPHY_ARRAY_SWAP_THRESHOLD_MB", "-1"))
| true | true |
f720930247112fff9273c3101072dae9279f6fa7 | 4,606 | py | Python | tasks/trace_agent.py | charyveer75/datadog-agent | fdf4d2028b0dccd485eb280f4fefda84931927bc | [
"Apache-2.0"
] | 2 | 2020-02-11T16:05:23.000Z | 2022-03-30T19:50:28.000Z | tasks/trace_agent.py | charyveer75/datadog-agent | fdf4d2028b0dccd485eb280f4fefda84931927bc | [
"Apache-2.0"
] | 2 | 2021-08-11T15:24:27.000Z | 2021-08-23T22:13:05.000Z | tasks/trace_agent.py | charyveer75/datadog-agent | fdf4d2028b0dccd485eb280f4fefda84931927bc | [
"Apache-2.0"
] | null | null | null | import os
import sys
import shutil
import invoke
from invoke import task
from .utils import bin_name, get_build_flags, get_version_numeric_only, load_release_versions
from .utils import REPO_PATH
from .build_tags import get_build_tags, get_default_build_tags, LINUX_ONLY_TAGS, REDHAT_AND_DEBIAN_ONLY_TAGS, REDHAT_AND_DEBIAN_DIST
from .go import deps
# Output directory for built trace-agent binaries.
BIN_PATH = os.path.join(".", "bin", "trace-agent")
# Go build tags applied by default when building the trace agent.
DEFAULT_BUILD_TAGS = [
    "netcgo",
    "secrets",
    "docker",
    "kubeapiserver",
    "kubelet",
]
@task
def build(ctx, rebuild=False, race=False, precompile_only=False, build_include=None,
          build_exclude=None, major_version='7', python_runtimes='3', arch="x64"):
    """
    Build the trace agent.

    Args:
        ctx: invoke context.
        rebuild: pass -a to go build (force rebuilding of packages).
        race: enable the Go race detector.
        precompile_only: pass -i to go build (install dependencies only).
        build_include / build_exclude: comma-separated build tags to add or
            remove from the defaults.
        major_version: agent major version forwarded to the build flags.
        python_runtimes: python runtimes forwarded to the build flags.
        arch: target architecture, "x64" or "x86".
    """
    # get env prior to windows sources so we only have to set the target architecture once
    ldflags, gcflags, env = get_build_flags(ctx, arch=arch, major_version=major_version, python_runtimes=python_runtimes)
    # generate windows resources
    if sys.platform == 'win32':
        windres_target = "pe-x86-64"
        if arch == "x86":
            env["GOARCH"] = "386"
            windres_target = "pe-i386"
        # Embed the numeric agent version into the Windows resource file.
        ver = get_version_numeric_only(ctx, env, major_version=major_version)
        maj_ver, min_ver, patch_ver = ver.split(".")
        ctx.run("windmc --target {target_arch} -r cmd/trace-agent/windows_resources cmd/trace-agent/windows_resources/trace-agent-msg.mc".format(target_arch=windres_target))
        ctx.run("windres --define MAJ_VER={maj_ver} --define MIN_VER={min_ver} --define PATCH_VER={patch_ver} -i cmd/trace-agent/windows_resources/trace-agent.rc --target {target_arch} -O coff -o cmd/trace-agent/rsrc.syso".format(
            maj_ver=maj_ver,
            min_ver=min_ver,
            patch_ver=patch_ver,
            target_arch=windres_target
        ))
    # Resolve the final set of build tags from the defaults and overrides.
    build_include = DEFAULT_BUILD_TAGS if build_include is None else build_include.split(",")
    build_exclude = [] if build_exclude is None else build_exclude.split(",")
    # Linux-only tags must be excluded on every other platform.
    if not sys.platform.startswith('linux'):
        for ex in LINUX_ONLY_TAGS:
            if ex not in build_exclude:
                build_exclude.append(ex)
    build_tags = get_build_tags(build_include, build_exclude)
    cmd = "go build {race_opt} {build_type} -tags \"{go_build_tags}\" "
    cmd += "-o {agent_bin} -gcflags=\"{gcflags}\" -ldflags=\"{ldflags}\" {REPO_PATH}/cmd/trace-agent"
    args = {
        "race_opt": "-race" if race else "",
        # -a forces a full rebuild; -i only installs package dependencies.
        "build_type": "-a" if rebuild else ("-i" if precompile_only else ""),
        "go_build_tags": " ".join(build_tags),
        "agent_bin": os.path.join(BIN_PATH, bin_name("trace-agent", android=False)),
        "gcflags": gcflags,
        "ldflags": ldflags,
        "REPO_PATH": REPO_PATH,
    }
    # Regenerate the version info package before compiling.
    ctx.run("go generate {REPO_PATH}/pkg/trace/info".format(**args), env=env)
    ctx.run(cmd.format(**args), env=env)
@task
def integration_tests(ctx, install_deps=False, race=False, remote_docker=False):
    """
    Run integration tests for the trace agent.

    Args:
        ctx: invoke context.
        install_deps: install Go dependencies before running the tests.
        race: enable the Go race detector.
        remote_docker: run each test binary through the remote docker
            wrapper via go test's -exec flag.
    """
    if install_deps:
        deps(ctx)

    test_args = {
        # NOTE(review): go_build_tags is assembled but not interpolated into
        # the test command below -- confirm whether -tags was intended here.
        "go_build_tags": " ".join(get_default_build_tags()),
        "race_opt": "-race" if race else "",
        "exec_opts": "",
    }
    if remote_docker:
        test_args["exec_opts"] = "-exec \"inv docker.dockerize-test\""

    # BUG FIX: exec_opts was computed but never interpolated into the
    # command, so --remote-docker previously had no effect.
    go_cmd = 'INTEGRATION=yes go test {race_opt} -v {exec_opts}'.format(**test_args)

    prefixes = [
        "./pkg/trace/test/testsuite/...",
    ]

    for prefix in prefixes:
        ctx.run("{} {}".format(go_cmd, prefix))
@task
def cross_compile(ctx, tag=""):
    """
    Cross-compiles the trace-agent binaries. Use the "--tag=X" argument to specify build tag.

    Checks out the given git tag, builds windows/linux/darwin amd64 binaries
    via xgo into ./bin/trace-agent/<tag>, then returns to the previous branch.
    """
    if not tag:
        print("Argument --tag=<version> is required.")
        return

    print("Building tag %s..." % tag)

    # $V is expanded by the shell in the commands below.
    env = {
        "TRACE_AGENT_VERSION": tag,
        "V": tag,
    }
    # Build from the tagged revision.
    ctx.run("git checkout $V", env=env)
    ctx.run("mkdir -p ./bin/trace-agent/$V", env=env)
    ctx.run("go generate ./pkg/trace/info", env=env)
    ctx.run("go get -u github.com/karalabe/xgo")
    ctx.run("xgo -dest=bin/trace-agent/$V -go=1.11 -out=trace-agent-$V -targets=windows-6.1/amd64,linux/amd64,darwin-10.11/amd64 ./cmd/trace-agent", env=env)
    # Normalize the platform suffixes produced by xgo.
    ctx.run("mv ./bin/trace-agent/$V/trace-agent-$V-windows-6.1-amd64.exe ./bin/trace-agent/$V/trace-agent-$V-windows-amd64.exe", env=env)
    ctx.run("mv ./bin/trace-agent/$V/trace-agent-$V-darwin-10.11-amd64 ./bin/trace-agent/$V/trace-agent-$V-darwin-amd64 ", env=env)
    # Return to the previously checked-out branch.
    ctx.run("git checkout -")
    print("Done! Binaries are located in ./bin/trace-agent/%s" % tag)
| 35.430769 | 230 | 0.650673 | import os
import sys
import shutil
import invoke
from invoke import task
from .utils import bin_name, get_build_flags, get_version_numeric_only, load_release_versions
from .utils import REPO_PATH
from .build_tags import get_build_tags, get_default_build_tags, LINUX_ONLY_TAGS, REDHAT_AND_DEBIAN_ONLY_TAGS, REDHAT_AND_DEBIAN_DIST
from .go import deps
BIN_PATH = os.path.join(".", "bin", "trace-agent")
DEFAULT_BUILD_TAGS = [
"netcgo",
"secrets",
"docker",
"kubeapiserver",
"kubelet",
]
@task
def build(ctx, rebuild=False, race=False, precompile_only=False, build_include=None,
build_exclude=None, major_version='7', python_runtimes='3', arch="x64"):
ldflags, gcflags, env = get_build_flags(ctx, arch=arch, major_version=major_version, python_runtimes=python_runtimes)
if sys.platform == 'win32':
windres_target = "pe-x86-64"
if arch == "x86":
env["GOARCH"] = "386"
windres_target = "pe-i386"
ver = get_version_numeric_only(ctx, env, major_version=major_version)
maj_ver, min_ver, patch_ver = ver.split(".")
ctx.run("windmc --target {target_arch} -r cmd/trace-agent/windows_resources cmd/trace-agent/windows_resources/trace-agent-msg.mc".format(target_arch=windres_target))
ctx.run("windres --define MAJ_VER={maj_ver} --define MIN_VER={min_ver} --define PATCH_VER={patch_ver} -i cmd/trace-agent/windows_resources/trace-agent.rc --target {target_arch} -O coff -o cmd/trace-agent/rsrc.syso".format(
maj_ver=maj_ver,
min_ver=min_ver,
patch_ver=patch_ver,
target_arch=windres_target
))
build_include = DEFAULT_BUILD_TAGS if build_include is None else build_include.split(",")
build_exclude = [] if build_exclude is None else build_exclude.split(",")
if not sys.platform.startswith('linux'):
for ex in LINUX_ONLY_TAGS:
if ex not in build_exclude:
build_exclude.append(ex)
build_tags = get_build_tags(build_include, build_exclude)
cmd = "go build {race_opt} {build_type} -tags \"{go_build_tags}\" "
cmd += "-o {agent_bin} -gcflags=\"{gcflags}\" -ldflags=\"{ldflags}\" {REPO_PATH}/cmd/trace-agent"
args = {
"race_opt": "-race" if race else "",
"build_type": "-a" if rebuild else ("-i" if precompile_only else ""),
"go_build_tags": " ".join(build_tags),
"agent_bin": os.path.join(BIN_PATH, bin_name("trace-agent", android=False)),
"gcflags": gcflags,
"ldflags": ldflags,
"REPO_PATH": REPO_PATH,
}
ctx.run("go generate {REPO_PATH}/pkg/trace/info".format(**args), env=env)
ctx.run(cmd.format(**args), env=env)
@task
def integration_tests(ctx, install_deps=False, race=False, remote_docker=False):
if install_deps:
deps(ctx)
test_args = {
"go_build_tags": " ".join(get_default_build_tags()),
"race_opt": "-race" if race else "",
"exec_opts": "",
}
if remote_docker:
test_args["exec_opts"] = "-exec \"inv docker.dockerize-test\""
go_cmd = 'INTEGRATION=yes go test {race_opt} -v'.format(**test_args)
prefixes = [
"./pkg/trace/test/testsuite/...",
]
for prefix in prefixes:
ctx.run("{} {}".format(go_cmd, prefix))
@task
def cross_compile(ctx, tag=""):
if not tag:
print("Argument --tag=<version> is required.")
return
print("Building tag %s..." % tag)
env = {
"TRACE_AGENT_VERSION": tag,
"V": tag,
}
ctx.run("git checkout $V", env=env)
ctx.run("mkdir -p ./bin/trace-agent/$V", env=env)
ctx.run("go generate ./pkg/trace/info", env=env)
ctx.run("go get -u github.com/karalabe/xgo")
ctx.run("xgo -dest=bin/trace-agent/$V -go=1.11 -out=trace-agent-$V -targets=windows-6.1/amd64,linux/amd64,darwin-10.11/amd64 ./cmd/trace-agent", env=env)
ctx.run("mv ./bin/trace-agent/$V/trace-agent-$V-windows-6.1-amd64.exe ./bin/trace-agent/$V/trace-agent-$V-windows-amd64.exe", env=env)
ctx.run("mv ./bin/trace-agent/$V/trace-agent-$V-darwin-10.11-amd64 ./bin/trace-agent/$V/trace-agent-$V-darwin-amd64 ", env=env)
ctx.run("git checkout -")
print("Done! Binaries are located in ./bin/trace-agent/%s" % tag)
| true | true |
f7209497e72c208305cb4e1cce93a790ea4e4114 | 328 | py | Python | emoji_chengyu/main.py | alingse/emoji-chengyu | 2d4436212c1d2899dfc12a1c965ea2ddce9a4aab | [
"MIT"
] | 3 | 2020-04-28T03:25:36.000Z | 2022-01-24T04:52:01.000Z | emoji_chengyu/main.py | alingse/emoji-chengyu | 2d4436212c1d2899dfc12a1c965ea2ddce9a4aab | [
"MIT"
] | null | null | null | emoji_chengyu/main.py | alingse/emoji-chengyu | 2d4436212c1d2899dfc12a1c965ea2ddce9a4aab | [
"MIT"
] | 1 | 2020-04-28T03:25:49.000Z | 2020-04-28T03:25:49.000Z | import itertools
from emoji_chengyu.puzzle import gen_puzzle
def emoji_chengyu():
N = 100
pg = gen_puzzle()
puzzles = list(itertools.islice(pg, N))
puzzles.sort(key=lambda p: sum(p.mask), reverse=True)
M = 20
for puzzle in puzzles[:M]:
print(''.join(puzzle.puzzle), puzzle.chengyu_item.word)
| 21.866667 | 63 | 0.670732 | import itertools
from emoji_chengyu.puzzle import gen_puzzle
def emoji_chengyu():
N = 100
pg = gen_puzzle()
puzzles = list(itertools.islice(pg, N))
puzzles.sort(key=lambda p: sum(p.mask), reverse=True)
M = 20
for puzzle in puzzles[:M]:
print(''.join(puzzle.puzzle), puzzle.chengyu_item.word)
| true | true |
f72094b30996c32bb9a24f3c3252221bebecd3fa | 2,378 | py | Python | tests/technical_ratio_test.py | bopo/mooquant | 244a87d4cd8b4d918eec4f16905e0921c3b39f50 | [
"Apache-2.0"
] | 21 | 2017-09-07T16:08:21.000Z | 2020-10-15T13:42:21.000Z | tests/technical_ratio_test.py | bopo/MooQuant | 244a87d4cd8b4d918eec4f16905e0921c3b39f50 | [
"Apache-2.0"
] | 209 | 2018-10-09T11:57:39.000Z | 2021-03-25T21:40:30.000Z | tests/technical_ratio_test.py | bopo/MooQuant | 244a87d4cd8b4d918eec4f16905e0921c3b39f50 | [
"Apache-2.0"
] | 15 | 2018-11-17T20:14:37.000Z | 2022-02-04T23:55:29.000Z | # -*- coding: utf-8 -*-
# MooQuant
#
# Copyright 2011-2015 Gabriel Martin Becedillas Ruiz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: Gabriel Martin Becedillas Ruiz <gabriel.becedillas@gmail.com>
"""
from mooquant import dataseries
from mooquant.technical import ratio
from . import common
class TestCase(common.TestCase):
    """Unit tests for :class:`mooquant.technical.ratio.Ratio`."""

    def __make_ratio(self, values, max_len=None):
        # Wrap a sequence data series with a Ratio, then feed it the values.
        source = dataseries.SequenceDataSeries()
        ratio_ds = ratio.Ratio(source, max_len)
        for value in values:
            source.append(value)
        return ratio_ds

    def testSimple(self):
        ratio_ds = self.__make_ratio([1, 2, 1])
        self.assertEqual(ratio_ds[0], None)
        self.assertEqual(ratio_ds[1], 1)
        self.assertEqual(ratio_ds[2], -0.5)
        self.assertEqual(ratio_ds[-1], -0.5)
        with self.assertRaises(IndexError):
            ratio_ds[3]
        self.assertEqual(ratio_ds[-2], ratio_ds[1])
        self.assertEqual(ratio_ds[-1], ratio_ds[2])
        self.assertEqual(len(ratio_ds.getDateTimes()), 3)
        for idx in range(len(ratio_ds)):
            self.assertEqual(ratio_ds.getDateTimes()[idx], None)

    def testNegativeValues(self):
        ratio_ds = self.__make_ratio([-1, -2, -1])
        self.assertEqual(ratio_ds[0], None)
        self.assertEqual(ratio_ds[1], -1)
        self.assertEqual(ratio_ds[2], 0.5)
        self.assertEqual(ratio_ds[-1], 0.5)
        with self.assertRaises(IndexError):
            ratio_ds[3]
        self.assertEqual(ratio_ds[-2], ratio_ds[1])
        self.assertEqual(ratio_ds[-1], ratio_ds[2])
        self.assertEqual(len(ratio_ds.getDateTimes()), 3)
        for idx in range(len(ratio_ds)):
            self.assertEqual(ratio_ds.getDateTimes()[idx], None)

    def testBounded(self):
        # With maxLen=2 only the newest two ratio values are retained.
        ratio_ds = self.__make_ratio([-1, -2, -1], 2)
        self.assertEqual(ratio_ds[0], -1)
        self.assertEqual(ratio_ds[1], 0.5)
        self.assertEqual(len(ratio_ds), 2)
| 30.883117 | 79 | 0.647603 |
from mooquant import dataseries
from mooquant.technical import ratio
from . import common
class TestCase(common.TestCase):
def __buildRatio(self, values, ratioMaxLen=None):
seqDS = dataseries.SequenceDataSeries()
ret = ratio.Ratio(seqDS, ratioMaxLen)
for value in values:
seqDS.append(value)
return ret
def testSimple(self):
ratio = self.__buildRatio([1, 2, 1])
self.assertEqual(ratio[0], None)
self.assertEqual(ratio[1], 1)
self.assertEqual(ratio[2], -0.5)
self.assertEqual(ratio[-1], -0.5)
with self.assertRaises(IndexError):
ratio[3]
self.assertEqual(ratio[-2], ratio[1])
self.assertEqual(ratio[-1], ratio[2])
self.assertEqual(len(ratio.getDateTimes()), 3)
for i in range(len(ratio)):
self.assertEqual(ratio.getDateTimes()[i], None)
def testNegativeValues(self):
ratio = self.__buildRatio([-1, -2, -1])
self.assertEqual(ratio[0], None)
self.assertEqual(ratio[1], -1)
self.assertEqual(ratio[2], 0.5)
self.assertEqual(ratio[-1], 0.5)
with self.assertRaises(IndexError):
ratio[3]
self.assertEqual(ratio[-2], ratio[1])
self.assertEqual(ratio[-1], ratio[2])
self.assertEqual(len(ratio.getDateTimes()), 3)
for i in range(len(ratio)):
self.assertEqual(ratio.getDateTimes()[i], None)
def testBounded(self):
ratio = self.__buildRatio([-1, -2, -1], 2)
self.assertEqual(ratio[0], -1)
self.assertEqual(ratio[1], 0.5)
self.assertEqual(len(ratio), 2)
| true | true |
f72094fcc0eabe24153127ed8f8cfab3259c6ceb | 1,247 | py | Python | IRIS_data_download/IRIS_download_support/obspy/io/segy/util.py | earthinversion/Fnet_IRIS_data_automated_download | 09a6e0c992662feac95744935e038d1c68539fa1 | [
"MIT"
] | 2 | 2020-03-05T01:03:01.000Z | 2020-12-17T05:04:07.000Z | IRIS_data_download/IRIS_download_support/obspy/io/segy/util.py | earthinversion/Fnet_IRIS_data_automated_download | 09a6e0c992662feac95744935e038d1c68539fa1 | [
"MIT"
] | 4 | 2021-03-31T19:25:55.000Z | 2021-12-13T20:32:46.000Z | IRIS_data_download/IRIS_download_support/obspy/io/segy/util.py | earthinversion/Fnet_IRIS_data_automated_download | 09a6e0c992662feac95744935e038d1c68539fa1 | [
"MIT"
] | 2 | 2020-09-08T19:33:40.000Z | 2021-04-05T09:47:50.000Z | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA
from struct import unpack
from obspy.core.util.libnames import _load_cdll
# Import shared libsegy
clibsegy = _load_cdll("segy")
def unpack_header_value(endian, packed_value, length, special_format):
    """
    Unpack a single binary SEG Y header value.

    :param endian: byte-order prefix for :func:`struct.unpack` ('<' or '>').
    :param packed_value: the raw bytes holding the value.
    :param length: byte length of the value (2, 4, or 8).
    :param special_format: optional struct format character that overrides
        the default two's-complement integer interpretation.
    :returns: the unpacked value. 8-byte (unassigned) fields are returned as
        the raw bytes because their encoding is unspecified.
    :raises Exception: if the length is unsupported.
    """
    # Use special format if necessary.
    if special_format:
        fmt = ('%s%s' % (endian, special_format)).encode('ascii', 'strict')
        return unpack(fmt, packed_value)[0]
    # Unpack according to different lengths.
    elif length == 2:
        # Renamed from `format` to avoid shadowing the builtin.
        fmt = ('%sh' % endian).encode('ascii', 'strict')
        return unpack(fmt, packed_value)[0]
    # Update: Seems to be correct. Two's complement integers seem to be
    # the common way to store integer values.
    elif length == 4:
        fmt = ('%si' % endian).encode('ascii', 'strict')
        return unpack(fmt, packed_value)[0]
    # The unassigned field. Since it is unclear how this field is
    # encoded it will just be stored as a string.
    elif length == 8:
        return packed_value
    # Should not happen. Keep the bare Exception type for backwards
    # compatibility with callers, but attach a useful message.
    else:
        raise Exception("Unsupported header value length: %s" % length)
| 31.974359 | 75 | 0.652767 |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import *
from struct import unpack
from obspy.core.util.libnames import _load_cdll
clibsegy = _load_cdll("segy")
def unpack_header_value(endian, packed_value, length, special_format):
if special_format:
fmt = ('%s%s' % (endian, special_format)).encode('ascii', 'strict')
return unpack(fmt, packed_value)[0]
elif length == 2:
format = ('%sh' % endian).encode('ascii', 'strict')
return unpack(format, packed_value)[0]
# the common way to store integer values.
elif length == 4:
format = ('%si' % endian).encode('ascii', 'strict')
return unpack(format, packed_value)[0]
# The unassigned field. Since it is unclear how this field is
# encoded it will just be stored as a string.
elif length == 8:
return packed_value
# Should not happen
else:
raise Exception
| true | true |
f72095d4a18d4936ed6471f607ee84100d63dad6 | 2,845 | py | Python | MMLanScan/Data/build_port_services_list.py | cyb3rc/MMLanScan | 60cf1cb9476bad8ee522780ce4df5513a139f47d | [
"MIT"
] | null | null | null | MMLanScan/Data/build_port_services_list.py | cyb3rc/MMLanScan | 60cf1cb9476bad8ee522780ce4df5513a139f47d | [
"MIT"
] | null | null | null | MMLanScan/Data/build_port_services_list.py | cyb3rc/MMLanScan | 60cf1cb9476bad8ee522780ce4df5513a139f47d | [
"MIT"
] | null | null | null | #!/usr/bin/python
import urllib2
import xml.etree.ElementTree as ElementTree
import re
def refine_table(table):
    """
    Normalize a Wikipedia HTML table fragment for XML parsing.

    Strips attributes from td/tr tags, unwraps a/span/b tags, removes
    line breaks, sup/sub/caption elements, stray </abbr> tags and
    newlines, and collapses div-wrapped lists to their first item.
    """
    result = table
    result = re.sub(r"<td.*?>", "<td>", result)
    result = re.sub(r"<tr.*?>", "<tr>", result)
    result = re.sub(r"<a.*?>(.*?)</a>", "\\1", result)
    result = re.sub(r"<span.*?>(.*?)</span>", "\\1", result)
    result = re.sub(r"<b.*?>(.*?)</b>", "\\1", result)
    result = re.sub(r"<br\s?/>", "", result)
    result = re.sub(r"<sup.*?/sup>", "", result)
    result = re.sub(r"<sub.*?/sub>", "", result)
    result = re.sub(r"<caption.*?/caption>", "", result)
    result = re.sub(r"</abbr>", "", result)
    result = re.sub(r"\n", "", result)
    # BUG FIX: re.M | re.S was passed positionally, which re.sub interprets
    # as the `count` argument, not flags; they must be passed as flags=.
    result = re.sub(r"<div.*?>.*?<li>(.*?)</li>.*?</div>", "\\1", result, flags=re.M | re.S)
    return result
def main():
    """
    Scrape Wikipedia's TCP/UDP port number list and write services.list.

    Each output line has the form '<proto>,<port>,<descriptions>' where
    proto is 'tcp' or 'udp' and multiple descriptions are joined by ' | '.
    (This script targets Python 2: it uses urllib2 and eager map().)
    """
    response = urllib2.urlopen("https://en.wikipedia.org/wiki/List_of_TCP_and_UDP_port_numbers")
    content = response.read()
    # The well-known and registered port tables are the first two
    # collapsible wikitables on the page.
    tables = re.findall(r"<table class=\"wikitable sortable collapsible\">(.*?)</table>", content, re.M | re.S)
    table_well_known = refine_table(tables[0])
    table_registered_known = refine_table(tables[1])
    # Merge both tables so they can be parsed as one XML document.
    whole_table = "<table>" + table_well_known + table_registered_known + "</table>"
    tree = ElementTree.fromstring(whole_table)
    # port -> [set of protocols, list of descriptions]
    port_info = {}
    for child in tree:
        port = child[0].text
        tcp = child[1].text
        udp = child[2].text
        # Description may be nested in a child element; strip punctuation
        # that would break the CSV-ish output.
        desc = (child[3][0].text if len(child[3]) > 0 else child[3].text).replace(",", "").replace(".", "")
        # skip invalid entries
        if not port:
            continue
        if ("Reserved" in [tcp, udp]) or ("N/A" in [tcp, udp]):
            continue
        # defaulting to TCP
        if (not tcp and not udp):
            tcp = "TCP"
        elif tcp and tcp.lower() in ["yes", "assigned", "?"]:
            tcp = "TCP"
        if udp and udp.lower() in ["yes", "assigned", "?"]:
            # BUG FIX: this branch previously set tcp = "TCP" (copy-paste),
            # so "yes"/"assigned" UDP cells were never recorded as udp.
            udp = "UDP"
        # check if given is a valid port or port range; map() is eager in
        # Python 2, so a non-numeric field raises ValueError here.
        try:
            port_range = map(int, port.split("-"))
        except ValueError:
            continue
        port_range = [int(port)] if port.isdigit() else map(int, port.split("-"))
        for p in port_range:
            if p not in port_info:
                port_info[p] = [set(),[]]
            if tcp == "TCP":
                port_info[p][0].add("tcp")
            if udp == "UDP":
                port_info[p][0].add("udp")
            port_info[p][1].append(desc)
    with open("services.list", "w") as fsvcs:
        for port, info in sorted(port_info.items()):
            for proto in sorted(info[0]):
                # Replace unicode punctuation that sneaks in from Wikipedia.
                svc = (" | ".join(info[1])).replace(u"\u00e9", "e").replace(u"\u2013", "-").replace(u"\u2014", "-")
                fsvcs.write(("%s,%s,%s" % (proto, port, svc)))
                fsvcs.write("\n")


if __name__ == "__main__":
    main()
| 31.611111 | 115 | 0.518102 |
import urllib2
import xml.etree.ElementTree as ElementTree
import re
def refine_table(table):
result = table
result = re.sub(r"<td.*?>", "<td>", result)
result = re.sub(r"<tr.*?>", "<tr>", result)
result = re.sub(r"<a.*?>(.*?)</a>", "\\1", result)
result = re.sub(r"<span.*?>(.*?)</span>", "\\1", result)
result = re.sub(r"<b.*?>(.*?)</b>", "\\1", result)
result = re.sub(r"<br\s?/>", "", result)
result = re.sub(r"<sup.*?/sup>", "", result)
result = re.sub(r"<sub.*?/sub>", "", result)
result = re.sub(r"<caption.*?/caption>", "", result)
result = re.sub(r"</abbr>", "", result)
result = re.sub(r"\n", "", result)
result = re.sub(r"<div.*?>.*?<li>(.*?)</li>.*?</div>", "\\1", result, re.M | re.S)
return result
def main():
response = urllib2.urlopen("https://en.wikipedia.org/wiki/List_of_TCP_and_UDP_port_numbers")
content = response.read()
tables = re.findall(r"<table class=\"wikitable sortable collapsible\">(.*?)</table>", content, re.M | re.S)
table_well_known = refine_table(tables[0])
table_registered_known = refine_table(tables[1])
whole_table = "<table>" + table_well_known + table_registered_known + "</table>"
tree = ElementTree.fromstring(whole_table)
port_info = {}
for child in tree:
port = child[0].text
tcp = child[1].text
udp = child[2].text
desc = (child[3][0].text if len(child[3]) > 0 else child[3].text).replace(",", "").replace(".", "")
if not port:
continue
if ("Reserved" in [tcp, udp]) or ("N/A" in [tcp, udp]):
continue
if (not tcp and not udp):
tcp = "TCP"
elif tcp and tcp.lower() in ["yes", "assigned", "?"]:
tcp = "TCP"
if udp and udp.lower() in ["yes", "assigned", "?"]:
tcp = "TCP"
try:
port_range = map(int, port.split("-"))
except:
continue
port_range = [int(port)] if port.isdigit() else map(int, port.split("-"))
for p in port_range:
if p not in port_info:
port_info[p] = [set(),[]]
if tcp == "TCP":
port_info[p][0].add("tcp")
if udp == "UDP":
port_info[p][0].add("udp")
port_info[p][1].append(desc)
with open("services.list", "w") as fsvcs:
for port, info in sorted(port_info.items()):
for proto in sorted(info[0]):
svc = (" | ".join(info[1])).replace(u"\u00e9", "e").replace(u"\u2013", "-").replace(u"\u2014", "-")
fsvcs.write(("%s,%s,%s" % (proto, port, svc)))
fsvcs.write("\n")
if __name__ == "__main__":
main()
| true | true |
f72095f560d4de6eaf24da310f2f00ce19300c51 | 4,718 | py | Python | API/content/views.py | kasimbozdag/SWE_573 | 4bce24f98fe6980b1f2c83196b8454b56118186b | [
"MIT"
] | null | null | null | API/content/views.py | kasimbozdag/SWE_573 | 4bce24f98fe6980b1f2c83196b8454b56118186b | [
"MIT"
] | 52 | 2019-02-19T10:43:11.000Z | 2022-02-10T10:36:37.000Z | API/content/views.py | kasimbozdag/SWE_573 | 4bce24f98fe6980b1f2c83196b8454b56118186b | [
"MIT"
] | null | null | null | import datetime
from django.shortcuts import render, get_object_or_404
# Create your views here.
from rest_framework.generics import ListAPIView
from rest_framework.response import Response
from rest_framework.status import HTTP_200_OK
from rest_framework.views import APIView
from content.models import Content
from content.serializers import ContentSerializer, ContentsSerializer, ContentsDetailSerializer
from lesson.models import Lesson, Contents
class ContentCreateAPIView(APIView):
    """Create a new Content and append it to the end of a lesson.

    POST with the lesson pk in the URL; expects 'text' and 'title' in the
    payload plus an optional uploaded 'file'.
    """

    def post(self, request, *args, **kwargs):
        user = request.user
        data = request.data
        # 404 if the lesson does not exist.
        lesson = get_object_or_404(Lesson, pk=kwargs.get("pk"))
        # The request's 'title' maps onto the Content model's 'sub_title'.
        content = {'text': data['text'], "sub_title": data['title']}
        if "file" in request.FILES:
            file = request.FILES['file']
            content['file'] = file
        serializer = ContentSerializer(data=content)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        # Append at the end: 1-based position over the lesson's contents.
        # NOTE(review): computed with a count query, so concurrent posts
        # could race and assign duplicate places -- confirm acceptable.
        place = Contents.objects.filter(lesson=lesson.id).count() + 1
        contents = {
            "lesson": kwargs.get("pk"),
            "content": serializer.instance.pk,
            "owner": user.pk,
            "place": place,
        }
        serializer = ContentsSerializer(data=contents)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(serializer.data, status=HTTP_200_OK)
class ContentsListAPIView(ListAPIView):
    """List the active contents of one lesson, ordered by position."""
    serializer_class = ContentsDetailSerializer
    queryset = Contents.objects.all()
    def get_queryset(self):
        lesson_pk = self.kwargs.get("pk")
        return (Contents.objects
                .filter(lesson=lesson_pk, is_active=True)
                .order_by("place"))
class ContentsAPIView(APIView):
    """Retrieve (GET) or update (PUT) a single lesson content entry."""

    def put(self, request, *args, **kwargs):
        link = get_object_or_404(Contents, pk=kwargs['pk'])
        if not link.is_authorized(request.user):
            return Response(status=401)
        payload = {
            'text': request.data['text'],
            "last_edited_at": datetime.datetime.now(),
            "sub_title": request.data['title'],
        }
        if "file" in request.FILES:
            payload['file'] = request.FILES['file']
        # Update the underlying Content first, then refresh the link row.
        content = link.content
        content_serializer = ContentSerializer(content, data=payload)
        content_serializer.is_valid(raise_exception=True)
        content_serializer.save()
        link_serializer = ContentsSerializer(link, data={
            "lesson": link.lesson_id,
            "content": content.pk,
            "owner": link.owner.pk,
            "last_edited_at": datetime.datetime.now(),
        })
        link_serializer.is_valid(raise_exception=True)
        link_serializer.save()
        return Response(link_serializer.data, status=HTTP_200_OK)

    def get(self, request, *args, **kwargs):
        link = get_object_or_404(Contents, pk=kwargs['pk'])
        return Response(ContentsDetailSerializer(link).data, status=HTTP_200_OK)
class ContentsInactivateAPIView(APIView):
    """Soft-hide a content entry by switching its is_active flag off."""
    def put(self, request, *args, **kwargs):
        entry = get_object_or_404(Contents, pk=kwargs['pk'])
        if not entry.is_authorized(request.user):
            return Response(status=401)
        entry.is_active = False
        entry.save()
        return Response(status=HTTP_200_OK)
class ContentsActivateAPIView(APIView):
    """Re-enable a previously hidden content entry."""
    def put(self, request, *args, **kwargs):
        entry = get_object_or_404(Contents, pk=kwargs['pk'])
        if not entry.is_authorized(request.user):
            return Response(status=401)
        entry.is_active = True
        entry.save()
        return Response(status=HTTP_200_OK)
class TeacherContentsListAPIView(ListAPIView):
    """List every content of a lesson (active and hidden), ordered by position."""
    serializer_class = ContentsDetailSerializer
    queryset = Contents.objects.all()
    def get_queryset(self):
        lesson_pk = self.kwargs.get("pk")
        return Contents.objects.filter(lesson=lesson_pk).order_by("place")
class ChangePlaceAPIView(APIView):
def put(self, request, *args, **kwargs):
pk = kwargs['pk']
content = get_object_or_404(Contents, pk=pk)
if not content.is_authorized(request.user):
return Response(status=401)
place=request.data['place']
lte=content.place
gte=place
change=1
if lte < gte:
lte=place
gte=content.place
change=-1
contents=Contents.objects.filter(place__gte=gte).filter(place__lte=lte)
for l in contents:
l.place=l.place+change
l.save()
content.place=place
content.save()
return Response(status=HTTP_200_OK) | 35.473684 | 116 | 0.652183 | import datetime
from django.shortcuts import render, get_object_or_404
from rest_framework.generics import ListAPIView
from rest_framework.response import Response
from rest_framework.status import HTTP_200_OK
from rest_framework.views import APIView
from content.models import Content
from content.serializers import ContentSerializer, ContentsSerializer, ContentsDetailSerializer
from lesson.models import Lesson, Contents
class ContentCreateAPIView(APIView):
    """Create a Content for a lesson and link it at the last position."""
    def post(self, request, *args, **kwargs):
        user = request.user
        data = request.data
        lesson = get_object_or_404(Lesson, pk=kwargs.get("pk"))
        content = {'text': data['text'], "sub_title": data['title']}
        if "file" in request.FILES:
            file = request.FILES['file']
            content['file'] = file
        serializer = ContentSerializer(data=content)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        # Append at the end: place = current number of entries + 1.
        place = Contents.objects.filter(lesson=lesson.id).count() + 1
        contents = {
            "lesson": kwargs.get("pk"),
            "content": serializer.instance.pk,
            "owner": user.pk,
            "place": place,
        }
        serializer = ContentsSerializer(data=contents)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(serializer.data, status=HTTP_200_OK)
class ContentsListAPIView(ListAPIView):
    """List the active contents of a lesson, ordered by position."""
    serializer_class = ContentsDetailSerializer
    queryset = Contents.objects.all()
    def get_queryset(self):
        return Contents.objects.filter(lesson=self.kwargs.get("pk"), is_active=True).order_by("place")
class ContentsAPIView(APIView):
    """Retrieve (GET) or update (PUT) a single lesson content entry."""
    def put(self, request, *args, **kwargs):
        user = request.user
        data = request.data
        pk = kwargs['pk']
        contents = get_object_or_404(Contents, pk=pk)
        if not contents.is_authorized(request.user):
            return Response(status=401)
        # Update the wrapped Content first, then refresh the link row.
        content = contents.content
        content_data = {'text': data['text'], "last_edited_at": datetime.datetime.now(), "sub_title": data['title']}
        if "file" in request.FILES:
            file = request.FILES['file']
            content_data['file'] = file
        serializer = ContentSerializer(content, data=content_data)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        contents_data = {
            "lesson": contents.lesson_id,
            "content": content.pk,
            "owner": contents.owner.pk,
            "last_edited_at": datetime.datetime.now(),
        }
        serializer = ContentsSerializer(contents, data=contents_data)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(serializer.data, status=HTTP_200_OK)
    def get(self, request, *args, **kwargs):
        pk = kwargs['pk']
        contents = get_object_or_404(Contents, pk=pk)
        serializer = ContentsDetailSerializer(contents)
        return Response(serializer.data, status=HTTP_200_OK)
class ContentsInactivateAPIView(APIView):
    """Soft-hide a content entry by switching its is_active flag off."""
    def put(self, request, *args, **kwargs):
        pk = kwargs['pk']
        content = get_object_or_404(Contents, pk=pk)
        if not content.is_authorized(request.user):
            return Response(status=401)
        content.is_active = False
        content.save()
        return Response(status=HTTP_200_OK)
class ContentsActivateAPIView(APIView):
    """Re-enable a previously hidden content entry."""
    def put(self, request, *args, **kwargs):
        pk = kwargs['pk']
        content = get_object_or_404(Contents, pk=pk)
        if not content.is_authorized(request.user):
            return Response(status=401)
        content.is_active = True
        content.save()
        return Response(status=HTTP_200_OK)
class TeacherContentsListAPIView(ListAPIView):
    """List every content of a lesson (active and hidden), ordered by position."""
    serializer_class = ContentsDetailSerializer
    queryset = Contents.objects.all()
    def get_queryset(self):
        return Contents.objects.filter(lesson=self.kwargs.get("pk")).order_by("place")
class ChangePlaceAPIView(APIView):
    """Move a content entry to a new position; shifts the rows in between.

    NOTE(review): the shift queryset below is not filtered by lesson, so it
    also renumbers entries of other lessons that happen to share the place
    range — looks like a bug, confirm against the data model.
    """
    def put(self, request, *args, **kwargs):
        pk = kwargs['pk']
        content = get_object_or_404(Contents, pk=pk)
        if not content.is_authorized(request.user):
            return Response(status=401)
        place=request.data['place']
        # [gte, lte] is the inclusive range of places that must shift.
        lte=content.place
        gte=place
        change=1
        if lte < gte:
            # Moving towards the end of the list: range shifts up by one.
            lte=place
            gte=content.place
            change=-1
        contents=Contents.objects.filter(place__gte=gte).filter(place__lte=lte)
        for l in contents:
            l.place=l.place+change
            l.save()
        # The moved row itself gets its final position last.
        content.place=place
        content.save()
        return Response(status=HTTP_200_OK) | true | true |
f72095f7242e0943bddfcfc8d4c0d806cf6d4b17 | 1,274 | py | Python | build/lib/elang/plot/utils/embedding.py | onlyphantom/elangdev | bdb80e10e98f98ef6510c313cda55daf9464d5c4 | [
"CC0-1.0"
] | 5 | 2020-02-26T15:05:47.000Z | 2022-01-25T01:15:27.000Z | build/lib/elang/plot/utils/embedding.py | onlyphantom/elangdev | bdb80e10e98f98ef6510c313cda55daf9464d5c4 | [
"CC0-1.0"
] | null | null | null | build/lib/elang/plot/utils/embedding.py | onlyphantom/elangdev | bdb80e10e98f98ef6510c313cda55daf9464d5c4 | [
"CC0-1.0"
] | 1 | 2020-02-13T08:14:11.000Z | 2020-02-13T08:14:11.000Z | import sys, os.path
import gensim
from gensim.models import Word2Vec
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
def plot2d_demo(model, words=None):
    """Scatter-plot word embeddings in 2D (PCA-reduced when dim > 2).

    When `words` is None, every word in the model vocabulary is plotted.
    """
    assert model.vector_size >= 2, (
        "This function expects a model of size 2 (2-dimension word vectors) or higher.")
    if words is None:
        words = list(model.wv.vocab)
    vectors = np.array([model.wv[w] for w in words])
    if model.vector_size > 2:
        vectors = PCA(2).fit_transform(vectors)
    with plt.style.context("seaborn-pastel"):
        plt.figure(figsize=(7, 5), dpi=180)
        plt.scatter(vectors[:, 0], vectors[:, 1], s=5, edgecolors="k", c="c")
        for label, (x, y) in zip(words, vectors):
            plt.text(x - 0.02, y + 0.02, label, fontsize=5)
        plt.show()
if __name__ == "__main__":
    # Demo entry point: load the bundled 2-d demo model and plot it.
    MODEL_PATH = (
        os.path.abspath(os.path.join(os.path.dirname(__file__), "../../"))
        + "/word2vec/model/demo2d.model"
        # Alternative higher-dimensional demo model (PCA-reduced before plotting):
        # + "/word2vec/model/demo500d.model"
    )
    model = Word2Vec.load(MODEL_PATH)
    print("Loaded from Path:", MODEL_PATH, "\n", model)
    # Example with an explicit word list:
    # plot2d_demo(model, words=["bca", "mandiri", "uob", "algoritma", "airbnb"])
    plot2d_demo(model)
| 28.311111 | 86 | 0.625589 | import sys, os.path
import gensim
from gensim.models import Word2Vec
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
def plot2d_demo(model, words=None):
    """Scatter-plot word embeddings in 2D; PCA-reduce first when dim > 2.

    When `words` is None, every word in the model vocabulary is plotted.
    """
    assert (
        model.vector_size >= 2
    ), "This function expects a model of size 2 (2-dimension word vectors) or higher."
    if words is None:
        words = [words for words in model.wv.vocab]
    word_vec = np.array([model.wv[word] for word in words])
    if model.vector_size > 2:
        # Project down to the first two principal components for plotting.
        pca = PCA(2)
        word_vec = pca.fit_transform(word_vec)
    with plt.style.context("seaborn-pastel"):
        plt.figure(figsize=(7, 5), dpi=180)
        plt.scatter(word_vec[:, 0], word_vec[:, 1], s=5, edgecolors="k", c="c")
        for word, (x, y) in zip(words, word_vec):
            plt.text(x - 0.02, y + 0.02, word, fontsize=5)
        plt.show()
if __name__ == "__main__":
    # Demo: load the bundled 2-d word2vec model relative to the package root.
    MODEL_PATH = (
        os.path.abspath(os.path.join(os.path.dirname(__file__), "../../"))
        + "/word2vec/model/demo2d.model"
    )
    model = Word2Vec.load(MODEL_PATH)
    print("Loaded from Path:", MODEL_PATH, "\n", model)
    plot2d_demo(model)
| true | true |
f720966df99b4facf9ee616398c70c34a8e38598 | 49,071 | py | Python | tensorflow/python/data/experimental/kernel_tests/snapshot_test.py | weikhor/tensorflow | ce047fc05c7b5ff54868ba53d724d9c171c4adbb | [
"Apache-2.0"
] | 10 | 2021-05-25T17:43:04.000Z | 2022-03-08T10:46:09.000Z | tensorflow/python/data/experimental/kernel_tests/snapshot_test.py | weikhor/tensorflow | ce047fc05c7b5ff54868ba53d724d9c171c4adbb | [
"Apache-2.0"
] | 1,056 | 2019-12-15T01:20:31.000Z | 2022-02-10T02:06:28.000Z | tensorflow/python/data/experimental/kernel_tests/snapshot_test.py | weikhor/tensorflow | ce047fc05c7b5ff54868ba53d724d9c171c4adbb | [
"Apache-2.0"
] | 6 | 2016-09-07T04:00:15.000Z | 2022-01-12T01:47:38.000Z | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the `SnapshotDataset` transformation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
import os
import shutil
import time
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.experimental.ops import snapshot
from tensorflow.python.data.kernel_tests import checkpoint_test_base
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.kernel_tests import tf_record_test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers as core_readers
from tensorflow.python.framework import combinations
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
def is_graphdef_file(filename):
  """Return True iff `filename` is a debug GraphDef dump (`*-graph.pbtxt`)."""
  suffix = "-graph.pbtxt"
  return filename[-len(suffix):] == suffix
def is_temp_file(filename):
  """Return True iff `filename` looks like a TF temporary file (`*-tmp-*`)."""
  return filename.find("-tmp-") != -1
def listdir_and_filter(dirname, filter_fn):
  """Return the sorted entries of `dirname` for which `filter_fn` is true."""
  entries = sorted(os.listdir(dirname))
  return list(filter(filter_fn, entries))
class SnapshotTest(tf_record_test_base.TFRecordTestBase,
                   parameterized.TestCase):
  """Tests for the `tf.data.experimental.snapshot` transformation.

  Each test writes into a fresh per-test snapshot directory and, where
  relevant, checks the on-disk layout (fingerprint dirs / run dirs / shard
  files) via `assertSnapshotDirectoryContains`.
  """

  def setUp(self):
    super(SnapshotTest, self).setUp()
    # Dedicated snapshot root under the test temp dir; removed in tearDown.
    tmpdir = self.get_temp_dir()
    tmpdir = os.path.join(tmpdir, "snapshot")
    os.mkdir(tmpdir)
    self._snapshot_dir = tmpdir

  def tearDown(self):
    super(SnapshotTest, self).tearDown()
    shutil.rmtree(self._snapshot_dir)

  def createTFRecords(self, num_files=10, num_records=100):
    """Creates the TFRecord fixture files used as the source dataset."""
    self._num_files = num_files
    self._num_records = num_records
    self._filenames = self._createFiles()

  def removeTFRecords(self):
    """Deletes the fixture files so reads must come from the snapshot."""
    for filename in self._filenames:
      os.remove(filename)
    self._filenames = []
    self._num_files = None
    self._num_records = None

  def assertDatasetProducesSet(self, dataset, expected):
    """Asserts `dataset` yields exactly `expected`, in any order."""
    actual = []
    next_fn = self.getNext(dataset)
    for _ in range(len(expected)):
      elem = self.evaluate(next_fn())
      actual.append(elem)
    self.assertCountEqual(actual, expected)
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(next_fn())

  def assertSnapshotDirectoryContains(self, directory, num_fingerprints,
                                      num_runs_per_fingerprint,
                                      num_snapshot_shards_per_run):
    """Checks the snapshot layout: fingerprint dirs, run dirs, shard files."""
    # Ignore the graphdef pbtxts we write for debugging purposes and temporary
    # files that are an artifact of how TF writes files.
    dirlist = listdir_and_filter(
        directory,
        lambda p: not (is_graphdef_file(p) or is_temp_file(p)))
    self.assertLen(dirlist, num_fingerprints)

    for i in range(num_fingerprints):
      fingerprint_dir = os.path.join(directory, dirlist[i])
      fingerprint_dir_list = listdir_and_filter(
          fingerprint_dir, lambda p: not is_temp_file(p))
      # Each fingerprint dir holds its run dirs plus one "snapshot.metadata".
      self.assertLen(fingerprint_dir_list, num_runs_per_fingerprint + 1)
      self.assertEqual(fingerprint_dir_list[num_runs_per_fingerprint],
                       "snapshot.metadata")

      for j in range(num_runs_per_fingerprint):
        run_dir = os.path.join(fingerprint_dir, fingerprint_dir_list[j])
        run_dirlist = sorted(os.listdir(run_dir))
        self.assertLen(run_dirlist, num_snapshot_shards_per_run)

        # Shard files are numbered consecutively: 00000000.shard, ...
        file_counter = 0
        for filename in run_dirlist:
          self.assertEqual(filename, "%08d.shard" % file_counter)
          file_counter += 1

  @combinations.generate(test_base.default_test_combinations())
  def testCreateSnapshotDataset(self):
    dataset = dataset_ops.Dataset.from_tensors([1, 2, 3])
    dataset.apply(snapshot.snapshot(self._snapshot_dir))

  @combinations.generate(test_base.default_test_combinations())
  def testReadSnapshotDatasetDefault(self):
    self.createTFRecords()
    filenames = self._filenames
    expected = [
        b"Record %d of file %d" % (r, f)  # pylint:disable=g-complex-comprehension
        for f in range(0, 10)
        for r in range(0, 100)
    ]

    dataset = core_readers._TFRecordDataset(filenames)
    dataset = dataset.apply(snapshot.snapshot(self._snapshot_dir))
    self.assertDatasetProduces(dataset, expected)
    self.assertSnapshotDirectoryContains(
        self._snapshot_dir,
        num_fingerprints=1,
        num_runs_per_fingerprint=1,
        num_snapshot_shards_per_run=multiprocessing.cpu_count())

    # Delete the source files; the second pass must read from the snapshot.
    self.removeTFRecords()
    dataset2 = core_readers._TFRecordDataset(filenames)
    dataset2 = dataset2.apply(snapshot.snapshot(self._snapshot_dir))
    self.assertDatasetProduces(dataset2, expected)

  @combinations.generate(test_base.default_test_combinations())
  def testReadSnapshotDatasetAutoWriteSnappyRead(self):
    self.createTFRecords()
    filenames = self._filenames
    expected = [
        b"Record %d of file %d" % (r, f)  # pylint:disable=g-complex-comprehension
        for f in range(0, 10)
        for r in range(0, 100)
    ]

    dataset = core_readers._TFRecordDataset(filenames)
    dataset = dataset.apply(
        snapshot.snapshot(self._snapshot_dir, compression="AUTO"))
    self.assertDatasetProduces(dataset, expected)

    # AUTO resolves to SNAPPY here, so an explicit SNAPPY read must succeed.
    self.removeTFRecords()
    dataset2 = core_readers._TFRecordDataset(filenames)
    dataset2 = dataset2.apply(
        snapshot.snapshot(self._snapshot_dir, compression="SNAPPY"))
    self.assertDatasetProduces(dataset2, expected)

  @combinations.generate(test_base.default_test_combinations())
  def testReadSnapshotDatasetCustomShardFn(self):
    self.createTFRecords()
    filenames = self._filenames
    expected = [
        b"Record %d of file %d" % (r, f)  # pylint:disable=g-complex-comprehension
        for f in range(0, 10)
        for r in range(0, 100)
    ]

    dataset = core_readers._TFRecordDataset(filenames)
    # Everything goes into a single shard.
    dataset = dataset.apply(
        snapshot.snapshot(self._snapshot_dir, shard_func=lambda _: np.int64(0)))
    self.assertDatasetProduces(dataset, expected)
    self.assertSnapshotDirectoryContains(
        self._snapshot_dir,
        num_fingerprints=1,
        num_runs_per_fingerprint=1,
        num_snapshot_shards_per_run=1)

    self.removeTFRecords()
    dataset2 = core_readers._TFRecordDataset(filenames)
    dataset2 = dataset2.apply(
        snapshot.snapshot(self._snapshot_dir, shard_func=lambda _: 0))
    self.assertDatasetProduces(dataset2, expected)

  @combinations.generate(test_base.default_test_combinations())
  def testReadSnapshotDatasetCustomReaderFn(self):
    self.createTFRecords()
    filenames = self._filenames
    expected = [
        b"Record %d of file %d" % (r, f)  # pylint:disable=g-complex-comprehension
        for f in range(0, 10)
        for r in range(0, 100)
    ]

    dataset = core_readers._TFRecordDataset(filenames)
    dataset = dataset.apply(
        snapshot.snapshot(
            self._snapshot_dir,
            reader_func=(
                lambda ds: ds.interleave(  # pylint:disable=g-long-lambda
                    lambda x: x,
                    cycle_length=4,
                    num_parallel_calls=4))))
    self.assertDatasetProduces(dataset, expected)
    self.assertSnapshotDirectoryContains(
        self._snapshot_dir,
        num_fingerprints=1,
        num_runs_per_fingerprint=1,
        num_snapshot_shards_per_run=multiprocessing.cpu_count())

    self.removeTFRecords()
    dataset2 = core_readers._TFRecordDataset(filenames)
    dataset2 = dataset2.apply(
        snapshot.snapshot(
            self._snapshot_dir,
            reader_func=(
                lambda ds: ds.interleave(  # pylint:disable=g-long-lambda
                    lambda x: x,
                    cycle_length=4,
                    num_parallel_calls=4))))
    # Parallel interleave does not preserve order, so compare as a multiset.
    self.assertDatasetProducesSet(dataset2, expected)

  @combinations.generate(test_base.default_test_combinations())
  def testSnapshotDatasetInvalidShardFn(self):
    dataset = dataset_ops.Dataset.range(1000)
    with self.assertRaises(TypeError):
      dataset = dataset.apply(
          snapshot.snapshot(
              self._snapshot_dir, shard_func=lambda _: "invalid_fn"))
      next_fn = self.getNext(dataset)
      self.evaluate(next_fn())

  @combinations.generate(test_base.default_test_combinations())
  def testSnapshotDatasetInvalidReaderFn(self):
    dataset = dataset_ops.Dataset.range(1000)
    with self.assertRaises(TypeError):
      dataset = dataset.apply(
          snapshot.snapshot(self._snapshot_dir, reader_func=lambda x: x + 1))
      next_fn = self.getNext(dataset)
      self.evaluate(next_fn())

  @combinations.generate(test_base.default_test_combinations())
  def testRoundtripEmptySnapshot(self):
    dataset = dataset_ops.Dataset.range(0)
    dataset = dataset.apply(snapshot.snapshot(self._snapshot_dir))
    self.assertDatasetProduces(dataset, [])
    self.assertSnapshotDirectoryContains(
        self._snapshot_dir,
        num_fingerprints=1,
        num_runs_per_fingerprint=1,
        num_snapshot_shards_per_run=0)

    dataset2 = dataset_ops.Dataset.range(0)
    # Fixed a typo: this previously re-applied snapshot to `dataset`,
    # leaving the freshly created `dataset2` pipeline unused.
    dataset2 = dataset2.apply(snapshot.snapshot(self._snapshot_dir))
    self.assertDatasetProduces(dataset2, [])

  @combinations.generate(test_base.default_test_combinations())
  def testWriteSnapshotDatasetSimple(self):
    dataset = dataset_ops.Dataset.range(1000)
    dataset = dataset.apply(snapshot.snapshot(self._snapshot_dir))
    self.assertDatasetProduces(dataset, list(range(1000)))
    self.assertSnapshotDirectoryContains(
        self._snapshot_dir,
        num_fingerprints=1,
        num_runs_per_fingerprint=1,
        num_snapshot_shards_per_run=multiprocessing.cpu_count())

  @combinations.generate(test_base.default_test_combinations())
  def testWriteSnapshotDatasetMultipleFingerprints(self):
    # Two different input pipelines produce two fingerprint directories.
    dataset1 = dataset_ops.Dataset.range(1000)
    dataset1 = dataset1.apply(snapshot.snapshot(self._snapshot_dir))
    self.assertDatasetProduces(dataset1, list(range(1000)))

    dataset2 = dataset_ops.Dataset.range(2000)
    dataset2 = dataset2.apply(snapshot.snapshot(self._snapshot_dir))
    self.assertDatasetProduces(dataset2, list(range(2000)))

    self.assertSnapshotDirectoryContains(
        self._snapshot_dir,
        num_fingerprints=2,
        num_runs_per_fingerprint=1,
        num_snapshot_shards_per_run=multiprocessing.cpu_count())

  @combinations.generate(test_base.default_test_combinations())
  def testWriteSnapshotDatasetSameFingerprintMultipleCompleteRuns(self):
    # Identical pipelines reuse the completed run: still one run on disk.
    dataset1 = dataset_ops.Dataset.range(1000)
    dataset1 = dataset1.apply(snapshot.snapshot(self._snapshot_dir))
    self.assertDatasetProduces(dataset1, list(range(1000)))
    dataset2 = dataset_ops.Dataset.range(1000)
    dataset2 = dataset2.apply(snapshot.snapshot(self._snapshot_dir))
    self.assertDatasetProduces(dataset2, list(range(1000)))

    self.assertSnapshotDirectoryContains(
        self._snapshot_dir,
        num_fingerprints=1,
        num_runs_per_fingerprint=1,
        num_snapshot_shards_per_run=multiprocessing.cpu_count())

  @combinations.generate(test_base.default_test_combinations())
  def testWriteSnapshotDatasetSameFingerprintIncompleteRunRestart(self):
    # Starting a second pipeline while the first run is incomplete forces a
    # second run directory for the same fingerprint.
    dataset1 = dataset_ops.Dataset.range(1000)
    dataset1 = dataset1.apply(snapshot.snapshot(self._snapshot_dir))
    next1 = self.getNext(dataset1)
    for i in range(500):
      self.assertEqual(i, self.evaluate(next1()))

    dataset2 = dataset_ops.Dataset.range(1000)
    dataset2 = dataset2.apply(snapshot.snapshot(self._snapshot_dir))
    next2 = self.getNext(dataset2)
    for i in range(500):
      self.assertEqual(i, self.evaluate(next2()))

    for i in range(500, 1000):
      self.assertEqual(i, self.evaluate(next1()))
      self.assertEqual(i, self.evaluate(next2()))

    self.assertSnapshotDirectoryContains(
        self._snapshot_dir,
        num_fingerprints=1,
        num_runs_per_fingerprint=2,
        num_snapshot_shards_per_run=multiprocessing.cpu_count())

  @combinations.generate(test_base.default_test_combinations())
  def testWriteSnapshotCustomShardFunction(self):
    dataset = dataset_ops.Dataset.range(1000)
    dataset = dataset.enumerate()
    # Even/odd enumeration indices split the elements across two shards.
    dataset = dataset.apply(
        snapshot.snapshot(self._snapshot_dir, shard_func=lambda i, _: i % 2))
    dataset = dataset.map(lambda _, elem: elem)
    self.assertDatasetProduces(dataset, list(range(1000)))
    self.assertSnapshotDirectoryContains(
        self._snapshot_dir,
        num_fingerprints=1,
        num_runs_per_fingerprint=1,
        num_snapshot_shards_per_run=2)

  @combinations.generate(test_base.default_test_combinations())
  def testWriteSnapshotDatasetWithTuples(self):
    dataset1 = dataset_ops.Dataset.range(0, 1000)
    dataset2 = dataset_ops.Dataset.range(1000, 2000)
    dataset3 = dataset_ops.Dataset.range(2000, 3000)
    dataset4 = dataset_ops.Dataset.range(3000, 4000)

    dataset = dataset_ops.Dataset.zip((dataset1, dataset2, dataset3, dataset4))
    dataset = dataset.apply(snapshot.snapshot(self._snapshot_dir))

    expected = list(
        zip(
            range(0, 1000), range(1000, 2000), range(2000, 3000),
            range(3000, 4000)))
    self.assertDatasetProduces(dataset, expected)
    self.assertSnapshotDirectoryContains(
        self._snapshot_dir,
        num_fingerprints=1,
        num_runs_per_fingerprint=1,
        num_snapshot_shards_per_run=multiprocessing.cpu_count())

  @combinations.generate(test_base.default_test_combinations())
  def testWriteSnapshotShuffleSameFingerprint(self):

    def make_dataset():
      dataset = dataset_ops.Dataset.range(1000)
      dataset = dataset.shuffle(1000)
      dataset = dataset.apply(snapshot.snapshot(self._snapshot_dir))
      return dataset

    dataset1 = make_dataset()
    self.assertDatasetProducesSet(dataset1, list(range(1000)))
    dataset2 = make_dataset()
    self.assertDatasetProducesSet(dataset2, list(range(1000)))
    self.assertSnapshotDirectoryContains(
        self._snapshot_dir,
        num_fingerprints=1,
        num_runs_per_fingerprint=1,
        num_snapshot_shards_per_run=multiprocessing.cpu_count())

  @combinations.generate(test_base.default_test_combinations())
  def testReadUsingFlatMap(self):
    dataset = dataset_ops.Dataset.range(1000)
    dataset = dataset.apply(snapshot.snapshot(self._snapshot_dir))
    self.assertDatasetProduces(dataset, list(range(1000)))
    flat_map = dataset_ops.Dataset.from_tensors(dataset).flat_map(lambda x: x)
    self.assertDatasetProduces(flat_map, list(range(1000)))
    self.assertSnapshotDirectoryContains(
        self._snapshot_dir,
        num_fingerprints=1,
        num_runs_per_fingerprint=1,
        num_snapshot_shards_per_run=multiprocessing.cpu_count())

  @combinations.generate(test_base.default_test_combinations())
  def testReadOptimizableUsingFlatMap(self):
    dataset = dataset_ops.Dataset.range(1000)
    # Will be optimized into ShuffleAndRepeat.
    dataset = dataset.shuffle(10)
    dataset = dataset.repeat(2)
    dataset = dataset.apply(snapshot.snapshot(self._snapshot_dir))
    self.assertDatasetProducesSet(dataset, 2 * list(range(1000)))
    flat_map = dataset_ops.Dataset.from_tensors(dataset).flat_map(lambda x: x)
    self.assertDatasetProducesSet(flat_map, 2 * list(range(1000)))
    self.assertSnapshotDirectoryContains(
        self._snapshot_dir,
        num_fingerprints=1,
        num_runs_per_fingerprint=1,
        num_snapshot_shards_per_run=multiprocessing.cpu_count())

  @combinations.generate(test_base.default_test_combinations())
  def testRepeatAndPrefetch(self):
    """This test reproduces github.com/tensorflow/tensorflow/issues/48903."""
    dataset = dataset_ops.Dataset.from_tensor_slices(np.random.rand(16, 32))
    dataset = dataset.apply(snapshot.snapshot(self._snapshot_dir))
    dataset = dataset.shuffle(buffer_size=16)
    dataset = dataset.batch(16)
    dataset = dataset.repeat()
    dataset = dataset.prefetch(1)
    next_element = self.getNext(dataset)
    for _ in range(30):
      self.evaluate(next_element())
class LegacySnapshotTest(tf_record_test_base.TFRecordTestBase,
parameterized.TestCase):
def setUp(self):
super(LegacySnapshotTest, self).setUp()
self.removeTFRecords()
tmpdir = self.get_temp_dir()
tmpdir = os.path.join(tmpdir, "snapshot")
os.mkdir(tmpdir)
self.snapshot_dir = tmpdir
def tearDown(self):
super(LegacySnapshotTest, self).tearDown()
shutil.rmtree(self.snapshot_dir)
def removeTFRecords(self):
for filename in self._filenames:
os.remove(filename)
self._filenames = []
def setUpTFRecord(self, num_files=10, num_records=10):
self._num_files = num_files
self._num_records = num_records
self._filenames = self._createFiles()
def makeSnapshotDirectory(self):
return self.snapshot_dir
def assertSnapshotDirectoryContains(self, directory, num_fingerprints,
num_runs_per_fp, num_snapshot_files):
# Ignore the graphdef pbtxts we write for debugging purposes and temporary
# files that are an artifact of how TF writes files.
dirlist = listdir_and_filter(
directory,
lambda p: not (is_graphdef_file(p) or is_temp_file(p)))
self.assertLen(dirlist, num_fingerprints)
for i in range(num_fingerprints):
fingerprint_dir = os.path.join(directory, dirlist[i])
fingerprint_dir_list = listdir_and_filter(
fingerprint_dir, lambda p: not is_temp_file(p))
self.assertLen(fingerprint_dir_list, num_runs_per_fp + 1)
self.assertEqual(fingerprint_dir_list[num_runs_per_fp],
"snapshot.metadata")
for j in range(num_runs_per_fp):
run_dir = os.path.join(fingerprint_dir, fingerprint_dir_list[j])
run_dirlist = sorted(os.listdir(run_dir))
self.assertLen(run_dirlist, num_snapshot_files)
file_counter = 0
for filename in run_dirlist:
self.assertEqual(filename, "%08d.snapshot" % file_counter)
file_counter += 1
@combinations.generate(test_base.default_test_combinations())
def testWriteDifferentPipelinesInOneDirectory(self):
tmpdir = self.snapshot_dir
dataset = dataset_ops.Dataset.range(1000)
dataset = dataset.apply(snapshot.legacy_snapshot(tmpdir))
self.assertDatasetProduces(dataset, list(range(1000)))
dataset = dataset_ops.Dataset.range(1001)
dataset = dataset.apply(snapshot.legacy_snapshot(tmpdir))
self.assertDatasetProduces(dataset, list(range(1001)))
self.assertSnapshotDirectoryContains(tmpdir, 2, 1, 1)
@combinations.generate(test_base.default_test_combinations())
def testWriteSnapshotMultipleSimultaneous(self):
tmpdir = self.snapshot_dir
dataset1 = dataset_ops.Dataset.range(1000)
dataset1 = dataset1.apply(snapshot.legacy_snapshot(tmpdir))
next1 = self.getNext(dataset1)
dataset2 = dataset_ops.Dataset.range(1000)
dataset2 = dataset2.apply(snapshot.legacy_snapshot(tmpdir))
next2 = self.getNext(dataset2)
for i in range(0, 1000):
self.assertEqual(i, self.evaluate(next1()))
self.assertEqual(i, self.evaluate(next2()))
# we check that only one copy of the metadata has been written, and the
# one that lost the race would be in passthrough mode.
self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)
@combinations.generate(test_base.default_test_combinations())
def testGetNextCreatesDir(self):
tmpdir = self.snapshot_dir
# We create two iterators but call getNext on only one.
dataset1 = dataset_ops.Dataset.range(1000)
dataset1 = dataset1.apply(snapshot.legacy_snapshot(tmpdir))
next1 = self.getNext(dataset1)
dataset2 = dataset_ops.Dataset.range(1001)
dataset2 = dataset2.apply(snapshot.legacy_snapshot(tmpdir))
_ = self.getNext(dataset2)
for _ in range(1000):
self.evaluate(next1())
# We check that only one directory is created.
self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(compression=[
snapshot.COMPRESSION_NONE, snapshot.COMPRESSION_GZIP,
snapshot.COMPRESSION_SNAPPY
])))
def testWriteSnapshotSimpleSuccessful(self, compression):
tmpdir = self.snapshot_dir
dataset = dataset_ops.Dataset.range(1000)
dataset = dataset.apply(
snapshot.legacy_snapshot(tmpdir, compression=compression))
self.assertDatasetProduces(dataset, list(range(1000)))
self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(compression=[
snapshot.COMPRESSION_NONE, snapshot.COMPRESSION_GZIP,
snapshot.COMPRESSION_SNAPPY
])))
def testWriteSnapshotRepeatAfterwards(self, compression):
tmpdir = self.snapshot_dir
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.apply(
snapshot.legacy_snapshot(tmpdir, compression=compression))
dataset = dataset.repeat(10)
self.assertDatasetProduces(dataset, list(range(10)) * 10)
self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(compression=[
snapshot.COMPRESSION_NONE, snapshot.COMPRESSION_GZIP,
snapshot.COMPRESSION_SNAPPY
])))
def testWriteSnapshotMixTypes(self, compression):
tmpdir = self.snapshot_dir
dataset = dataset_ops.Dataset.range(10)
def map_fn(x):
return (x, string_ops.as_string(x), string_ops.as_string(2 * x), 2 * x)
dataset = dataset.map(map_fn)
dataset = dataset.apply(
snapshot.legacy_snapshot(tmpdir, compression=compression))
dataset = dataset.repeat(10)
expected = []
for i in range(10):
expected.append((i, str(i), str(2 * i), 2 * i))
self.assertDatasetProduces(dataset, expected * 10)
self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)
@combinations.generate(test_base.default_test_combinations())
def testSpecifySnapshotNameWriteAndRead(self):
tmpdir = self.snapshot_dir
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.apply(
snapshot.legacy_snapshot(tmpdir, snapshot_name="my_custom_snapshot"))
dataset = dataset.repeat(10)
self.assertDatasetProduces(dataset, list(range(10)) * 10)
self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)
self.assertTrue(
os.path.exists(os.path.join(tmpdir, "custom-my_custom_snapshot")))
self.assertTrue(
os.path.exists(
os.path.join(tmpdir, "custom-my_custom_snapshot", "custom")))
@combinations.generate(test_base.default_test_combinations())
def testForcePassthroughMode(self):
tmpdir = self.snapshot_dir
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.apply(
snapshot.legacy_snapshot(tmpdir, mode="passthrough"))
dataset = dataset.repeat(10)
self.assertDatasetProduces(dataset, list(range(10)) * 10)
self.assertSnapshotDirectoryContains(tmpdir, 0, 0, 0)
@combinations.generate(test_base.default_test_combinations())
def testForceWriteMode(self):
tmpdir = self.snapshot_dir
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.apply(snapshot.legacy_snapshot(tmpdir, mode="write"))
dataset = dataset.repeat(10)
self.assertDatasetProduces(dataset, list(range(10)) * 10)
# We will end up writing 10 different runs.
self.assertSnapshotDirectoryContains(tmpdir, 1, 10, 1)
@combinations.generate(test_base.default_test_combinations())
def testForceReadMode(self):
tmpdir = self.snapshot_dir
# We write a copy of the snapshot first.
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.apply(
snapshot.legacy_snapshot(
tmpdir, mode="write", snapshot_name="my_custom_snapshot"))
self.assertDatasetProduces(dataset, list(range(10)))
# We move the run to a new name.
shutil.move(
os.path.join(tmpdir, "custom-my_custom_snapshot"),
os.path.join(tmpdir, "custom-my_custom_snapshot_2"))
# Even though the snapshot.metadata is pointing to the old run that no
# longer exists after we moved, we force it to read from the run we specify.
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.apply(
snapshot.legacy_snapshot(
tmpdir, mode="read", snapshot_name="my_custom_snapshot_2"))
self.assertDatasetProduces(dataset, list(range(10)))
# We should still have one snapshot and one run.
self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)
@combinations.generate(test_base.default_test_combinations())
def testForceReadNonexistentSnapshot(self):
tmpdir = self.snapshot_dir
dataset = dataset_ops.Dataset.range(10)
with self.assertRaises(errors.NotFoundError):
dataset = dataset.apply(snapshot.legacy_snapshot(tmpdir, mode="read"))
get_next = self.getNext(dataset)
self.evaluate(get_next())
@combinations.generate(test_base.default_test_combinations())
def testForceReadNonexistentNamedSnapshot(self):
tmpdir = self.snapshot_dir
dataset = dataset_ops.Dataset.range(10)
with self.assertRaises(errors.NotFoundError):
dataset = dataset.apply(
snapshot.legacy_snapshot(
tmpdir, mode="read", snapshot_name="my_nonexistent_snapshot"))
get_next = self.getNext(dataset)
self.evaluate(get_next())
  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(compression=[
              snapshot.COMPRESSION_NONE, snapshot.COMPRESSION_GZIP,
              snapshot.COMPRESSION_SNAPPY
          ])))
  def testReadSnapshotBackAfterWrite(self, compression):
    # Round-trip: snapshot the TFRecord data, delete the source files, and
    # verify the same elements come back from the snapshot alone.
    self.setUpTFRecord()
    filenames = self._filenames
    expected = [
        b"Record %d of file %d" % (r, f)  # pylint:disable=g-complex-comprehension
        for f in range(0, 10)
        for r in range(0, 10)
    ]
    tmpdir = self.snapshot_dir
    dataset = core_readers._TFRecordDataset(filenames)
    dataset = dataset.apply(
        snapshot.legacy_snapshot(tmpdir, compression=compression))
    self.assertDatasetProduces(dataset, expected)
    # remove the original files and try to read the data back only from snapshot
    self.removeTFRecords()
    dataset2 = core_readers._TFRecordDataset(filenames)
    dataset2 = dataset2.apply(
        snapshot.legacy_snapshot(tmpdir, compression=compression))
    self.assertDatasetProduces(dataset2, expected)
  @combinations.generate(test_base.default_test_combinations())
  def testReadShuffledSnapshotAfterWrite(self):
    # Write a snapshot split across many small shards, then read it back with
    # shuffle_on_read and check the order changes but no elements are lost.
    self.setUpTFRecord(num_files=10, num_records=50)
    filenames = self._filenames
    expected = [
        b"Record %d of file %d" % (r, f)  # pylint:disable=g-complex-comprehension
        for f in range(0, 10)
        for r in range(0, 50)
    ]
    tmpdir = self.snapshot_dir
    dataset = core_readers._TFRecordDataset(filenames)
    dataset = dataset.apply(
        snapshot.legacy_snapshot(tmpdir, shard_size_bytes=100))
    self.assertDatasetProduces(dataset, expected)
    # remove the original files and try to read the data back only from snapshot
    self.removeTFRecords()
    dataset2 = core_readers._TFRecordDataset(filenames)
    dataset2 = dataset2.apply(
        snapshot.legacy_snapshot(
            tmpdir, shard_size_bytes=100, shuffle_on_read=True))
    next2 = self.getNext(dataset2)
    res1 = self.evaluate(next2())
    res2 = self.evaluate(next2())
    res3 = self.evaluate(next2())
    res4 = self.evaluate(next2())
    res5 = self.evaluate(next2())
    # make sure that we don't read the file back in the same order.
    self.assertNotEqual([res1, res2, res3, res4, res5], expected[0:5])
    # make sure all the elements are still there
    dataset3 = core_readers._TFRecordDataset(filenames)
    dataset3 = dataset3.apply(
        snapshot.legacy_snapshot(
            tmpdir, shard_size_bytes=100, shuffle_on_read=True))
    self.assertDatasetProduces(dataset3, expected, assert_items_equal=True)
  @combinations.generate(test_base.default_test_combinations())
  def testReadShuffledSnapshotWithSeedAfterWrite(self):
    # With a fixed shuffle_seed, two shuffled readers over the same snapshot
    # must yield elements in exactly the same order.
    self.setUpTFRecord(num_files=10, num_records=50)
    filenames = self._filenames
    expected = [
        b"Record %d of file %d" % (r, f)  # pylint:disable=g-complex-comprehension
        for f in range(0, 10)
        for r in range(0, 50)
    ]
    tmpdir = self.snapshot_dir
    dataset = core_readers._TFRecordDataset(filenames)
    dataset = dataset.apply(
        snapshot.legacy_snapshot(tmpdir, shard_size_bytes=10))
    self.assertDatasetProduces(dataset, expected)
    # remove the original files and try to read the data back only from snapshot
    self.removeTFRecords()
    dataset2 = core_readers._TFRecordDataset(filenames)
    dataset2 = dataset2.apply(
        snapshot.legacy_snapshot(
            tmpdir,
            shard_size_bytes=10,
            shuffle_on_read=True,
            shuffle_seed=123456))
    next2 = self.getNext(dataset2)
    dataset3 = core_readers._TFRecordDataset(filenames)
    dataset3 = dataset3.apply(
        snapshot.legacy_snapshot(
            tmpdir,
            shard_size_bytes=10,
            shuffle_on_read=True,
            shuffle_seed=123456))
    next3 = self.getNext(dataset3)
    # make sure that the items are read back in the same order for both datasets
    for _ in range(500):
      res2 = self.evaluate(next2())
      res3 = self.evaluate(next3())
      self.assertEqual(res2, res3)
  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(compression=[
              snapshot.COMPRESSION_NONE, snapshot.COMPRESSION_GZIP,
              snapshot.COMPRESSION_SNAPPY
          ])))
  def testReadSnapshotParallelAfterWrite(self, compression):
    # Write and read the snapshot with multiple reader threads; element order
    # is not deterministic, so both checks compare as multisets.
    self.setUpTFRecord(5, 500)
    filenames = self._filenames
    expected = [
        b"Record %d of file %d" % (r, f)  # pylint:disable=g-complex-comprehension
        for f in range(0, 5)
        for r in range(0, 500)
    ]
    tmpdir = self.snapshot_dir
    dataset = core_readers._TFRecordDataset(filenames)
    dataset = dataset.apply(
        snapshot.legacy_snapshot(
            tmpdir,
            shard_size_bytes=1024 * 1024,
            num_reader_threads=2,
            reader_buffer_size=10,
            compression=compression))
    self.assertDatasetProduces(dataset, expected, assert_items_equal=True)
    # remove the original files and try to read the data back only from
    # snapshot.
    self.removeTFRecords()
    dataset2 = core_readers._TFRecordDataset(filenames)
    dataset2 = dataset2.apply(
        snapshot.legacy_snapshot(
            tmpdir,
            shard_size_bytes=1024 * 1024,
            num_reader_threads=2,
            reader_buffer_size=10,
            compression=compression))
    self.assertDatasetProduces(dataset2, expected, assert_items_equal=True)
  # Not testing Snappy here because Snappy reads currently require a lot of
  # memory.
  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.times(
              combinations.combine(compression=[
                  snapshot.COMPRESSION_NONE, snapshot.COMPRESSION_GZIP
              ]),
              combinations.combine(threads=2, size=[1, 2]) +
              combinations.combine(threads=8, size=[1, 4, 8]))))
  def testReadSnapshotBackAfterMultiThreadedWrite(self, compression, threads,
                                                  size):
    # Snapshots written with multiple writer threads must still read back the
    # full element set (order may differ, hence assert_items_equal below).
    self.setUpTFRecord()
    filenames = self._filenames
    expected = [
        b"Record %d of file %d" % (r, f)  # pylint:disable=g-complex-comprehension
        for f in range(0, 10)
        for r in range(0, 10)
    ]
    tmpdir = self.snapshot_dir
    dataset = core_readers._TFRecordDataset(filenames)
    dataset = dataset.apply(
        snapshot.legacy_snapshot(
            tmpdir,
            compression=compression,
            num_writer_threads=threads,
            writer_buffer_size=size))
    self.assertDatasetProduces(dataset, expected)
    # remove the original files and try to read the data back only from
    # snapshot
    self.removeTFRecords()
    dataset2 = core_readers._TFRecordDataset(filenames)
    dataset2 = dataset2.apply(
        snapshot.legacy_snapshot(tmpdir, compression=compression))
    self.assertDatasetProduces(dataset2, expected, assert_items_equal=True)
@combinations.generate(test_base.default_test_combinations())
def testSameFingerprintWithDifferentInitializationOrder(self):
tmpdir = self.snapshot_dir
dataset1 = dataset_ops.Dataset.range(0, 100)
dataset2 = dataset_ops.Dataset.range(100, 200)
dataset3 = dataset_ops.Dataset.range(200, 300)
dataset = dataset1.concatenate(dataset2).concatenate(dataset3)
dataset = dataset.apply(snapshot.legacy_snapshot(tmpdir))
self.assertDatasetProduces(dataset, list(range(300)))
dataset4 = dataset_ops.Dataset.range(200, 300)
dataset5 = dataset_ops.Dataset.range(100, 200)
dataset6 = dataset_ops.Dataset.range(0, 100)
dataset = dataset6.concatenate(dataset5).concatenate(dataset4)
dataset = dataset.apply(snapshot.legacy_snapshot(tmpdir))
self.assertDatasetProduces(dataset, list(range(300)))
self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)
  @combinations.generate(test_base.default_test_combinations())
  def testExpiredSnapshotRewrite(self):
    # A run that is never finalized becomes "expired" after
    # pending_snapshot_expiry_seconds; a later pipeline then starts a second
    # run under the same fingerprint instead of waiting on the pending one.
    tmpdir = self.snapshot_dir
    dataset1 = dataset_ops.Dataset.range(1000)
    dataset1 = dataset1.apply(
        snapshot.legacy_snapshot(tmpdir, pending_snapshot_expiry_seconds=1))
    next1 = self.getNext(dataset1)
    # Don't finish reading dataset1, so it is never finalized
    for _ in range(500):
      self.evaluate(next1())
    self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)
    time.sleep(2)
    # Creating dataset2 after we run through dataset1 due to eager mode, where
    # the snapshot state is determined immediately upon dataset creation. We
    # only want to determine the snapshot state for dataset2 after the first
    # snapshot has expired.
    dataset2 = dataset_ops.Dataset.range(1000)
    dataset2 = dataset2.apply(
        snapshot.legacy_snapshot(tmpdir, pending_snapshot_expiry_seconds=1))
    next2 = self.getNext(dataset2)
    for _ in range(500):
      self.evaluate(next2())
    self.assertSnapshotDirectoryContains(tmpdir, 1, 2, 1)
@combinations.generate(test_base.default_test_combinations())
def testSnapshotArgsCreateNewSnapshot(self):
tmpdir = self.snapshot_dir
dataset1 = dataset_ops.Dataset.range(1000)
dataset1 = dataset1.apply(
snapshot.legacy_snapshot(tmpdir, shard_size_bytes=10000))
next1 = self.getNext(dataset1)
for _ in range(1000):
self.evaluate(next1())
self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)
# Create second snapshot with a different shard_size_bytes
dataset2 = dataset_ops.Dataset.range(1000)
dataset2 = dataset1.apply(
snapshot.legacy_snapshot(tmpdir, shard_size_bytes=20000))
next2 = self.getNext(dataset2)
for _ in range(1000):
self.evaluate(next2())
self.assertSnapshotDirectoryContains(tmpdir, 2, 1, 1)
  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(compression=[
              snapshot.COMPRESSION_NONE, snapshot.COMPRESSION_GZIP,
              snapshot.COMPRESSION_SNAPPY
          ])))
  def testSpecifyShardSize(self, compression):
    # Writes 10 large (1024x1024) tensors with a 10MB shard size limit. The
    # uncompressed run should roll over into multiple shard files (3 here),
    # while compressed output fits in a single shard.
    # NOTE(review): the exact uncompressed file count of 3 presumably follows
    # from 10 x ~4MB float tensors against the 10MB limit -- confirm.
    tmpdir = self.snapshot_dir
    dataset = dataset_ops.Dataset.from_tensor_slices([1.0])
    dataset = dataset.map(lambda x: gen_array_ops.broadcast_to(x, [1024, 1024]))
    dataset = dataset.repeat(10)
    dataset = dataset.apply(
        snapshot.legacy_snapshot(
            tmpdir, shard_size_bytes=10 * 1024 * 1024, compression=compression))
    next_fn = self.getNext(dataset)
    for _ in range(10):
      self.evaluate(next_fn())
    num_files = 1
    if compression == snapshot.COMPRESSION_NONE:
      num_files = 3
    self.assertSnapshotDirectoryContains(tmpdir, 1, 1, num_files)
  @combinations.generate(test_base.default_test_combinations())
  def testAdditionalOperationsAfterReadBack(self):
    # After a snapshot has been read back, further transformations (here a
    # substr map) can still be applied on top of the restored dataset.
    self.setUpTFRecord()
    filenames = self._filenames
    expected = [
        b"Record %d of file %d" % (r, f)  # pylint:disable=g-complex-comprehension
        for f in range(0, 10)
        for r in range(0, 10)
    ]
    tmpdir = self.snapshot_dir
    dataset = core_readers._TFRecordDataset(filenames)
    dataset = dataset.apply(snapshot.legacy_snapshot(tmpdir))
    self.assertDatasetProduces(dataset, expected)
    # remove the original files and try to read the data back only from snapshot
    self.removeTFRecords()
    dataset2 = core_readers._TFRecordDataset(filenames)
    dataset2 = dataset2.apply(snapshot.legacy_snapshot(tmpdir))
    self.assertDatasetProduces(dataset2, expected)
    # Dropping the first two bytes of each record via substr.
    expected_after = [
        b"cord %d of file %d" % (r, f)  # pylint:disable=g-complex-comprehension
        for f in range(0, 10)
        for r in range(0, 10)
    ]
    dataset3 = core_readers._TFRecordDataset(filenames)
    dataset3 = dataset3.apply(snapshot.legacy_snapshot(tmpdir))
    dataset3 = dataset3.map(lambda x: string_ops.substr_v2(x, 2, 1000))
    self.assertDatasetProduces(dataset3, expected_after)
class SnapshotCheckpointTest(checkpoint_test_base.CheckpointTestBase,
                             parameterized.TestCase):
  """Save/restore (checkpoint) coverage for `snapshot.snapshot`."""
  def _build_snapshot_dataset(self, repeat=False):
    """Returns a zero-arg dataset factory, as CheckpointTestBase expects."""
    def ds_fn():
      self._snapshot_dir = os.path.join(self.get_temp_dir(), "snapshot")
      if not os.path.exists(self._snapshot_dir):
        os.mkdir(self._snapshot_dir)
      dataset = dataset_ops.Dataset.range(100)
      dataset = dataset.apply(snapshot.snapshot(self._snapshot_dir))
      if repeat:
        dataset = dataset.repeat(2)
      return dataset
    return ds_fn
  @combinations.generate(test_base.default_test_combinations())
  def testCheckpointBeforeEpochEndNoRepeat(self):
    # Checkpoint mid-epoch, then resume and finish the single epoch.
    ds_fn = self._build_snapshot_dataset(repeat=False)
    outputs = self.gen_outputs(ds_fn, [], 50, verify_exhausted=False)
    self.assertSequenceEqual(outputs, range(50))
    outputs.extend(
        self.gen_outputs(ds_fn, [], 50, ckpt_saved=True, verify_exhausted=True))
    self.assertSequenceEqual(outputs, range(100))
  @combinations.generate(test_base.default_test_combinations())
  def testCheckpointBeforeOneEpochWithReading(self):
    ds_fn = self._build_snapshot_dataset(repeat=True)
    # Generate 50 entries from iterator and save checkpoint.
    outputs = self.gen_outputs(ds_fn, [], 50, verify_exhausted=False)
    self.assertSequenceEqual(outputs, list(range(50)))
    # Restore from checkpoint and produce the rest of the elements from the
    # iterator.
    t = self.gen_outputs(ds_fn, [], 150, ckpt_saved=True, verify_exhausted=True)
    outputs.extend(t)
    self.assertSequenceEqual(
        outputs,
        list(range(50)) + list(range(50, 100)) + list(range(100)))
  @combinations.generate(test_base.default_test_combinations())
  def testCheckpointBeforeOneEpochThenRunAFewSteps(self):
    # Save at element 10 but keep reading to 20; the restore must rewind to
    # element 10 and replay from there.
    ds_fn = self._build_snapshot_dataset(repeat=False)
    outputs = self.gen_outputs(
        ds_fn, [10], 20, verify_exhausted=False, save_checkpoint_at_end=False)
    self.assertSequenceEqual(outputs, range(20))
    outputs = outputs[:10]
    outputs.extend(
        self.gen_outputs(ds_fn, [], 90, ckpt_saved=True, verify_exhausted=True))
    self.assertSequenceEqual(outputs, range(100))
  @combinations.generate(test_base.default_test_combinations())
  def testCheckpointAfterOneEpoch(self):
    ds_fn = self._build_snapshot_dataset(repeat=True)
    # Generate 110 entries from iterator and save checkpoint.
    outputs = self.gen_outputs(ds_fn, [], 110, verify_exhausted=False)
    self.assertSequenceEqual(outputs, list(range(100)) + list(range(10)))
    # Restore from checkpoint and produce the rest of the elements from the
    # iterator.
    t = self.gen_outputs(ds_fn, [], 90, ckpt_saved=True, verify_exhausted=True)
    outputs.extend(t)
    self.assertSequenceEqual(
        outputs,
        list(range(100)) + list(range(10)) + list(range(10, 100)))
  @combinations.generate(test_base.default_test_combinations())
  def testCheckpointAfterOneEpochRunFewSteps(self):
    ds_fn = self._build_snapshot_dataset(repeat=True)
    # Generate 120 entries from iterator and save checkpoint at 110.
    outputs = self.gen_outputs(
        ds_fn, [110], 120, verify_exhausted=False, save_checkpoint_at_end=False)
    self.assertSequenceEqual(outputs, list(range(100)) + list(range(20)))
    # Restore from checkpoint and produce the rest of the elements from the
    # iterator.
    outputs = outputs[:110]
    t = self.gen_outputs(ds_fn, [], 90, ckpt_saved=True, verify_exhausted=True)
    outputs.extend(t)
    self.assertSequenceEqual(
        outputs,
        list(range(100)) + list(range(10)) + list(range(10, 100)))
class LegacySnapshotCheckpointTest(
    checkpoint_test_base.CheckpointTestBase, parameterized.TestCase):
  """Save/restore (checkpoint) coverage for `snapshot.legacy_snapshot`."""
  def _build_snapshot_dataset(self,
                              num_threads=1,
                              repeat=False,
                              pending_snapshot_expiry_seconds=-1,
                              shard_size_bytes=None):
    """Returns a zero-arg factory building a legacy-snapshot pipeline."""
    def ds_fn():
      self.snapshot_dir = os.path.join(self.get_temp_dir(), "snapshot")
      if not os.path.exists(self.snapshot_dir):
        os.mkdir(self.snapshot_dir)
      dataset = dataset_ops.Dataset.range(1000)
      dataset = dataset.apply(
          snapshot.legacy_snapshot(
              self.snapshot_dir,
              num_writer_threads=num_threads,
              writer_buffer_size=2 * num_threads,
              num_reader_threads=num_threads,
              reader_buffer_size=2 * num_threads,
              pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds,
              shard_size_bytes=shard_size_bytes))
      if repeat:
        dataset = dataset.repeat(2)
      return dataset
    return ds_fn
  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(pending_snapshot_expiry_seconds=[None, 1])))
  def testSnapshotBeforeEpochEnd(self, pending_snapshot_expiry_seconds):
    # Checkpoint mid-write and resume through the rest of the epoch.
    ds_fn = self._build_snapshot_dataset(
        pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds)
    outputs = self.gen_outputs(ds_fn, [], 100, verify_exhausted=False)
    self.assertSequenceEqual(outputs, range(100))
    outputs.extend(
        self.gen_outputs(
            ds_fn, [], 900, ckpt_saved=True, verify_exhausted=False))
    self.assertSequenceEqual(outputs, range(1000))
  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(pending_snapshot_expiry_seconds=[None, 1])))
  def testCheckpointBeforeOneEpochThenRunFewStepsSmallShardMultiThread(
      self, pending_snapshot_expiry_seconds):
    # Save at element 100, keep reading to 400, restore, and finish; then
    # verify the on-disk directory/shard layout of the written snapshot.
    ds_fn = self._build_snapshot_dataset(
        pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds,
        shard_size_bytes=100)
    outputs = []
    with ops.Graph().as_default() as g:
      init_op, get_next_op, saver = self._build_graph(ds_fn)
      with self.session(graph=g) as sess:
        self._initialize(init_op, sess)
        start = 0
        end = 100
        num_iters = end - start
        for _ in range(num_iters):
          outputs.append(sess.run(get_next_op))
        self._save(sess, saver)
        start = 100
        end = 400
        num_iters = end - start
        for _ in range(num_iters):
          outputs.append(sess.run(get_next_op))
    self.assertSequenceEqual(outputs, range(400))
    outputs = outputs[:100]
    outputs.extend(
        self.gen_outputs(
            ds_fn, [], 900, ckpt_saved=True, verify_exhausted=False))
    self.assertSequenceEqual(outputs, range(1000))
    # One fingerprint directory plus the serialized "-graph.pbtxt".
    fp_dir_list = os.listdir(self.snapshot_dir)
    self.assertLen(list(fp_dir_list), 2)
    for d in fp_dir_list:
      if not d.endswith("-graph.pbtxt"):
        fp_dir = os.path.join(self.snapshot_dir, d)
        run_dir_list = os.listdir(fp_dir)
        self.assertLen(list(run_dir_list), 2)
        for e in run_dir_list:
          if e != "snapshot.metadata":
            run_dir = os.path.join(fp_dir, e)
            self.assertLen(list(os.listdir(run_dir)), 258)
  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(pending_snapshot_expiry_seconds=[None, 1])))
  def testCheckpointBeforeOneEpochThenRunFewSteps(
      self, pending_snapshot_expiry_seconds):
    ds_fn = self._build_snapshot_dataset(
        pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds)
    # Generate 200 entries from iterator but save checkpoint after producing
    # 100.
    outputs = self.gen_outputs(
        ds_fn, [100], 200, verify_exhausted=False, save_checkpoint_at_end=False)
    self.assertSequenceEqual(outputs, range(200))
    outputs = outputs[:100]
    outputs.extend(
        self.gen_outputs(
            ds_fn, [], 900, ckpt_saved=True, verify_exhausted=False))
    self.assertSequenceEqual(outputs, range(1000))
  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(pending_snapshot_expiry_seconds=[None, 1])))
  def testCheckpointBeforeOneEpochThenRunFewStepsMultipleThreads(
      self, pending_snapshot_expiry_seconds):
    ds_fn = self._build_snapshot_dataset(
        num_threads=2,
        pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds)
    # Generate 200 entries from iterator but save checkpoint after producing
    # 100.
    outputs = self.gen_outputs(
        ds_fn, [100], 200, verify_exhausted=False, save_checkpoint_at_end=False)
    self.assertSequenceEqual(outputs, range(200))
    outputs = outputs[:100]
    outputs.extend(
        self.gen_outputs(
            ds_fn, [], 900, ckpt_saved=True, verify_exhausted=False))
    self.assertSequenceEqual(outputs, range(1000))
  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(pending_snapshot_expiry_seconds=[None, 1])))
  def testCheckpointAfterOneEpoch(self, pending_snapshot_expiry_seconds):
    ds_fn = self._build_snapshot_dataset(
        repeat=True,
        pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds)
    # Generate 1100 entries from iterator and save checkpoint.
    outputs = self.gen_outputs(ds_fn, [], 1100, verify_exhausted=False)
    self.assertSequenceEqual(outputs, list(range(1000)) + list(range(100)))
    # Restore from checkpoint and produce the rest of the elements from the
    # iterator.
    t = self.gen_outputs(
        ds_fn, [], 900, ckpt_saved=True, verify_exhausted=False)
    outputs.extend(t)
    self.assertSequenceEqual(
        outputs,
        list(range(1000)) + list(range(100)) + list(range(900)))
  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(pending_snapshot_expiry_seconds=[None, 1])))
  def testCheckpointAfterOneEpochThenRunFewSteps(
      self, pending_snapshot_expiry_seconds):
    ds_fn = self._build_snapshot_dataset(
        repeat=True,
        pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds)
    # Generate 200 entries from iterator but save checkpoint after producing
    # 100.
    outputs = self.gen_outputs(
        ds_fn, [1100],
        1200,
        verify_exhausted=False,
        save_checkpoint_at_end=False)
    self.assertSequenceEqual(
        outputs,
        list(range(1000)) + list(range(100)) + list(range(100)))
    outputs = outputs[:1100]
    t = self.gen_outputs(
        ds_fn, [], 900, ckpt_saved=True, verify_exhausted=False)
    outputs.extend(t)
    self.assertSequenceEqual(
        outputs, (list(range(1000)) + list(range(100)) + list(range(900))))
# Run the test suite when executed directly as a script.
if __name__ == "__main__":
  test.main()
| 37.834233 | 82 | 0.708259 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
import os
import shutil
import time
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.experimental.ops import snapshot
from tensorflow.python.data.kernel_tests import checkpoint_test_base
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.kernel_tests import tf_record_test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers as core_readers
from tensorflow.python.framework import combinations
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
def is_graphdef_file(filename):
  """Returns True iff `filename` names a serialized graph ("-graph.pbtxt")."""
  suffix = "-graph.pbtxt"
  return filename.endswith(suffix)
def is_temp_file(filename):
  """Returns True iff `filename` looks like a temporary file ("-tmp-" infix)."""
  return filename.find("-tmp-") != -1
def listdir_and_filter(dirname, filter_fn):
  """Returns the sorted entries of `dirname` for which `filter_fn` is true."""
  entries = sorted(os.listdir(dirname))
  return list(filter(filter_fn, entries))
class SnapshotTest(tf_record_test_base.TFRecordTestBase,
parameterized.TestCase):
def setUp(self):
super(SnapshotTest, self).setUp()
tmpdir = self.get_temp_dir()
tmpdir = os.path.join(tmpdir, "snapshot")
os.mkdir(tmpdir)
self._snapshot_dir = tmpdir
def tearDown(self):
super(SnapshotTest, self).tearDown()
shutil.rmtree(self._snapshot_dir)
def createTFRecords(self, num_files=10, num_records=100):
self._num_files = num_files
self._num_records = num_records
self._filenames = self._createFiles()
def removeTFRecords(self):
for filename in self._filenames:
os.remove(filename)
self._filenames = []
self._num_files = None
self._num_records = None
def assertDatasetProducesSet(self, dataset, expected):
actual = []
next_fn = self.getNext(dataset)
for _ in range(len(expected)):
elem = self.evaluate(next_fn())
actual.append(elem)
self.assertCountEqual(actual, expected)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_fn())
def assertSnapshotDirectoryContains(self, directory, num_fingerprints,
num_runs_per_fingerprint,
num_snapshot_shards_per_run):
dirlist = listdir_and_filter(
directory,
lambda p: not (is_graphdef_file(p) or is_temp_file(p)))
self.assertLen(dirlist, num_fingerprints)
for i in range(num_fingerprints):
fingerprint_dir = os.path.join(directory, dirlist[i])
fingerprint_dir_list = listdir_and_filter(
fingerprint_dir, lambda p: not is_temp_file(p))
self.assertLen(fingerprint_dir_list, num_runs_per_fingerprint + 1)
self.assertEqual(fingerprint_dir_list[num_runs_per_fingerprint],
"snapshot.metadata")
for j in range(num_runs_per_fingerprint):
run_dir = os.path.join(fingerprint_dir, fingerprint_dir_list[j])
run_dirlist = sorted(os.listdir(run_dir))
self.assertLen(run_dirlist, num_snapshot_shards_per_run)
file_counter = 0
for filename in run_dirlist:
self.assertEqual(filename, "%08d.shard" % file_counter)
file_counter += 1
@combinations.generate(test_base.default_test_combinations())
def testCreateSnapshotDataset(self):
dataset = dataset_ops.Dataset.from_tensors([1, 2, 3])
dataset.apply(snapshot.snapshot(self._snapshot_dir))
@combinations.generate(test_base.default_test_combinations())
def testReadSnapshotDatasetDefault(self):
self.createTFRecords()
filenames = self._filenames
expected = [
b"Record %d of file %d" % (r, f)
for f in range(0, 10)
for r in range(0, 100)
]
dataset = core_readers._TFRecordDataset(filenames)
dataset = dataset.apply(snapshot.snapshot(self._snapshot_dir))
self.assertDatasetProduces(dataset, expected)
self.assertSnapshotDirectoryContains(
self._snapshot_dir,
num_fingerprints=1,
num_runs_per_fingerprint=1,
num_snapshot_shards_per_run=multiprocessing.cpu_count())
self.removeTFRecords()
dataset2 = core_readers._TFRecordDataset(filenames)
dataset2 = dataset2.apply(snapshot.snapshot(self._snapshot_dir))
self.assertDatasetProduces(dataset2, expected)
@combinations.generate(test_base.default_test_combinations())
def testReadSnapshotDatasetAutoWriteSnappyRead(self):
self.createTFRecords()
filenames = self._filenames
expected = [
b"Record %d of file %d" % (r, f)
for f in range(0, 10)
for r in range(0, 100)
]
dataset = core_readers._TFRecordDataset(filenames)
dataset = dataset.apply(
snapshot.snapshot(self._snapshot_dir, compression="AUTO"))
self.assertDatasetProduces(dataset, expected)
self.removeTFRecords()
dataset2 = core_readers._TFRecordDataset(filenames)
dataset2 = dataset2.apply(
snapshot.snapshot(self._snapshot_dir, compression="SNAPPY"))
self.assertDatasetProduces(dataset2, expected)
@combinations.generate(test_base.default_test_combinations())
def testReadSnapshotDatasetCustomShardFn(self):
self.createTFRecords()
filenames = self._filenames
expected = [
b"Record %d of file %d" % (r, f)
for f in range(0, 10)
for r in range(0, 100)
]
dataset = core_readers._TFRecordDataset(filenames)
dataset = dataset.apply(
snapshot.snapshot(self._snapshot_dir, shard_func=lambda _: np.int64(0)))
self.assertDatasetProduces(dataset, expected)
self.assertSnapshotDirectoryContains(
self._snapshot_dir,
num_fingerprints=1,
num_runs_per_fingerprint=1,
num_snapshot_shards_per_run=1)
self.removeTFRecords()
dataset2 = core_readers._TFRecordDataset(filenames)
dataset2 = dataset2.apply(
snapshot.snapshot(self._snapshot_dir, shard_func=lambda _: 0))
self.assertDatasetProduces(dataset2, expected)
@combinations.generate(test_base.default_test_combinations())
def testReadSnapshotDatasetCustomReaderFn(self):
self.createTFRecords()
filenames = self._filenames
expected = [
b"Record %d of file %d" % (r, f)
for f in range(0, 10)
for r in range(0, 100)
]
dataset = core_readers._TFRecordDataset(filenames)
dataset = dataset.apply(
snapshot.snapshot(
self._snapshot_dir,
reader_func=(
lambda ds: ds.interleave(
lambda x: x,
cycle_length=4,
num_parallel_calls=4))))
self.assertDatasetProduces(dataset, expected)
self.assertSnapshotDirectoryContains(
self._snapshot_dir,
num_fingerprints=1,
num_runs_per_fingerprint=1,
num_snapshot_shards_per_run=multiprocessing.cpu_count())
self.removeTFRecords()
dataset2 = core_readers._TFRecordDataset(filenames)
dataset2 = dataset2.apply(
snapshot.snapshot(
self._snapshot_dir,
reader_func=(
lambda ds: ds.interleave(
lambda x: x,
cycle_length=4,
num_parallel_calls=4))))
self.assertDatasetProducesSet(dataset2, expected)
@combinations.generate(test_base.default_test_combinations())
def testSnapshotDatasetInvalidShardFn(self):
dataset = dataset_ops.Dataset.range(1000)
with self.assertRaises(TypeError):
dataset = dataset.apply(
snapshot.snapshot(
self._snapshot_dir, shard_func=lambda _: "invalid_fn"))
next_fn = self.getNext(dataset)
self.evaluate(next_fn())
@combinations.generate(test_base.default_test_combinations())
def testSnapshotDatasetInvalidReaderFn(self):
dataset = dataset_ops.Dataset.range(1000)
with self.assertRaises(TypeError):
dataset = dataset.apply(
snapshot.snapshot(self._snapshot_dir, reader_func=lambda x: x + 1))
next_fn = self.getNext(dataset)
self.evaluate(next_fn())
@combinations.generate(test_base.default_test_combinations())
def testRoundtripEmptySnapshot(self):
dataset = dataset_ops.Dataset.range(0)
dataset = dataset.apply(snapshot.snapshot(self._snapshot_dir))
self.assertDatasetProduces(dataset, [])
self.assertSnapshotDirectoryContains(
self._snapshot_dir,
num_fingerprints=1,
num_runs_per_fingerprint=1,
num_snapshot_shards_per_run=0)
dataset2 = dataset_ops.Dataset.range(0)
dataset2 = dataset.apply(snapshot.snapshot(self._snapshot_dir))
self.assertDatasetProduces(dataset2, [])
@combinations.generate(test_base.default_test_combinations())
def testWriteSnapshotDatasetSimple(self):
dataset = dataset_ops.Dataset.range(1000)
dataset = dataset.apply(snapshot.snapshot(self._snapshot_dir))
self.assertDatasetProduces(dataset, list(range(1000)))
self.assertSnapshotDirectoryContains(
self._snapshot_dir,
num_fingerprints=1,
num_runs_per_fingerprint=1,
num_snapshot_shards_per_run=multiprocessing.cpu_count())
@combinations.generate(test_base.default_test_combinations())
def testWriteSnapshotDatasetMultipleFingerprints(self):
dataset1 = dataset_ops.Dataset.range(1000)
dataset1 = dataset1.apply(snapshot.snapshot(self._snapshot_dir))
self.assertDatasetProduces(dataset1, list(range(1000)))
dataset2 = dataset_ops.Dataset.range(2000)
dataset2 = dataset2.apply(snapshot.snapshot(self._snapshot_dir))
self.assertDatasetProduces(dataset2, list(range(2000)))
self.assertSnapshotDirectoryContains(
self._snapshot_dir,
num_fingerprints=2,
num_runs_per_fingerprint=1,
num_snapshot_shards_per_run=multiprocessing.cpu_count())
@combinations.generate(test_base.default_test_combinations())
def testWriteSnapshotDatasetSameFingerprintMultipleCompleteRuns(self):
dataset1 = dataset_ops.Dataset.range(1000)
dataset1 = dataset1.apply(snapshot.snapshot(self._snapshot_dir))
self.assertDatasetProduces(dataset1, list(range(1000)))
dataset2 = dataset_ops.Dataset.range(1000)
dataset2 = dataset2.apply(snapshot.snapshot(self._snapshot_dir))
self.assertDatasetProduces(dataset2, list(range(1000)))
self.assertSnapshotDirectoryContains(
self._snapshot_dir,
num_fingerprints=1,
num_runs_per_fingerprint=1,
num_snapshot_shards_per_run=multiprocessing.cpu_count())
@combinations.generate(test_base.default_test_combinations())
def testWriteSnapshotDatasetSameFingerprintIncompleteRunRestart(self):
dataset1 = dataset_ops.Dataset.range(1000)
dataset1 = dataset1.apply(snapshot.snapshot(self._snapshot_dir))
next1 = self.getNext(dataset1)
for i in range(500):
self.assertEqual(i, self.evaluate(next1()))
dataset2 = dataset_ops.Dataset.range(1000)
dataset2 = dataset2.apply(snapshot.snapshot(self._snapshot_dir))
next2 = self.getNext(dataset2)
for i in range(500):
self.assertEqual(i, self.evaluate(next2()))
for i in range(500, 1000):
self.assertEqual(i, self.evaluate(next1()))
self.assertEqual(i, self.evaluate(next2()))
self.assertSnapshotDirectoryContains(
self._snapshot_dir,
num_fingerprints=1,
num_runs_per_fingerprint=2,
num_snapshot_shards_per_run=multiprocessing.cpu_count())
@combinations.generate(test_base.default_test_combinations())
def testWriteSnapshotCustomShardFunction(self):
dataset = dataset_ops.Dataset.range(1000)
dataset = dataset.enumerate()
dataset = dataset.apply(
snapshot.snapshot(self._snapshot_dir, shard_func=lambda i, _: i % 2))
dataset = dataset.map(lambda _, elem: elem)
self.assertDatasetProduces(dataset, list(range(1000)))
self.assertSnapshotDirectoryContains(
self._snapshot_dir,
num_fingerprints=1,
num_runs_per_fingerprint=1,
num_snapshot_shards_per_run=2)
@combinations.generate(test_base.default_test_combinations())
def testWriteSnapshotDatasetWithTuples(self):
dataset1 = dataset_ops.Dataset.range(0, 1000)
dataset2 = dataset_ops.Dataset.range(1000, 2000)
dataset3 = dataset_ops.Dataset.range(2000, 3000)
dataset4 = dataset_ops.Dataset.range(3000, 4000)
dataset = dataset_ops.Dataset.zip((dataset1, dataset2, dataset3, dataset4))
dataset = dataset.apply(snapshot.snapshot(self._snapshot_dir))
expected = list(
zip(
range(0, 1000), range(1000, 2000), range(2000, 3000),
range(3000, 4000)))
self.assertDatasetProduces(dataset, expected)
self.assertSnapshotDirectoryContains(
self._snapshot_dir,
num_fingerprints=1,
num_runs_per_fingerprint=1,
num_snapshot_shards_per_run=multiprocessing.cpu_count())
@combinations.generate(test_base.default_test_combinations())
def testWriteSnapshotShuffleSameFingerprint(self):
def make_dataset():
dataset = dataset_ops.Dataset.range(1000)
dataset = dataset.shuffle(1000)
dataset = dataset.apply(snapshot.snapshot(self._snapshot_dir))
return dataset
dataset1 = make_dataset()
self.assertDatasetProducesSet(dataset1, list(range(1000)))
dataset2 = make_dataset()
self.assertDatasetProducesSet(dataset2, list(range(1000)))
self.assertSnapshotDirectoryContains(
self._snapshot_dir,
num_fingerprints=1,
num_runs_per_fingerprint=1,
num_snapshot_shards_per_run=multiprocessing.cpu_count())
  @combinations.generate(test_base.default_test_combinations())
  def testReadUsingFlatMap(self):
    """Reading a finished snapshot through flat_map matches a direct read."""
    dataset = dataset_ops.Dataset.range(1000)
    dataset = dataset.apply(snapshot.snapshot(self._snapshot_dir))
    # First pass writes the snapshot.
    self.assertDatasetProduces(dataset, list(range(1000)))
    # Wrap the snapshotted dataset in a flat_map and read it back.
    flat_map = dataset_ops.Dataset.from_tensors(dataset).flat_map(lambda x: x)
    self.assertDatasetProduces(flat_map, list(range(1000)))
    self.assertSnapshotDirectoryContains(
        self._snapshot_dir,
        num_fingerprints=1,
        num_runs_per_fingerprint=1,
        num_snapshot_shards_per_run=multiprocessing.cpu_count())
  @combinations.generate(test_base.default_test_combinations())
  def testReadOptimizableUsingFlatMap(self):
    """Like testReadUsingFlatMap, but with an optimizable shuffled pipeline."""
    dataset = dataset_ops.Dataset.range(1000)
    dataset = dataset.shuffle(10)
    dataset = dataset.repeat(2)
    dataset = dataset.apply(snapshot.snapshot(self._snapshot_dir))
    # Two epochs of 0..999 in nondeterministic order.
    self.assertDatasetProducesSet(dataset, 2 * list(range(1000)))
    flat_map = dataset_ops.Dataset.from_tensors(dataset).flat_map(lambda x: x)
    self.assertDatasetProducesSet(flat_map, 2 * list(range(1000)))
    self.assertSnapshotDirectoryContains(
        self._snapshot_dir,
        num_fingerprints=1,
        num_runs_per_fingerprint=1,
        num_snapshot_shards_per_run=multiprocessing.cpu_count())
@combinations.generate(test_base.default_test_combinations())
def testRepeatAndPrefetch(self):
dataset = dataset_ops.Dataset.from_tensor_slices(np.random.rand(16, 32))
dataset = dataset.apply(snapshot.snapshot(self._snapshot_dir))
dataset = dataset.shuffle(buffer_size=16)
dataset = dataset.batch(16)
dataset = dataset.repeat()
dataset = dataset.prefetch(1)
next_element = self.getNext(dataset)
for _ in range(30):
self.evaluate(next_element())
class LegacySnapshotTest(tf_record_test_base.TFRecordTestBase,
                         parameterized.TestCase):
  """Tests for the legacy `snapshot.legacy_snapshot` transformation.

  Each test writes snapshots under a per-test temporary directory and then
  verifies the on-disk layout (`<fingerprint>/<run>/NNNNNNNN.snapshot` plus a
  `snapshot.metadata` file per fingerprint) and/or the read-back contents.
  """

  def setUp(self):
    super(LegacySnapshotTest, self).setUp()
    self.removeTFRecords()
    tmpdir = self.get_temp_dir()
    tmpdir = os.path.join(tmpdir, "snapshot")
    os.mkdir(tmpdir)
    self.snapshot_dir = tmpdir

  def tearDown(self):
    super(LegacySnapshotTest, self).tearDown()
    shutil.rmtree(self.snapshot_dir)

  def removeTFRecords(self):
    # Deletes the TFRecord fixture files so subsequent reads can only be
    # served from the snapshot; resets the bookkeeping list.
    for filename in self._filenames:
      os.remove(filename)
    self._filenames = []

  def setUpTFRecord(self, num_files=10, num_records=10):
    self._num_files = num_files
    self._num_records = num_records
    self._filenames = self._createFiles()

  def makeSnapshotDirectory(self):
    return self.snapshot_dir

  def assertSnapshotDirectoryContains(self, directory, num_fingerprints,
                                      num_runs_per_fp, num_snapshot_files):
    # Verifies the on-disk layout: `num_fingerprints` fingerprint directories,
    # each holding `num_runs_per_fp` run directories plus one
    # `snapshot.metadata` file, with `num_snapshot_files` shard files per run.
    dirlist = listdir_and_filter(
        directory,
        lambda p: not (is_graphdef_file(p) or is_temp_file(p)))
    self.assertLen(dirlist, num_fingerprints)

    for i in range(num_fingerprints):
      fingerprint_dir = os.path.join(directory, dirlist[i])
      fingerprint_dir_list = listdir_and_filter(
          fingerprint_dir, lambda p: not is_temp_file(p))
      self.assertLen(fingerprint_dir_list, num_runs_per_fp + 1)
      # The metadata file sorts after the run directories.
      self.assertEqual(fingerprint_dir_list[num_runs_per_fp],
                       "snapshot.metadata")

      for j in range(num_runs_per_fp):
        run_dir = os.path.join(fingerprint_dir, fingerprint_dir_list[j])
        run_dirlist = sorted(os.listdir(run_dir))
        self.assertLen(run_dirlist, num_snapshot_files)

        # Shard files must be named consecutively: 00000000.snapshot, ...
        file_counter = 0
        for filename in run_dirlist:
          self.assertEqual(filename, "%08d.snapshot" % file_counter)
          file_counter += 1

  @combinations.generate(test_base.default_test_combinations())
  def testWriteDifferentPipelinesInOneDirectory(self):
    """Two distinct pipelines in one directory get separate fingerprints."""
    tmpdir = self.snapshot_dir

    dataset = dataset_ops.Dataset.range(1000)
    dataset = dataset.apply(snapshot.legacy_snapshot(tmpdir))
    self.assertDatasetProduces(dataset, list(range(1000)))

    # A different pipeline (range(1001)) hashes to a second fingerprint.
    dataset = dataset_ops.Dataset.range(1001)
    dataset = dataset.apply(snapshot.legacy_snapshot(tmpdir))
    self.assertDatasetProduces(dataset, list(range(1001)))

    self.assertSnapshotDirectoryContains(tmpdir, 2, 1, 1)

  @combinations.generate(test_base.default_test_combinations())
  def testWriteSnapshotMultipleSimultaneous(self):
    """Interleaved readers of identical pipelines share one fingerprint/run."""
    tmpdir = self.snapshot_dir

    dataset1 = dataset_ops.Dataset.range(1000)
    dataset1 = dataset1.apply(snapshot.legacy_snapshot(tmpdir))
    next1 = self.getNext(dataset1)

    dataset2 = dataset_ops.Dataset.range(1000)
    dataset2 = dataset2.apply(snapshot.legacy_snapshot(tmpdir))
    next2 = self.getNext(dataset2)

    for i in range(0, 1000):
      self.assertEqual(i, self.evaluate(next1()))
      self.assertEqual(i, self.evaluate(next2()))

    self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)

  @combinations.generate(test_base.default_test_combinations())
  def testGetNextCreatesDir(self):
    """Merely creating an iterator must not materialize a run directory."""
    tmpdir = self.snapshot_dir

    dataset1 = dataset_ops.Dataset.range(1000)
    dataset1 = dataset1.apply(snapshot.legacy_snapshot(tmpdir))
    next1 = self.getNext(dataset1)

    # dataset2's iterator is never pulled from, so it should leave no trace.
    dataset2 = dataset_ops.Dataset.range(1001)
    dataset2 = dataset2.apply(snapshot.legacy_snapshot(tmpdir))
    _ = self.getNext(dataset2)

    for _ in range(1000):
      self.evaluate(next1())

    self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)

  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(compression=[
              snapshot.COMPRESSION_NONE, snapshot.COMPRESSION_GZIP,
              snapshot.COMPRESSION_SNAPPY
          ])))
  def testWriteSnapshotSimpleSuccessful(self, compression):
    """Basic write/read round trip for every supported compression mode."""
    tmpdir = self.snapshot_dir

    dataset = dataset_ops.Dataset.range(1000)
    dataset = dataset.apply(
        snapshot.legacy_snapshot(tmpdir, compression=compression))
    self.assertDatasetProduces(dataset, list(range(1000)))

    self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)

  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(compression=[
              snapshot.COMPRESSION_NONE, snapshot.COMPRESSION_GZIP,
              snapshot.COMPRESSION_SNAPPY
          ])))
  def testWriteSnapshotRepeatAfterwards(self, compression):
    """A repeat after the snapshot replays the snapshotted elements."""
    tmpdir = self.snapshot_dir

    dataset = dataset_ops.Dataset.range(10)
    dataset = dataset.apply(
        snapshot.legacy_snapshot(tmpdir, compression=compression))
    dataset = dataset.repeat(10)
    self.assertDatasetProduces(dataset, list(range(10)) * 10)

    self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)

  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(compression=[
              snapshot.COMPRESSION_NONE, snapshot.COMPRESSION_GZIP,
              snapshot.COMPRESSION_SNAPPY
          ])))
  def testWriteSnapshotMixTypes(self, compression):
    """Snapshots tuples mixing int64 and string components."""
    tmpdir = self.snapshot_dir

    dataset = dataset_ops.Dataset.range(10)

    def map_fn(x):
      return (x, string_ops.as_string(x), string_ops.as_string(2 * x), 2 * x)

    dataset = dataset.map(map_fn)
    dataset = dataset.apply(
        snapshot.legacy_snapshot(tmpdir, compression=compression))
    dataset = dataset.repeat(10)

    expected = []
    for i in range(10):
      expected.append((i, str(i), str(2 * i), 2 * i))
    self.assertDatasetProduces(dataset, expected * 10)

    self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)

  @combinations.generate(test_base.default_test_combinations())
  def testSpecifySnapshotNameWriteAndRead(self):
    """A snapshot_name overrides the fingerprint/run directory names."""
    tmpdir = self.snapshot_dir

    dataset = dataset_ops.Dataset.range(10)
    dataset = dataset.apply(
        snapshot.legacy_snapshot(tmpdir, snapshot_name="my_custom_snapshot"))
    dataset = dataset.repeat(10)
    self.assertDatasetProduces(dataset, list(range(10)) * 10)

    self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)
    # Named snapshots live under "custom-<name>/custom".
    self.assertTrue(
        os.path.exists(os.path.join(tmpdir, "custom-my_custom_snapshot")))
    self.assertTrue(
        os.path.exists(
            os.path.join(tmpdir, "custom-my_custom_snapshot", "custom")))

  @combinations.generate(test_base.default_test_combinations())
  def testForcePassthroughMode(self):
    """mode="passthrough" must write nothing to disk."""
    tmpdir = self.snapshot_dir

    dataset = dataset_ops.Dataset.range(10)
    dataset = dataset.apply(
        snapshot.legacy_snapshot(tmpdir, mode="passthrough"))
    dataset = dataset.repeat(10)
    self.assertDatasetProduces(dataset, list(range(10)) * 10)

    self.assertSnapshotDirectoryContains(tmpdir, 0, 0, 0)

  @combinations.generate(test_base.default_test_combinations())
  def testForceWriteMode(self):
    """mode="write" re-writes a run on every epoch (10 repeats -> 10 runs)."""
    tmpdir = self.snapshot_dir

    dataset = dataset_ops.Dataset.range(10)
    dataset = dataset.apply(snapshot.legacy_snapshot(tmpdir, mode="write"))
    dataset = dataset.repeat(10)
    self.assertDatasetProduces(dataset, list(range(10)) * 10)

    self.assertSnapshotDirectoryContains(tmpdir, 1, 10, 1)

  @combinations.generate(test_base.default_test_combinations())
  def testForceReadMode(self):
    """mode="read" serves entirely from an existing (renamed) snapshot."""
    tmpdir = self.snapshot_dir

    # We write a copy of the snapshot first.
    dataset = dataset_ops.Dataset.range(10)
    dataset = dataset.apply(
        snapshot.legacy_snapshot(
            tmpdir, mode="write", snapshot_name="my_custom_snapshot"))
    self.assertDatasetProduces(dataset, list(range(10)))

    # We move the run to a new name, so origin pipeline fingerprinting cannot
    # possibly match it; only an explicit snapshot_name read will find it.
    shutil.move(
        os.path.join(tmpdir, "custom-my_custom_snapshot"),
        os.path.join(tmpdir, "custom-my_custom_snapshot_2"))

    dataset = dataset_ops.Dataset.range(10)
    dataset = dataset.apply(
        snapshot.legacy_snapshot(
            tmpdir, mode="read", snapshot_name="my_custom_snapshot_2"))
    self.assertDatasetProduces(dataset, list(range(10)))

    self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)

  @combinations.generate(test_base.default_test_combinations())
  def testForceReadNonexistentSnapshot(self):
    """mode="read" with no snapshot on disk raises NotFoundError."""
    tmpdir = self.snapshot_dir
    dataset = dataset_ops.Dataset.range(10)
    with self.assertRaises(errors.NotFoundError):
      dataset = dataset.apply(snapshot.legacy_snapshot(tmpdir, mode="read"))
      get_next = self.getNext(dataset)
      self.evaluate(get_next())

  @combinations.generate(test_base.default_test_combinations())
  def testForceReadNonexistentNamedSnapshot(self):
    """mode="read" with an unknown snapshot_name raises NotFoundError."""
    tmpdir = self.snapshot_dir
    dataset = dataset_ops.Dataset.range(10)
    with self.assertRaises(errors.NotFoundError):
      dataset = dataset.apply(
          snapshot.legacy_snapshot(
              tmpdir, mode="read", snapshot_name="my_nonexistent_snapshot"))
      get_next = self.getNext(dataset)
      self.evaluate(get_next())

  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(compression=[
              snapshot.COMPRESSION_NONE, snapshot.COMPRESSION_GZIP,
              snapshot.COMPRESSION_SNAPPY
          ])))
  def testReadSnapshotBackAfterWrite(self, compression):
    """After the source files are deleted, reads come from the snapshot."""
    self.setUpTFRecord()
    filenames = self._filenames
    expected = [
        b"Record %d of file %d" % (r, f)
        for f in range(0, 10)
        for r in range(0, 10)
    ]

    tmpdir = self.snapshot_dir
    dataset = core_readers._TFRecordDataset(filenames)
    dataset = dataset.apply(
        snapshot.legacy_snapshot(tmpdir, compression=compression))
    self.assertDatasetProduces(dataset, expected)

    # Remove the original files: the second read can only use the snapshot.
    self.removeTFRecords()
    dataset2 = core_readers._TFRecordDataset(filenames)
    dataset2 = dataset2.apply(
        snapshot.legacy_snapshot(tmpdir, compression=compression))
    self.assertDatasetProduces(dataset2, expected)

  @combinations.generate(test_base.default_test_combinations())
  def testReadShuffledSnapshotAfterWrite(self):
    """shuffle_on_read reorders shards without losing any element."""
    self.setUpTFRecord(num_files=10, num_records=50)
    filenames = self._filenames
    expected = [
        b"Record %d of file %d" % (r, f)
        for f in range(0, 10)
        for r in range(0, 50)
    ]

    tmpdir = self.snapshot_dir
    # Tiny shards (100 bytes) so there are many shards to shuffle.
    dataset = core_readers._TFRecordDataset(filenames)
    dataset = dataset.apply(
        snapshot.legacy_snapshot(tmpdir, shard_size_bytes=100))
    self.assertDatasetProduces(dataset, expected)

    self.removeTFRecords()
    dataset2 = core_readers._TFRecordDataset(filenames)
    dataset2 = dataset2.apply(
        snapshot.legacy_snapshot(
            tmpdir, shard_size_bytes=100, shuffle_on_read=True))
    next2 = self.getNext(dataset2)

    res1 = self.evaluate(next2())
    res2 = self.evaluate(next2())
    res3 = self.evaluate(next2())
    res4 = self.evaluate(next2())
    res5 = self.evaluate(next2())

    # NOTE: There is a small chance the shuffled order matches the original
    # prefix, but with many shards this assertion is effectively stable.
    self.assertNotEqual([res1, res2, res3, res4, res5], expected[0:5])

    # make sure all the elements are still there
    dataset3 = core_readers._TFRecordDataset(filenames)
    dataset3 = dataset3.apply(
        snapshot.legacy_snapshot(
            tmpdir, shard_size_bytes=100, shuffle_on_read=True))
    self.assertDatasetProduces(dataset3, expected, assert_items_equal=True)

  @combinations.generate(test_base.default_test_combinations())
  def testReadShuffledSnapshotWithSeedAfterWrite(self):
    """The same shuffle_seed reproduces the same shuffled read order."""
    self.setUpTFRecord(num_files=10, num_records=50)
    filenames = self._filenames
    expected = [
        b"Record %d of file %d" % (r, f)  # pylint:disable=g-complex-comprehension
        for f in range(0, 10)
        for r in range(0, 50)
    ]

    tmpdir = self.snapshot_dir
    dataset = core_readers._TFRecordDataset(filenames)
    dataset = dataset.apply(
        snapshot.legacy_snapshot(tmpdir, shard_size_bytes=10))
    self.assertDatasetProduces(dataset, expected)

    # remove the original files and try to read the data back only from snapshot
    self.removeTFRecords()

    dataset2 = core_readers._TFRecordDataset(filenames)
    dataset2 = dataset2.apply(
        snapshot.legacy_snapshot(
            tmpdir,
            shard_size_bytes=10,
            shuffle_on_read=True,
            shuffle_seed=123456))
    next2 = self.getNext(dataset2)

    dataset3 = core_readers._TFRecordDataset(filenames)
    dataset3 = dataset3.apply(
        snapshot.legacy_snapshot(
            tmpdir,
            shard_size_bytes=10,
            shuffle_on_read=True,
            shuffle_seed=123456))
    next3 = self.getNext(dataset3)

    # make sure that the items are read back in the same order for both datasets
    for _ in range(500):
      res2 = self.evaluate(next2())
      res3 = self.evaluate(next3())
      self.assertEqual(res2, res3)

  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(compression=[
              snapshot.COMPRESSION_NONE, snapshot.COMPRESSION_GZIP,
              snapshot.COMPRESSION_SNAPPY
          ])))
  def testReadSnapshotParallelAfterWrite(self, compression):
    """Multi-threaded reads return all elements (order not guaranteed)."""
    self.setUpTFRecord(5, 500)
    filenames = self._filenames
    expected = [
        b"Record %d of file %d" % (r, f)  # pylint:disable=g-complex-comprehension
        for f in range(0, 5)
        for r in range(0, 500)
    ]

    tmpdir = self.snapshot_dir
    dataset = core_readers._TFRecordDataset(filenames)
    dataset = dataset.apply(
        snapshot.legacy_snapshot(
            tmpdir,
            shard_size_bytes=1024 * 1024,
            num_reader_threads=2,
            reader_buffer_size=10,
            compression=compression))
    self.assertDatasetProduces(dataset, expected, assert_items_equal=True)

    # remove the original files and try to read the data back only from
    # snapshot.
    self.removeTFRecords()

    dataset2 = core_readers._TFRecordDataset(filenames)
    dataset2 = dataset2.apply(
        snapshot.legacy_snapshot(
            tmpdir,
            shard_size_bytes=1024 * 1024,
            num_reader_threads=2,
            reader_buffer_size=10,
            compression=compression))
    self.assertDatasetProduces(dataset2, expected, assert_items_equal=True)

  # Not testing Snappy here because Snappy reads currently require a lot of
  # memory.
  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.times(
              combinations.combine(compression=[
                  snapshot.COMPRESSION_NONE, snapshot.COMPRESSION_GZIP
              ]),
              combinations.combine(threads=2, size=[1, 2]) +
              combinations.combine(threads=8, size=[1, 4, 8]))))
  def testReadSnapshotBackAfterMultiThreadedWrite(self, compression, threads,
                                                  size):
    """Snapshots written with multiple writer threads read back completely."""
    self.setUpTFRecord()
    filenames = self._filenames
    expected = [
        b"Record %d of file %d" % (r, f)  # pylint:disable=g-complex-comprehension
        for f in range(0, 10)
        for r in range(0, 10)
    ]

    tmpdir = self.snapshot_dir
    dataset = core_readers._TFRecordDataset(filenames)
    dataset = dataset.apply(
        snapshot.legacy_snapshot(
            tmpdir,
            compression=compression,
            num_writer_threads=threads,
            writer_buffer_size=size))
    self.assertDatasetProduces(dataset, expected)

    # remove the original files and try to read the data back only from
    # snapshot
    self.removeTFRecords()

    dataset2 = core_readers._TFRecordDataset(filenames)
    dataset2 = dataset2.apply(
        snapshot.legacy_snapshot(tmpdir, compression=compression))
    self.assertDatasetProduces(dataset2, expected, assert_items_equal=True)

  @combinations.generate(test_base.default_test_combinations())
  def testSameFingerprintWithDifferentInitializationOrder(self):
    """Structurally identical pipelines share one fingerprint, regardless of
    the Python-side order in which the component datasets were created."""
    tmpdir = self.snapshot_dir

    dataset1 = dataset_ops.Dataset.range(0, 100)
    dataset2 = dataset_ops.Dataset.range(100, 200)
    dataset3 = dataset_ops.Dataset.range(200, 300)
    dataset = dataset1.concatenate(dataset2).concatenate(dataset3)
    dataset = dataset.apply(snapshot.legacy_snapshot(tmpdir))
    self.assertDatasetProduces(dataset, list(range(300)))

    # Same concatenation, but components constructed in reverse order.
    dataset4 = dataset_ops.Dataset.range(200, 300)
    dataset5 = dataset_ops.Dataset.range(100, 200)
    dataset6 = dataset_ops.Dataset.range(0, 100)
    dataset = dataset6.concatenate(dataset5).concatenate(dataset4)
    dataset = dataset.apply(snapshot.legacy_snapshot(tmpdir))
    self.assertDatasetProduces(dataset, list(range(300)))

    self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)

  @combinations.generate(test_base.default_test_combinations())
  def testExpiredSnapshotRewrite(self):
    """An unfinalized run past its expiry is re-written by a new reader."""
    tmpdir = self.snapshot_dir

    dataset1 = dataset_ops.Dataset.range(1000)
    dataset1 = dataset1.apply(
        snapshot.legacy_snapshot(tmpdir, pending_snapshot_expiry_seconds=1))
    next1 = self.getNext(dataset1)

    # Don't finish reading dataset1, so it is never finalized
    for _ in range(500):
      self.evaluate(next1())
    self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)

    # Wait past the 1-second pending-snapshot expiry.
    time.sleep(2)

    # Creating dataset2 after the expiry starts a second run.
    dataset2 = dataset_ops.Dataset.range(1000)
    dataset2 = dataset2.apply(
        snapshot.legacy_snapshot(tmpdir, pending_snapshot_expiry_seconds=1))
    next2 = self.getNext(dataset2)

    for _ in range(500):
      self.evaluate(next2())
    self.assertSnapshotDirectoryContains(tmpdir, 1, 2, 1)

  @combinations.generate(test_base.default_test_combinations())
  def testSnapshotArgsCreateNewSnapshot(self):
    """Changing snapshot arguments (shard size) creates a new fingerprint."""
    tmpdir = self.snapshot_dir

    dataset1 = dataset_ops.Dataset.range(1000)
    dataset1 = dataset1.apply(
        snapshot.legacy_snapshot(tmpdir, shard_size_bytes=10000))
    next1 = self.getNext(dataset1)

    for _ in range(1000):
      self.evaluate(next1())
    self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)

    # Create second snapshot with a different shard_size_bytes
    # NOTE(review): `dataset1.apply(...)` below is presumably a typo for
    # `dataset2.apply(...)` — the `dataset2` defined on the previous line is
    # otherwise unused. The test still passes because the differing
    # shard_size_bytes alone forces a second fingerprint. TODO: confirm.
    dataset2 = dataset_ops.Dataset.range(1000)
    dataset2 = dataset1.apply(
        snapshot.legacy_snapshot(tmpdir, shard_size_bytes=20000))
    next2 = self.getNext(dataset2)

    for _ in range(1000):
      self.evaluate(next2())
    self.assertSnapshotDirectoryContains(tmpdir, 2, 1, 1)

  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(compression=[
              snapshot.COMPRESSION_NONE, snapshot.COMPRESSION_GZIP,
              snapshot.COMPRESSION_SNAPPY
          ])))
  def testSpecifyShardSize(self, compression):
    """shard_size_bytes splits the run into the expected number of files."""
    tmpdir = self.snapshot_dir

    # Ten 1024x1024 float tensors (~8 MB each) against a 10 MB shard limit.
    dataset = dataset_ops.Dataset.from_tensor_slices([1.0])
    dataset = dataset.map(lambda x: gen_array_ops.broadcast_to(x, [1024, 1024]))
    dataset = dataset.repeat(10)
    dataset = dataset.apply(
        snapshot.legacy_snapshot(
            tmpdir, shard_size_bytes=10 * 1024 * 1024, compression=compression))
    next_fn = self.getNext(dataset)

    for _ in range(10):
      self.evaluate(next_fn())

    # Compressed data fits in one shard; uncompressed spills into three.
    num_files = 1
    if compression == snapshot.COMPRESSION_NONE:
      num_files = 3
    self.assertSnapshotDirectoryContains(tmpdir, 1, 1, num_files)

  @combinations.generate(test_base.default_test_combinations())
  def testAdditionalOperationsAfterReadBack(self):
    """Transformations applied after a snapshot read-back work normally."""
    self.setUpTFRecord()
    filenames = self._filenames
    expected = [
        b"Record %d of file %d" % (r, f)
        for f in range(0, 10)
        for r in range(0, 10)
    ]

    tmpdir = self.snapshot_dir
    dataset = core_readers._TFRecordDataset(filenames)
    dataset = dataset.apply(snapshot.legacy_snapshot(tmpdir))
    self.assertDatasetProduces(dataset, expected)

    # remove the original files and try to read the data back only from snapshot
    self.removeTFRecords()

    dataset2 = core_readers._TFRecordDataset(filenames)
    dataset2 = dataset2.apply(snapshot.legacy_snapshot(tmpdir))
    self.assertDatasetProduces(dataset2, expected)

    # Each record with its first two bytes ("Re") stripped by substr below.
    expected_after = [
        b"cord %d of file %d" % (r, f)
        for f in range(0, 10)
        for r in range(0, 10)
    ]

    dataset3 = core_readers._TFRecordDataset(filenames)
    dataset3 = dataset3.apply(snapshot.legacy_snapshot(tmpdir))
    dataset3 = dataset3.map(lambda x: string_ops.substr_v2(x, 2, 1000))
    self.assertDatasetProduces(dataset3, expected_after)
class SnapshotCheckpointTest(checkpoint_test_base.CheckpointTestBase,
                             parameterized.TestCase):
  """Checkpoint save/restore coverage for `snapshot.snapshot` pipelines."""

  def _build_snapshot_dataset(self, repeat=False):
    """Returns a nullary factory building a snapshotted range(100) dataset."""

    def ds_fn():
      # Lazily create the snapshot directory under the test's temp dir.
      snapshot_path = os.path.join(self.get_temp_dir(), "snapshot")
      self._snapshot_dir = snapshot_path
      if not os.path.exists(snapshot_path):
        os.mkdir(snapshot_path)

      pipeline = dataset_ops.Dataset.range(100).apply(
          snapshot.snapshot(snapshot_path))
      # With repeat=True the second epoch reads back the written snapshot.
      return pipeline.repeat(2) if repeat else pipeline

    return ds_fn

  @combinations.generate(test_base.default_test_combinations())
  def testCheckpointBeforeEpochEndNoRepeat(self):
    """Restoring mid-epoch resumes the stream exactly where it stopped."""
    ds_fn = self._build_snapshot_dataset(repeat=False)
    produced = self.gen_outputs(ds_fn, [], 50, verify_exhausted=False)
    self.assertSequenceEqual(produced, range(50))
    resumed = self.gen_outputs(
        ds_fn, [], 50, ckpt_saved=True, verify_exhausted=True)
    produced.extend(resumed)
    self.assertSequenceEqual(produced, range(100))

  @combinations.generate(test_base.default_test_combinations())
  def testCheckpointBeforeOneEpochWithReading(self):
    """Checkpoint during the write epoch; resume finishes it, then re-reads."""
    ds_fn = self._build_snapshot_dataset(repeat=True)

    # First 50 elements of epoch one.
    produced = self.gen_outputs(ds_fn, [], 50, verify_exhausted=False)
    self.assertSequenceEqual(produced, list(range(50)))

    # Remaining 50 of epoch one plus the full second (read-back) epoch.
    resumed = self.gen_outputs(
        ds_fn, [], 150, ckpt_saved=True, verify_exhausted=True)
    produced.extend(resumed)
    self.assertSequenceEqual(
        produced,
        list(range(50)) + list(range(50, 100)) + list(range(100)))

  @combinations.generate(test_base.default_test_combinations())
  def testCheckpointBeforeOneEpochThenRunAFewSteps(self):
    """Elements consumed after the save point are replayed after restore."""
    ds_fn = self._build_snapshot_dataset(repeat=False)
    produced = self.gen_outputs(
        ds_fn, [10], 20, verify_exhausted=False, save_checkpoint_at_end=False)
    self.assertSequenceEqual(produced, range(20))

    # Discard everything past the checkpoint; restoring replays it.
    produced = produced[:10]
    produced.extend(
        self.gen_outputs(ds_fn, [], 90, ckpt_saved=True, verify_exhausted=True))
    self.assertSequenceEqual(produced, range(100))

  @combinations.generate(test_base.default_test_combinations())
  def testCheckpointAfterOneEpoch(self):
    """Checkpoint taken during the second (read) epoch restores correctly."""
    ds_fn = self._build_snapshot_dataset(repeat=True)

    produced = self.gen_outputs(ds_fn, [], 110, verify_exhausted=False)
    self.assertSequenceEqual(produced, list(range(100)) + list(range(10)))

    resumed = self.gen_outputs(
        ds_fn, [], 90, ckpt_saved=True, verify_exhausted=True)
    produced.extend(resumed)
    self.assertSequenceEqual(
        produced,
        list(range(100)) + list(range(10)) + list(range(10, 100)))

  @combinations.generate(test_base.default_test_combinations())
  def testCheckpointAfterOneEpochRunFewSteps(self):
    """Post-checkpoint reads in epoch two are replayed after restore."""
    ds_fn = self._build_snapshot_dataset(repeat=True)
    produced = self.gen_outputs(
        ds_fn, [110], 120, verify_exhausted=False, save_checkpoint_at_end=False)
    self.assertSequenceEqual(produced, list(range(100)) + list(range(20)))

    produced = produced[:110]
    resumed = self.gen_outputs(
        ds_fn, [], 90, ckpt_saved=True, verify_exhausted=True)
    produced.extend(resumed)
    self.assertSequenceEqual(
        produced,
        list(range(100)) + list(range(10)) + list(range(10, 100)))
class LegacySnapshotCheckpointTest(
    checkpoint_test_base.CheckpointTestBase, parameterized.TestCase):
  """Checkpoint save/restore coverage for `snapshot.legacy_snapshot`."""

  def _build_snapshot_dataset(self,
                              num_threads=1,
                              repeat=False,
                              pending_snapshot_expiry_seconds=-1,
                              shard_size_bytes=None):
    """Returns a nullary factory building a snapshotted range(1000) dataset.

    num_threads controls both reader and writer threads; buffer sizes scale
    with it. Other arguments are forwarded to legacy_snapshot.
    """

    def ds_fn():
      self.snapshot_dir = os.path.join(self.get_temp_dir(), "snapshot")
      if not os.path.exists(self.snapshot_dir):
        os.mkdir(self.snapshot_dir)
      dataset = dataset_ops.Dataset.range(1000)
      dataset = dataset.apply(
          snapshot.legacy_snapshot(
              self.snapshot_dir,
              num_writer_threads=num_threads,
              writer_buffer_size=2 * num_threads,
              num_reader_threads=num_threads,
              reader_buffer_size=2 * num_threads,
              pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds,
              shard_size_bytes=shard_size_bytes))
      if repeat:
        dataset = dataset.repeat(2)
      return dataset

    return ds_fn

  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(pending_snapshot_expiry_seconds=[None, 1])))
  def testSnapshotBeforeEpochEnd(self, pending_snapshot_expiry_seconds):
    """Restoring a mid-epoch checkpoint resumes the write where it stopped."""
    ds_fn = self._build_snapshot_dataset(
        pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds)
    outputs = self.gen_outputs(ds_fn, [], 100, verify_exhausted=False)
    self.assertSequenceEqual(outputs, range(100))
    outputs.extend(
        self.gen_outputs(
            ds_fn, [], 900, ckpt_saved=True, verify_exhausted=False))
    self.assertSequenceEqual(outputs, range(1000))

  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(pending_snapshot_expiry_seconds=[None, 1])))
  def testCheckpointBeforeOneEpochThenRunFewStepsSmallShardMultiThread(
      self, pending_snapshot_expiry_seconds):
    """Graph-mode variant: save at step 100, run to 400, restore, finish.

    Elements 100..399 consumed after the save are replayed after restore,
    and the resulting snapshot directory layout is checked explicitly.
    """
    ds_fn = self._build_snapshot_dataset(
        pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds,
        shard_size_bytes=100)

    outputs = []
    with ops.Graph().as_default() as g:
      init_op, get_next_op, saver = self._build_graph(ds_fn)
      with self.session(graph=g) as sess:
        self._initialize(init_op, sess)
        # Consume 100 elements, then checkpoint.
        start = 0
        end = 100
        num_iters = end - start
        for _ in range(num_iters):
          outputs.append(sess.run(get_next_op))
        self._save(sess, saver)
        # Keep consuming past the checkpoint (these will be replayed).
        start = 100
        end = 400
        num_iters = end - start
        for _ in range(num_iters):
          outputs.append(sess.run(get_next_op))
    self.assertSequenceEqual(outputs, range(400))

    outputs = outputs[:100]
    outputs.extend(
        self.gen_outputs(
            ds_fn, [], 900, ckpt_saved=True, verify_exhausted=False))
    self.assertSequenceEqual(outputs, range(1000))
    # One fingerprint directory plus the "-graph.pbtxt" debug dump.
    fp_dir_list = os.listdir(self.snapshot_dir)
    self.assertLen(list(fp_dir_list), 2)
    for d in fp_dir_list:
      if not d.endswith("-graph.pbtxt"):
        fp_dir = os.path.join(self.snapshot_dir, d)
        run_dir_list = os.listdir(fp_dir)
        self.assertLen(list(run_dir_list), 2)
        for e in run_dir_list:
          if e != "snapshot.metadata":
            run_dir = os.path.join(fp_dir, e)
            # 100-byte shards over 1000 int64 elements -> 258 shard files.
            self.assertLen(list(os.listdir(run_dir)), 258)

  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(pending_snapshot_expiry_seconds=[None, 1])))
  def testCheckpointBeforeOneEpochThenRunFewSteps(
      self, pending_snapshot_expiry_seconds):
    """Elements consumed after the save point are replayed after restore."""
    ds_fn = self._build_snapshot_dataset(
        pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds)

    # Run 200 steps but save the checkpoint after step 100.
    outputs = self.gen_outputs(
        ds_fn, [100], 200, verify_exhausted=False, save_checkpoint_at_end=False)
    self.assertSequenceEqual(outputs, range(200))

    outputs = outputs[:100]
    outputs.extend(
        self.gen_outputs(
            ds_fn, [], 900, ckpt_saved=True, verify_exhausted=False))
    self.assertSequenceEqual(outputs, range(1000))

  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(pending_snapshot_expiry_seconds=[None, 1])))
  def testCheckpointBeforeOneEpochThenRunFewStepsMultipleThreads(
      self, pending_snapshot_expiry_seconds):
    """Same as above, but with two reader/writer threads."""
    ds_fn = self._build_snapshot_dataset(
        num_threads=2,
        pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds)

    outputs = self.gen_outputs(
        ds_fn, [100], 200, verify_exhausted=False, save_checkpoint_at_end=False)
    self.assertSequenceEqual(outputs, range(200))

    outputs = outputs[:100]
    outputs.extend(
        self.gen_outputs(
            ds_fn, [], 900, ckpt_saved=True, verify_exhausted=False))
    self.assertSequenceEqual(outputs, range(1000))

  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(pending_snapshot_expiry_seconds=[None, 1])))
  def testCheckpointAfterOneEpoch(self, pending_snapshot_expiry_seconds):
    """Checkpoint taken during the second (read-back) epoch restores."""
    ds_fn = self._build_snapshot_dataset(
        repeat=True,
        pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds)

    # 1000 elements of epoch one plus 10 of epoch two.
    outputs = self.gen_outputs(ds_fn, [], 1100, verify_exhausted=False)
    self.assertSequenceEqual(outputs, list(range(1000)) + list(range(100)))
    t = self.gen_outputs(
        ds_fn, [], 900, ckpt_saved=True, verify_exhausted=False)
    outputs.extend(t)
    self.assertSequenceEqual(
        outputs,
        list(range(1000)) + list(range(100)) + list(range(900)))

  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(pending_snapshot_expiry_seconds=[None, 1])))
  def testCheckpointAfterOneEpochThenRunFewSteps(
      self, pending_snapshot_expiry_seconds):
    """Post-checkpoint reads in the second epoch are replayed on restore."""
    ds_fn = self._build_snapshot_dataset(
        repeat=True,
        pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds)

    # Run to step 1200 with the checkpoint saved at step 1100.
    outputs = self.gen_outputs(
        ds_fn, [1100],
        1200,
        verify_exhausted=False,
        save_checkpoint_at_end=False)
    self.assertSequenceEqual(
        outputs,
        list(range(1000)) + list(range(100)) + list(range(100)))

    outputs = outputs[:1100]
    t = self.gen_outputs(
        ds_fn, [], 900, ckpt_saved=True, verify_exhausted=False)
    outputs.extend(t)
    self.assertSequenceEqual(
        outputs, (list(range(1000)) + list(range(100)) + list(range(900))))
# Standard TensorFlow test entry point: runs all test cases defined above.
if __name__ == "__main__":
  test.main()
| true | true |
f720977f63fc56cdf8a97938eefdecd9ebe62107 | 651 | py | Python | src/prereq/exercise8.py | kradical/cluster-analysis-udemy | e2101bdb08ae3b9ed0ed8c4c1c488e3a75a1b7c5 | [
"MIT"
] | null | null | null | src/prereq/exercise8.py | kradical/cluster-analysis-udemy | e2101bdb08ae3b9ed0ed8c4c1c488e3a75a1b7c5 | [
"MIT"
] | null | null | null | src/prereq/exercise8.py | kradical/cluster-analysis-udemy | e2101bdb08ae3b9ed0ed8c4c1c488e3a75a1b7c5 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
# Plot a spiral dataset
def generateArm(rotation, step, n_points=500, jitter=1.0 / 7):
    """Generate one noisy logarithmic-spiral arm.

    Points are sampled at uniform random angles in [0, step), placed on the
    spiral r = exp(theta) - 1, jittered, and finally rotated by `rotation`
    about the origin.

    :param rotation: angle (radians) by which the whole arm is rotated.
    :param step: angular span (radians) covered by the arm.
    :param n_points: number of points to generate (default 500, the value
        previously hard-coded).
    :param jitter: width of the uniform noise added to each coordinate
        (default 1/7, the value previously hard-coded as "/ 7").
    :return: tuple (x, y) of numpy arrays of length ``n_points``.
    """
    theta = np.random.rand(n_points) * step
    r = np.exp(theta) - 1
    # Uniform noise in [-jitter/2, jitter/2) on each coordinate.
    x = r * np.cos(theta) + (np.random.rand(n_points) - 0.5) * jitter
    y = r * np.sin(theta) + (np.random.rand(n_points) - 0.5) * jitter
    # 2D rotation of the whole arm about the origin.
    x, y = (x * np.cos(rotation) - y * np.sin(rotation),
            x * np.sin(rotation) + y * np.cos(rotation))
    return (x, y)
def main():
    """Draw a six-armed spiral dataset with matplotlib."""
    arm_count = 6
    angle_step = 2 * np.pi / arm_count
    # One scatter series per arm, each rotated by a multiple of the step.
    for arm_index in range(arm_count):
        arm_x, arm_y = generateArm(arm_index * angle_step, angle_step)
        plt.scatter(arm_x, arm_y)
    plt.show()
# Render the spiral only when executed as a script.
if __name__ == '__main__':
    main()
| 20.34375 | 99 | 0.563748 | import numpy as np
import matplotlib.pyplot as plt
def generateArm(rotation, step):
theta = np.random.rand(500) * step
r = np.exp(theta) - 1
x = r * np.cos(theta) + (np.random.rand(500) - 0.5) / 7
y = r * np.sin(theta) + (np.random.rand(500) - 0.5) / 7
x, y = x * np.cos(rotation) - y * np.sin(rotation), x * np.sin(rotation) + y * np.cos(rotation)
return (x, y)
def main():
arms = 6
step = 2 * np.pi / arms
for i in range(arms):
rotation = i * step
x, y = generateArm(rotation, step)
plt.scatter(x, y)
plt.show()
if __name__ == '__main__':
main()
| true | true |
f7209acf0f181cb1058d455de2841e729f3c8cd5 | 3,798 | py | Python | mlops/parallelm/mlops/stats/health/categorical_hist_stat.py | lisapm/mlpiper | 74ad5ae343d364682cc2f8aaa007f2e8a1d84929 | [
"Apache-2.0"
] | 7 | 2019-04-08T02:31:55.000Z | 2021-11-15T14:40:49.000Z | mlops/parallelm/mlops/stats/health/categorical_hist_stat.py | lisapm/mlpiper | 74ad5ae343d364682cc2f8aaa007f2e8a1d84929 | [
"Apache-2.0"
] | 31 | 2019-02-22T22:23:26.000Z | 2021-08-02T17:17:06.000Z | mlops/parallelm/mlops/stats/health/categorical_hist_stat.py | lisapm/mlpiper | 74ad5ae343d364682cc2f8aaa007f2e8a1d84929 | [
"Apache-2.0"
] | 8 | 2019-03-15T23:46:08.000Z | 2020-02-06T09:16:02.000Z | """
The Code contains functions to calculate univariate statistics for categorical features, given a dataset.
"""
import numpy as np
from parallelm.mlops.stats.health.histogram_data_objects import CategoricalHistogramDataObject
class CategoricalHistogram(object):
    """Computes per-feature categorical histograms for a training dataset.

    ``fit`` records, for every column of a 2D numpy array, the unique
    category labels and their raw counts; ``get_feature_histogram_rep``
    converts that state into normalized histogram objects for MLOps.
    """

    def __init__(self):
        """Initializes empty histogram state."""
        # List of 2-row arrays (categories, counts), one entry per feature;
        # order follows self._features.
        self._prob_dist_categorical = []
        self._features = []

    def fit(self, training_feature_values, training_feature_names, num_bins, pred_bins):
        """Fits the categorical histogram state from training data.

        :param training_feature_values: 2D numpy array, one column per feature.
        :param training_feature_names: feature names matching the columns.
        :param num_bins: number of bins; currently unused, kept for API parity.
        :param pred_bins: pre-defined bins; currently unused, kept for API parity.
        :return: self
        :raises TypeError: if ``training_feature_values`` is not a numpy array.
        """
        if not isinstance(training_feature_values, np.ndarray):
            # TypeError (a subclass of Exception) is more precise than the
            # bare Exception raised previously; callers catching Exception
            # keep working.
            raise TypeError("categorical histograms are generated on numpy array only!")
        self._prob_dist_categorical = self._cal_hist_params(
            training_feature_values, num_bins=num_bins, pred_bins=pred_bins)
        self._features = training_feature_names
        return self

    def get_feature_histogram_rep(self):
        """Builds the formatted histogram representation forwarded via MLOps.

        :return: list of CategoricalHistogramDataObject, one per feature,
            with bin counts normalized to probabilities.
        """
        feature_histogram_rep = []
        for index in range(len(self._features)):
            edges = self._prob_dist_categorical[index][0]
            # np.asarray in _cal_hist_params coerces the counts to the string
            # dtype of the category labels, so convert them back to float.
            bins_str = self._prob_dist_categorical[index][1]
            bins = [float(i) for i in bins_str]
            normalized_bins = bins / np.sum(bins)
            edges_rep = [str(edge) for edge in edges]
            feature_histogram_rep.append(
                CategoricalHistogramDataObject(
                    feature_name=self._features[index],
                    edges=edges_rep,
                    bins=normalized_bins))
        return feature_histogram_rep

    @staticmethod
    def _cal_hist_params(sample, num_bins, pred_bins=None):
        """Calculate per-column category frequencies (multinomial counts).

        :param sample: A dataset that is a 2D numpy array.
        :param num_bins: Number of bins to create; currently unused, passed
            for future scalability.
        :param pred_bins: Pre-defined bins; currently unused, passed for
            future scalability.
        :return: list with one 2-row array per column: row 0 the unique
            category labels, row 1 their counts. List order matches the
            column order of ``sample``.
        """
        # Compare category labels as strings (the previous comment claimed a
        # float conversion, but categories are treated as text here).
        sample = sample.astype(str)
        prob_dist = []
        for column in range(0, sample.shape[1]):
            # Frequency of each unique value in this column. The tuple is
            # coerced by np.asarray into a single 2-row array, which turns
            # the float counts into strings (unique has a string dtype).
            unique, counts = np.unique(sample[:, column], return_counts=True)
            prob_dist.append(np.asarray((unique, counts * 1.0)))
        return prob_dist
| 42.2 | 145 | 0.645076 |
import numpy as np
from parallelm.mlops.stats.health.histogram_data_objects import CategoricalHistogramDataObject
class CategoricalHistogram(object):
    """Per-feature categorical (multinomial) histogram of a training set.

    fit() records category frequencies per column;
    get_feature_histogram_rep() renders them as
    CategoricalHistogramDataObject instances for MLOps reporting.
    """
    def __init__(self):
        # One (unique_values, counts) array per feature, parallel to
        # self._features.
        self._prob_dist_categorical = []
        self._features = []
    def fit(self, training_feature_values, training_feature_names, num_bins, pred_bins):
        """Record per-column category frequencies; returns self for chaining."""
        if not isinstance(training_feature_values, np.ndarray):
            raise Exception("categorical histograms are generated on numpy array only!")
        self._prob_dist_categorical = self._cal_hist_params(
            training_feature_values, num_bins=num_bins, pred_bins=pred_bins)
        self._features = training_feature_names
        return self
    def get_feature_histogram_rep(self):
        """Return one CategoricalHistogramDataObject per fitted feature."""
        representations = []
        for index, feature_name in enumerate(self._features):
            distribution = self._prob_dist_categorical[index]
            raw_edges, raw_counts = distribution[0], distribution[1]
            # Counts were stored in a string-typed array; recover floats.
            counts = [float(value) for value in raw_counts]
            histogram_object = CategoricalHistogramDataObject(
                feature_name=feature_name,
                edges=[str(edge) for edge in raw_edges],
                bins=counts / np.sum(counts))
            representations.append(histogram_object)
        return representations
    @staticmethod
    def _cal_hist_params(sample, num_bins, pred_bins=None):
        """Return one (unique_values, counts) array per column of *sample*.

        num_bins / pred_bins are accepted for interface stability but are
        not used by the categorical computation.
        """
        # Stringify so mixed-type columns hash/compare consistently.
        as_text = sample.astype(str)
        prob_dist = []
        for column in range(as_text.shape[1]):
            unique, counts = np.unique(as_text[:, column], return_counts=True)
            prob_dist.append(np.asarray((unique, counts * 1.0)))
        return prob_dist
| true | true |
f7209b17100218a42c80c8e984c08597d630b188 | 47 | py | Python | odoo-13.0/myaddons/SCM/__init__.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | 181 | 2016-11-11T04:39:43.000Z | 2022-03-14T21:17:19.000Z | odoo-13.0/myaddons/SCM/__init__.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | 899 | 2016-11-14T02:42:56.000Z | 2022-03-29T20:47:39.000Z | odoo-13.0/myaddons/SCM/__init__.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | 227 | 2016-11-10T17:16:59.000Z | 2022-03-26T16:46:38.000Z | from . import models
from . import controllers
| 15.666667 | 25 | 0.787234 | from . import models
from . import controllers
| true | true |
f7209b3cfc5d5bd59dca31198344266661bee81a | 94 | py | Python | vaccine_card/api/apps.py | Unanimad/lais_046_2020_etapa_2 | 630efc6b25a580be44b6cd50be6744a01221a2c4 | [
"Apache-2.0"
] | null | null | null | vaccine_card/api/apps.py | Unanimad/lais_046_2020_etapa_2 | 630efc6b25a580be44b6cd50be6744a01221a2c4 | [
"Apache-2.0"
] | null | null | null | vaccine_card/api/apps.py | Unanimad/lais_046_2020_etapa_2 | 630efc6b25a580be44b6cd50be6744a01221a2c4 | [
"Apache-2.0"
] | null | null | null | from django.apps import AppConfig
class ApiConfig(AppConfig):
    """Django AppConfig for the vaccine_card.api application."""
    name = 'vaccine_card.api'
| 15.666667 | 33 | 0.755319 | from django.apps import AppConfig
class ApiConfig(AppConfig):
    """Django AppConfig for the vaccine_card.api application."""
    name = 'vaccine_card.api'
| true | true |
f7209bc8e13eb725ec7ffbe1187a20a64ae43a57 | 3,850 | py | Python | EWR/ab6_v2/Sort.py | Koopakiller/Edu | 575c43dae24a4432e8c8fb2eda96e948cc33ec32 | [
"MIT"
] | null | null | null | EWR/ab6_v2/Sort.py | Koopakiller/Edu | 575c43dae24a4432e8c8fb2eda96e948cc33ec32 | [
"MIT"
] | null | null | null | EWR/ab6_v2/Sort.py | Koopakiller/Edu | 575c43dae24a4432e8c8fb2eda96e948cc33ec32 | [
"MIT"
] | null | null | null | # coding=utf-8
# Author: Tom Lambert
# Content: Implementierung der Sort-Klasse für ab6.
class Sort(object):
    """Implements sorting algorithms with support for statistical
    evaluation via the counter_* attributes."""
    def __init__(self):
        self.counter_swap = 0  # one swap is roughly 2 element reads plus 2 element assignments
        self.counter_list_item_assignment = 0
        self.counter_item_compare = 0
        self.counter_get_item_from_list = 0
        self.counter_add_item_to_result_list = 0
        self.counter_recursive_call = 0
        self.counter_split_list = 0
        self.counter_copy_list = 0
        self.counter_sort_call = 0
    def quick_sort(self, lst):
        """
        Sorts lst with the quick-sort algorithm and returns a new sorted list.
        Selected operations are tallied in the counter_* attributes.
        """
        self.counter_sort_call += 1
        if len(lst) > 1:
            self.counter_get_item_from_list += 1
            pivot = lst[0]
            ltp = []  # less than pivot item
            gtp = []  # greater than pivot item
            ep = []  # equals pivot item
            for item in lst:
                self.counter_get_item_from_list += 1
                self.counter_item_compare += 1
                if item < pivot:
                    self.counter_add_item_to_result_list += 1
                    ltp.append(item)
                elif item > pivot:
                    self.counter_add_item_to_result_list += 1
                    gtp.append(item)
                else:
                    self.counter_add_item_to_result_list += 1
                    ep.append(item)
            self.counter_split_list += 1
            self.counter_recursive_call += 1
            ltp = self.quick_sort(ltp)
            self.counter_recursive_call += 1
            gtp = self.quick_sort(gtp)
            result = ltp
            self.counter_add_item_to_result_list += len(ep)
            result.extend(ep)
            self.counter_add_item_to_result_list += len(gtp)
            result.extend(gtp)
            return result
        else:
            return lst
    def gnome_sort(self, lst):
        """
        Sorts lst with the gnome-sort algorithm and returns the sorted list.
        Selected operations are tallied in the counter_* attributes.
        """
        self.counter_sort_call += 1
        self.counter_copy_list += 1
        lst = list(lst)  # copy the list, because lists are mutable and passed by reference
        pos = 0
        while pos < len(lst):
            self.counter_get_item_from_list += 2
            self.counter_item_compare += 1
            if pos == 0 or lst[pos] >= lst[pos - 1]:
                pos += 1
            else:
                self.counter_swap += 1
                lst[pos], lst[pos - 1] = lst[pos - 1], lst[pos]
                pos -= 1
        return lst
    def insertion_sort(self, lst):
        """
        Sorts lst with the insertion-sort algorithm and returns the sorted list.
        Selected operations are tallied in the counter_* attributes.
        """
        self.counter_sort_call += 1
        self.counter_copy_list += 1
        lst = list(lst)  # copy the list, because lists are mutable and passed by reference
        for i in range(1, len(lst)):
            self.counter_get_item_from_list += 1
            val = lst[i]
            j = i
            while j > 0 and lst[j - 1] > val:
                self.counter_item_compare += 1
                self.counter_get_item_from_list += 1
                self.counter_list_item_assignment += 1
                lst[j] = lst[j - 1]
                j = j - 1  # shift val's slot one step left; the while condition ends the loop
            lst[j] = val
            self.counter_list_item_assignment += 1
        return lst
| 38.118812 | 103 | 0.562857 |
class Sort(object):
    """Sorting algorithms instrumented with operation counters."""
    def __init__(self):
        self.counter_swap = 0
        self.counter_list_item_assignment = 0
        self.counter_item_compare = 0
        self.counter_get_item_from_list = 0
        self.counter_add_item_to_result_list = 0
        self.counter_recursive_call = 0
        self.counter_split_list = 0
        self.counter_copy_list = 0
        self.counter_sort_call = 0
    def quick_sort(self, lst):
        """Return a new ascending list built by recursive quicksort."""
        self.counter_sort_call += 1
        if len(lst) <= 1:
            return lst
        self.counter_get_item_from_list += 1
        pivot = lst[0]
        smaller, larger, equal = [], [], []
        for value in lst:
            self.counter_get_item_from_list += 1
            self.counter_item_compare += 1
            if value < pivot:
                self.counter_add_item_to_result_list += 1
                smaller.append(value)
            elif value > pivot:
                self.counter_add_item_to_result_list += 1
                larger.append(value)
            else:
                self.counter_add_item_to_result_list += 1
                equal.append(value)
        self.counter_split_list += 1
        self.counter_recursive_call += 1
        smaller = self.quick_sort(smaller)
        self.counter_recursive_call += 1
        larger = self.quick_sort(larger)
        merged = smaller
        self.counter_add_item_to_result_list += len(equal)
        merged.extend(equal)
        self.counter_add_item_to_result_list += len(larger)
        merged.extend(larger)
        return merged
    def gnome_sort(self, lst):
        """Return a sorted copy of lst produced by gnome sort."""
        self.counter_sort_call += 1
        self.counter_copy_list += 1
        items = list(lst)
        cursor = 0
        while cursor < len(items):
            self.counter_get_item_from_list += 2
            self.counter_item_compare += 1
            in_order = cursor == 0 or items[cursor] >= items[cursor - 1]
            if in_order:
                cursor += 1
            else:
                self.counter_swap += 1
                items[cursor - 1], items[cursor] = items[cursor], items[cursor - 1]
                cursor -= 1
        return items
    def insertion_sort(self, lst):
        """Return a sorted copy of lst produced by insertion sort."""
        self.counter_sort_call += 1
        self.counter_copy_list += 1
        items = list(lst)
        for right in range(1, len(items)):
            self.counter_get_item_from_list += 1
            current = items[right]
            slot = right
            while slot > 0 and items[slot - 1] > current:
                self.counter_item_compare += 1
                self.counter_get_item_from_list += 1
                self.counter_list_item_assignment += 1
                items[slot] = items[slot - 1]
                slot -= 1
            items[slot] = current
            self.counter_list_item_assignment += 1
        return items
| true | true |
f7209bcc24d3c78f312379c3537a4b94dc7b13db | 436 | py | Python | packages/python/plotly/plotly/validators/icicle/marker/colorbar/_tickangle.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/icicle/marker/colorbar/_tickangle.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/icicle/marker/colorbar/_tickangle.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | import _plotly_utils.basevalidators
class TickangleValidator(_plotly_utils.basevalidators.AngleValidator):
    """Auto-generated validator for `icicle.marker.colorbar.tickangle`."""
    def __init__(
        self, plotly_name="tickangle", parent_name="icicle.marker.colorbar", **kwargs
    ):
        super(TickangleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # "colorbars" edit type: changing this property only re-renders
            # the colorbar, unless the caller overrides edit_type.
            edit_type=kwargs.pop("edit_type", "colorbars"),
            **kwargs,
        )
| 31.142857 | 85 | 0.665138 | import _plotly_utils.basevalidators
class TickangleValidator(_plotly_utils.basevalidators.AngleValidator):
    """Auto-generated validator for `icicle.marker.colorbar.tickangle`."""
    def __init__(
        self, plotly_name="tickangle", parent_name="icicle.marker.colorbar", **kwargs
    ):
        super(TickangleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "colorbars"),
            **kwargs,
        )
| true | true |
f7209c409ce293886a6af24bf96c863400e8931e | 4,791 | py | Python | luminoth/utils/bbox_transform_tf.py | jsdussanc/luminoth | 7637c52cc01d2826a231fef43746aa10951f99f0 | [
"BSD-3-Clause"
] | 2,584 | 2017-08-16T20:31:52.000Z | 2022-03-16T07:53:54.000Z | luminoth/utils/bbox_transform_tf.py | dun933/Tabulo | dc1c1203a40e1ecf2aaca9647f3008ab72b41438 | [
"BSD-3-Clause"
] | 197 | 2017-08-17T14:49:18.000Z | 2022-02-10T01:50:50.000Z | luminoth/utils/bbox_transform_tf.py | dun933/Tabulo | dc1c1203a40e1ecf2aaca9647f3008ab72b41438 | [
"BSD-3-Clause"
] | 462 | 2017-08-16T22:00:23.000Z | 2022-03-08T19:14:00.000Z | import tensorflow as tf
def get_width_upright(bboxes):
    """Split (x1, y1, x2, y2) boxes into width, height and center columns.

    Widths/heights use the inclusive-pixel convention (x2 - x1 + 1).
    NOTE(review): despite the "up right" naming, (urx, ury) is the box
    center (x1 + w/2, y1 + h/2).
    """
    with tf.name_scope('BoundingBoxTransform/get_width_upright'):
        bboxes = tf.cast(bboxes, tf.float32)
        x1, y1, x2, y2 = tf.split(bboxes, 4, axis=1)
        width = x2 - x1 + 1.
        height = y2 - y1 + 1.
        # Calculate up right point of bbox (urx = up right x)
        urx = x1 + .5 * width
        ury = y1 + .5 * height
        return width, height, urx, ury
def encode(bboxes, gt_boxes, variances=None):
    """Encode ground-truth boxes as (dx, dy, dw, dh) regression targets
    relative to the reference bboxes.

    dx/dy are center offsets normalized by the reference box size; dw/dh
    are log size ratios. Both are divided by the corresponding variance
    (defaults to [1., 1.], i.e. no scaling). Inverse of decode().
    """
    with tf.name_scope('BoundingBoxTransform/encode'):
        (bboxes_width, bboxes_height,
         bboxes_urx, bboxes_ury) = get_width_upright(bboxes)
        (gt_boxes_width, gt_boxes_height,
         gt_boxes_urx, gt_boxes_ury) = get_width_upright(gt_boxes)
        if variances is None:
            variances = [1., 1.]
        targets_dx = (gt_boxes_urx - bboxes_urx)/(bboxes_width * variances[0])
        targets_dy = (gt_boxes_ury - bboxes_ury)/(bboxes_height * variances[0])
        targets_dw = tf.log(gt_boxes_width / bboxes_width) / variances[1]
        targets_dh = tf.log(gt_boxes_height / bboxes_height) / variances[1]
        targets = tf.concat(
            [targets_dx, targets_dy, targets_dw, targets_dh], axis=1)
        return targets
def decode(roi, deltas, variances=None):
    """Apply (dx, dy, dw, dh) deltas to roi boxes; inverse of encode().

    Returns boxes in (x1, y1, x2, y2) order using the inclusive-pixel
    convention (hence the -1. when going back from width/height).
    """
    with tf.name_scope('BoundingBoxTransform/decode'):
        (roi_width, roi_height,
         roi_urx, roi_ury) = get_width_upright(roi)
        dx, dy, dw, dh = tf.split(deltas, 4, axis=1)
        if variances is None:
            variances = [1., 1.]
        pred_ur_x = dx * roi_width * variances[0] + roi_urx
        pred_ur_y = dy * roi_height * variances[0] + roi_ury
        pred_w = tf.exp(dw * variances[1]) * roi_width
        pred_h = tf.exp(dh * variances[1]) * roi_height
        bbox_x1 = pred_ur_x - 0.5 * pred_w
        bbox_y1 = pred_ur_y - 0.5 * pred_h
        # This -1. extra is different from reference implementation.
        bbox_x2 = pred_ur_x + 0.5 * pred_w - 1.
        bbox_y2 = pred_ur_y + 0.5 * pred_h - 1.
        bboxes = tf.concat(
            [bbox_x1, bbox_y1, bbox_x2, bbox_y2], axis=1)
        return bboxes
def clip_boxes(bboxes, imshape):
    """
    Clips bounding boxes to image boundaries based on image shape.
    Args:
        bboxes: Tensor with shape (num_bboxes, 4)
            where point order is x1, y1, x2, y2.
        imshape: Tensor with shape (2, )
            where the first value is height and the next is width.
    Returns
        Tensor with same shape as bboxes but making sure that none
        of the bboxes are outside the image.
    """
    with tf.name_scope('BoundingBoxTransform/clip_bboxes'):
        bboxes = tf.cast(bboxes, dtype=tf.float32)
        imshape = tf.cast(imshape, dtype=tf.float32)
        x1, y1, x2, y2 = tf.split(bboxes, 4, axis=1)
        width = imshape[1]
        height = imshape[0]
        # Clamp to [0, dim - 1]: inclusive pixel coordinates, matching the
        # +1 convention used in get_width_upright.
        x1 = tf.maximum(tf.minimum(x1, width - 1.0), 0.0)
        x2 = tf.maximum(tf.minimum(x2, width - 1.0), 0.0)
        y1 = tf.maximum(tf.minimum(y1, height - 1.0), 0.0)
        y2 = tf.maximum(tf.minimum(y2, height - 1.0), 0.0)
        bboxes = tf.concat([x1, y1, x2, y2], axis=1)
        return bboxes
def change_order(bboxes):
    """Change bounding box encoding order.
    TensorFlow works with the (y_min, x_min, y_max, x_max) order while we work
    with the (x_min, y_min, x_max, y_max).
    While both encoding options have its advantages and disadvantages we
    decided to use the (x_min, y_min, x_max, y_max), forcing us to switch to
    TensorFlow's every time we want to use a std function that handles bounding
    boxes.
    Args:
        bboxes: A Tensor of shape (total_bboxes, 4)
    Returns:
        bboxes: A Tensor of shape (total_bboxes, 4) with the order swapped.
    """
    with tf.name_scope('BoundingBoxTransform/change_order'):
        # Swapping the first and second coordinate of each pair is its own
        # inverse, so this converts in either direction.
        first_min, second_min, first_max, second_max = tf.unstack(
            bboxes, axis=1
        )
        bboxes = tf.stack(
            [second_min, first_min, second_max, first_max], axis=1
        )
        return bboxes
if __name__ == '__main__':
    # Smoke test: encoding a gt box relative to an anchor, decoding the
    # deltas back and clipping to the image must reproduce the gt box.
    import numpy as np
    bboxes = tf.placeholder(tf.float32)
    bboxes_val = [[10, 10, 20, 22]]
    gt_boxes = tf.placeholder(tf.float32)
    gt_boxes_val = [[11, 13, 34, 31]]
    imshape = tf.placeholder(tf.int32)
    imshape_val = (100, 100)
    deltas = encode(bboxes, gt_boxes)
    decoded_bboxes = decode(bboxes, deltas)
    final_decoded_bboxes = clip_boxes(decoded_bboxes, imshape)
    with tf.Session() as sess:
        final_decoded_bboxes = sess.run(final_decoded_bboxes, feed_dict={
            bboxes: bboxes_val,
            gt_boxes: gt_boxes_val,
            imshape: imshape_val,
        })
    assert np.all(gt_boxes_val == final_decoded_bboxes)
| 31.313725 | 79 | 0.622626 | import tensorflow as tf
def get_width_upright(bboxes):
    """Split (x1, y1, x2, y2) boxes into width, height and center columns.

    Widths/heights use the inclusive-pixel convention (x2 - x1 + 1);
    despite the historical 'upright' naming, the returned point is the
    box center.
    """
    with tf.name_scope('BoundingBoxTransform/get_width_upright'):
        boxes_f = tf.cast(bboxes, tf.float32)
        x_min, y_min, x_max, y_max = tf.split(boxes_f, 4, axis=1)
        box_w = x_max - x_min + 1.
        box_h = y_max - y_min + 1.
        center_x = x_min + .5 * box_w
        center_y = y_min + .5 * box_h
        return box_w, box_h, center_x, center_y
def encode(bboxes, gt_boxes, variances=None):
    """Encode gt boxes as (dx, dy, dw, dh) targets relative to bboxes.

    Center offsets are normalized by the reference box size, sizes become
    log ratios; both are divided by the variances (default [1., 1.]).
    Inverse of decode().
    """
    with tf.name_scope('BoundingBoxTransform/encode'):
        (bboxes_width, bboxes_height,
         bboxes_urx, bboxes_ury) = get_width_upright(bboxes)
        (gt_boxes_width, gt_boxes_height,
         gt_boxes_urx, gt_boxes_ury) = get_width_upright(gt_boxes)
        if variances is None:
            variances = [1., 1.]
        targets_dx = (gt_boxes_urx - bboxes_urx)/(bboxes_width * variances[0])
        targets_dy = (gt_boxes_ury - bboxes_ury)/(bboxes_height * variances[0])
        targets_dw = tf.log(gt_boxes_width / bboxes_width) / variances[1]
        targets_dh = tf.log(gt_boxes_height / bboxes_height) / variances[1]
        targets = tf.concat(
            [targets_dx, targets_dy, targets_dw, targets_dh], axis=1)
        return targets
def decode(roi, deltas, variances=None):
    """Apply (dx, dy, dw, dh) deltas to roi boxes; inverse of encode().

    Returns (x1, y1, x2, y2) boxes; the -1. restores the inclusive-pixel
    convention introduced by get_width_upright's +1.
    """
    with tf.name_scope('BoundingBoxTransform/decode'):
        (roi_width, roi_height,
         roi_urx, roi_ury) = get_width_upright(roi)
        dx, dy, dw, dh = tf.split(deltas, 4, axis=1)
        if variances is None:
            variances = [1., 1.]
        pred_ur_x = dx * roi_width * variances[0] + roi_urx
        pred_ur_y = dy * roi_height * variances[0] + roi_ury
        pred_w = tf.exp(dw * variances[1]) * roi_width
        pred_h = tf.exp(dh * variances[1]) * roi_height
        bbox_x1 = pred_ur_x - 0.5 * pred_w
        bbox_y1 = pred_ur_y - 0.5 * pred_h
        bbox_x2 = pred_ur_x + 0.5 * pred_w - 1.
        bbox_y2 = pred_ur_y + 0.5 * pred_h - 1.
        bboxes = tf.concat(
            [bbox_x1, bbox_y1, bbox_x2, bbox_y2], axis=1)
        return bboxes
def clip_boxes(bboxes, imshape):
    """Clamp (x1, y1, x2, y2) boxes to an image of shape (height, width).

    Every coordinate is clipped into [0, dim - 1] (inclusive pixels).
    """
    with tf.name_scope('BoundingBoxTransform/clip_bboxes'):
        boxes_f = tf.cast(bboxes, dtype=tf.float32)
        shape_f = tf.cast(imshape, dtype=tf.float32)
        x1, y1, x2, y2 = tf.split(boxes_f, 4, axis=1)
        max_x = shape_f[1] - 1.0
        max_y = shape_f[0] - 1.0
        x1 = tf.clip_by_value(x1, 0.0, max_x)
        x2 = tf.clip_by_value(x2, 0.0, max_x)
        y1 = tf.clip_by_value(y1, 0.0, max_y)
        y2 = tf.clip_by_value(y2, 0.0, max_y)
        return tf.concat([x1, y1, x2, y2], axis=1)
def change_order(bboxes):
    """Swap between (x_min, y_min, x_max, y_max) and TensorFlow's
    (y_min, x_min, y_max, x_max) box ordering.

    Swapping the two coordinates of each pair is its own inverse, so this
    converts in either direction.
    """
    with tf.name_scope('BoundingBoxTransform/change_order'):
        a_min, b_min, a_max, b_max = tf.unstack(bboxes, axis=1)
        return tf.stack([b_min, a_min, b_max, a_max], axis=1)
if __name__ == '__main__':
    # Smoke test: encode then decode (plus clipping) must round-trip the
    # ground-truth boxes exactly.
    import numpy as np
    bboxes = tf.placeholder(tf.float32)
    bboxes_val = [[10, 10, 20, 22]]
    gt_boxes = tf.placeholder(tf.float32)
    gt_boxes_val = [[11, 13, 34, 31]]
    imshape = tf.placeholder(tf.int32)
    imshape_val = (100, 100)
    deltas = encode(bboxes, gt_boxes)
    decoded_bboxes = decode(bboxes, deltas)
    final_decoded_bboxes = clip_boxes(decoded_bboxes, imshape)
    with tf.Session() as sess:
        final_decoded_bboxes = sess.run(final_decoded_bboxes, feed_dict={
            bboxes: bboxes_val,
            gt_boxes: gt_boxes_val,
            imshape: imshape_val,
        })
    assert np.all(gt_boxes_val == final_decoded_bboxes)
| true | true |
f7209df8e01166c38a64b49337433ccac2442afc | 2,808 | py | Python | 5-MaksimovKA/code/submit.py | remtav/SpaceNet7_Multi-Temporal_Solutions | ee535c61fc22bffa45331519239c6d1b044b1514 | [
"Apache-2.0"
] | 38 | 2021-02-18T07:04:54.000Z | 2022-03-22T15:31:06.000Z | 5-MaksimovKA/code/submit.py | remtav/SpaceNet7_Multi-Temporal_Solutions | ee535c61fc22bffa45331519239c6d1b044b1514 | [
"Apache-2.0"
] | 2 | 2021-02-22T18:53:19.000Z | 2021-06-22T20:28:06.000Z | 5-MaksimovKA/code/submit.py | remtav/SpaceNet7_Multi-Temporal_Solutions | ee535c61fc22bffa45331519239c6d1b044b1514 | [
"Apache-2.0"
] | 15 | 2021-02-25T17:25:40.000Z | 2022-01-31T16:59:32.000Z | import os
import tqdm
import glob
import fiona
import geopandas as gpd
from fire import Fire
def sn7_convert_geojsons_to_csv(json_dirs, output_csv_path, population='proposal'):
    '''
    Concatenate all *.geojson files under json_dirs into one CSV with
    columns (filename, id, geometry).
    Population is either "ground" or "proposal": "ground" derives filename
    from each row's image_fname attribute, "proposal" from the geojson
    file name itself.
    NOTE(review): if every file fails to read, net_df is never bound and
    the final to_csv raises NameError.
    '''
    first_file = True  # switch that will be turned off once we process the first file
    for json_dir in tqdm.tqdm(json_dirs[:]):
        json_files = sorted(glob.glob(os.path.join(json_dir, '*.geojson')))
        for json_file in tqdm.tqdm(json_files):
            try:
                df = gpd.read_file(json_file)
            except (fiona.errors.DriverError):
                # Unreadable file: warn and skip.
                message = '! Invalid dataframe for %s' % json_file
                print(message)
                continue
                # raise Exception(message)
            if population == 'ground':
                file_name_col = df.image_fname.apply(lambda x: os.path.splitext(x)[0])
            elif population == 'proposal':
                file_name_col = os.path.splitext(os.path.basename(json_file))[0]
            else:
                raise Exception('! Invalid population')
            if len(df) == 0:
                # No detections: emit a single placeholder row so the AOI
                # still appears in the CSV.
                message = '! Empty dataframe for %s' % json_file
                print(message)
                # raise Exception(message)
                df = gpd.GeoDataFrame({
                    'filename': file_name_col,
                    'id': 0,
                    'geometry': "POLYGON EMPTY",
                })
            else:
                try:
                    df = gpd.GeoDataFrame({
                        'filename': file_name_col,
                        'id': df.Id.astype(int),
                        'geometry': df.geometry,
                    })
                except:
                    # Best-effort: dump the offending frame and keep the
                    # unconverted df (NOTE(review): bare except hides the cause).
                    print(df)
            if first_file:
                net_df = df
                first_file = False
            else:
                # NOTE(review): DataFrame.append is removed in pandas >= 2.0;
                # pd.concat would be the modern equivalent.
                net_df = net_df.append(df)
    net_df.to_csv(output_csv_path, index=False)
    return net_df
def make_submit(out_file='/wdata/solution.csv'):
    """Collect per-AOI matched geojson predictions under
    /wdata/pred_jsons_match/ into a single solution CSV at *out_file*."""
    pred_top_dir = '/wdata/'
    # out_dir_csv = os.path.join(pred_top_dir, 'csvs')
    # os.makedirs(out_dir_csv, exist_ok=True)
    # prop_file = os.path.join(out_dir_csv, 'solution.csv')
    prop_file = out_file
    aoi_dirs = sorted([os.path.join(pred_top_dir, 'pred_jsons_match', aoi) \
                       for aoi in os.listdir(os.path.join(pred_top_dir, 'pred_jsons_match')) \
                       if os.path.isdir(os.path.join(pred_top_dir, 'pred_jsons_match', aoi))])
    print("aoi_dirs:", aoi_dirs)
    # Execute
    if os.path.exists(prop_file):
        os.remove(prop_file)
    net_df = sn7_convert_geojsons_to_csv(aoi_dirs, prop_file, 'proposal')
    print("prop_file:", prop_file)
if __name__ == '__main__':
Fire(make_submit) | 35.1 | 94 | 0.551994 | import os
import tqdm
import glob
import fiona
import geopandas as gpd
from fire import Fire
def sn7_convert_geojsons_to_csv(json_dirs, output_csv_path, population='proposal'):
    """Concatenate all *.geojson files under json_dirs into one CSV with
    columns (filename, id, geometry).

    population: "ground" takes filenames from each row's image_fname,
    "proposal" from the geojson file name.
    NOTE(review): if every file fails to read, net_df stays unbound and
    the final to_csv raises NameError.
    """
    first_file = True
    for json_dir in tqdm.tqdm(json_dirs[:]):
        json_files = sorted(glob.glob(os.path.join(json_dir, '*.geojson')))
        for json_file in tqdm.tqdm(json_files):
            try:
                df = gpd.read_file(json_file)
            except (fiona.errors.DriverError):
                message = '! Invalid dataframe for %s' % json_file
                print(message)
                continue
            if population == 'ground':
                file_name_col = df.image_fname.apply(lambda x: os.path.splitext(x)[0])
            elif population == 'proposal':
                file_name_col = os.path.splitext(os.path.basename(json_file))[0]
            else:
                raise Exception('! Invalid population')
            if len(df) == 0:
                # Empty detections: keep a placeholder row for the AOI.
                message = '! Empty dataframe for %s' % json_file
                print(message)
                df = gpd.GeoDataFrame({
                    'filename': file_name_col,
                    'id': 0,
                    'geometry': "POLYGON EMPTY",
                })
            else:
                try:
                    df = gpd.GeoDataFrame({
                        'filename': file_name_col,
                        'id': df.Id.astype(int),
                        'geometry': df.geometry,
                    })
                except:
                    # NOTE(review): bare except keeps the unconverted df.
                    print(df)
            if first_file:
                net_df = df
                first_file = False
            else:
                # NOTE(review): DataFrame.append is removed in pandas >= 2.0.
                net_df = net_df.append(df)
    net_df.to_csv(output_csv_path, index=False)
    return net_df
def make_submit(out_file='/wdata/solution.csv'):
    """Collect per-AOI matched geojson predictions under
    /wdata/pred_jsons_match/ into a single solution CSV at *out_file*."""
    pred_top_dir = '/wdata/'
    prop_file = out_file
    aoi_dirs = sorted([os.path.join(pred_top_dir, 'pred_jsons_match', aoi) \
                       for aoi in os.listdir(os.path.join(pred_top_dir, 'pred_jsons_match')) \
                       if os.path.isdir(os.path.join(pred_top_dir, 'pred_jsons_match', aoi))])
    print("aoi_dirs:", aoi_dirs)
    if os.path.exists(prop_file):
        os.remove(prop_file)
    net_df = sn7_convert_geojsons_to_csv(aoi_dirs, prop_file, 'proposal')
    print("prop_file:", prop_file)
print("prop_file:", prop_file)
if __name__ == '__main__':
Fire(make_submit) | true | true |
f7209e86a2976584564066d1289e7d7d2268a733 | 2,401 | py | Python | tests/commands/test_modifyoncondition.py | lpd-patrick/oaxmlapi | 1e73881c290ca3181c2d33a7b5fa74fb5f86e62c | [
"MIT"
] | 25 | 2015-05-20T01:23:39.000Z | 2021-03-01T17:13:59.000Z | tests/commands/test_modifyoncondition.py | lpd-patrick/oaxmlapi | 1e73881c290ca3181c2d33a7b5fa74fb5f86e62c | [
"MIT"
] | 16 | 2015-03-03T00:59:29.000Z | 2021-11-30T16:45:15.000Z | tests/commands/test_modifyoncondition.py | lpd-patrick/oaxmlapi | 1e73881c290ca3181c2d33a7b5fa74fb5f86e62c | [
"MIT"
] | 12 | 2016-01-04T20:06:44.000Z | 2020-09-27T20:15:27.000Z | # -*- coding: utf-8
from __future__ import absolute_import
import unittest
from oaxmlapi import commands, datatypes
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
class TestModifyOnConditionClass(unittest.TestCase):
    """Checks string, XML-element and serialized forms of ModifyOnCondition."""
    def test_str(self):
        """str() renders the short tag form."""
        slip = datatypes.Datatype(
            'Slip',
            {'id': '1234'}
        )
        date = datatypes.Datatype(
            'Date',
            {'year': '2012'}
        )
        self.assertEqual(
            str(commands.ModifyOnCondition('Slip', slip, date)),
            '<ModifyOnCondition type=Slip>'
        )
    def test_modifyoncondition(self):
        """modify() builds an ElementTree element."""
        slip = datatypes.Datatype(
            'Slip',
            {'id': '1234'}
        )
        date = datatypes.Datatype(
            'Date',
            {'year': '2012'}
        )
        self.assertIsInstance(
            commands.ModifyOnCondition('Slip', slip, date).modify(),
            ET.Element
        )
    def test_tostring(self):
        """tostring() emits the compact XML bytes."""
        slip = datatypes.Datatype(
            'Slip',
            {'id': '1234'}
        )
        date = datatypes.Datatype(
            'Date',
            {'year': '2012'}
        )
        self.assertEqual(
            commands.ModifyOnCondition('Slip', slip, date).tostring(),
            (
                b'<ModifyOnCondition condition="if-not-updated" type="Slip">'
                b'<Slip><id>1234</id></Slip><Date><year>2012</year></Date>'
                b'</ModifyOnCondition>'
            )
        )
    def test_prettify(self):
        """prettify() emits indented XML with a declaration."""
        slip = datatypes.Datatype(
            'Slip',
            {'id': '1234'}
        )
        date = datatypes.Datatype(
            'Date',
            {'year': '2012'}
        )
        self.assertEqual(
            commands.ModifyOnCondition('Slip', slip, date).prettify(),
            (
                b'<?xml version="1.0" encoding="utf-8"?>\n'
                b'<ModifyOnCondition condition="if-not-updated" type="Slip">\n'
                b'  <Slip>\n'
                b'    <id>1234</id>\n'
                b'  </Slip>\n'
                b'  <Date>\n'
                b'    <year>2012</year>\n'
                b'  </Date>\n'
                b'</ModifyOnCondition>\n'
            )
        )
# Run the suite on import/execution; verbosity=2 prints each test name.
suite = unittest.TestLoader().loadTestsFromTestCase(TestModifyOnConditionClass)
unittest.TextTestRunner(verbosity=2).run(suite)
| 27.918605 | 79 | 0.493544 |
from __future__ import absolute_import
import unittest
from oaxmlapi import commands, datatypes
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
class TestModifyOnConditionClass(unittest.TestCase):
    """Checks string, XML-element and serialized forms of ModifyOnCondition."""
    @staticmethod
    def _sample_args():
        # Shared fixture: a Slip payload plus the Date used as condition.
        return (datatypes.Datatype('Slip', {'id': '1234'}),
                datatypes.Datatype('Date', {'year': '2012'}))
    def test_str(self):
        slip, date = self._sample_args()
        command = commands.ModifyOnCondition('Slip', slip, date)
        self.assertEqual(str(command), '<ModifyOnCondition type=Slip>')
    def test_modifyoncondition(self):
        slip, date = self._sample_args()
        command = commands.ModifyOnCondition('Slip', slip, date)
        self.assertIsInstance(command.modify(), ET.Element)
    def test_tostring(self):
        slip, date = self._sample_args()
        command = commands.ModifyOnCondition('Slip', slip, date)
        expected = (
            b'<ModifyOnCondition condition="if-not-updated" type="Slip">'
            b'<Slip><id>1234</id></Slip><Date><year>2012</year></Date>'
            b'</ModifyOnCondition>'
        )
        self.assertEqual(command.tostring(), expected)
    def test_prettify(self):
        slip, date = self._sample_args()
        command = commands.ModifyOnCondition('Slip', slip, date)
        expected = (
            b'<?xml version="1.0" encoding="utf-8"?>\n'
            b'<ModifyOnCondition condition="if-not-updated" type="Slip">\n'
            b'  <Slip>\n'
            b'    <id>1234</id>\n'
            b'  </Slip>\n'
            b'  <Date>\n'
            b'    <year>2012</year>\n'
            b'  </Date>\n'
            b'</ModifyOnCondition>\n'
        )
        self.assertEqual(command.prettify(), expected)
# Run the suite on import/execution; verbosity=2 prints each test name.
suite = unittest.TestLoader().loadTestsFromTestCase(TestModifyOnConditionClass)
unittest.TextTestRunner(verbosity=2).run(suite)
| true | true |
f7209ed2b871197ee55089411c3d1ab6b323fbba | 3,956 | py | Python | docs/user_guides/tools/elastic_ctr/elastic_ctr/dist_train_demo.py | shiyutang/docs | b05612213a08daf9f225abce08fc42f924ef51ad | [
"Apache-2.0"
] | 104 | 2018-09-04T08:16:05.000Z | 2021-05-06T20:45:26.000Z | docs/user_guides/tools/elastic_ctr/elastic_ctr/dist_train_demo.py | shiyutang/docs | b05612213a08daf9f225abce08fc42f924ef51ad | [
"Apache-2.0"
] | 1,582 | 2018-06-25T06:14:11.000Z | 2021-05-14T16:00:43.000Z | docs/user_guides/tools/elastic_ctr/elastic_ctr/dist_train_demo.py | shiyutang/docs | b05612213a08daf9f225abce08fc42f924ef51ad | [
"Apache-2.0"
] | 387 | 2018-06-20T07:42:32.000Z | 2021-05-14T08:35:28.000Z | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import paddle.fluid.core as core
import math
import os
import sys
import numpy
import paddle
import paddle.fluid as fluid
BATCH_SIZE = 64
PASS_NUM = 1
def loss_net(hidden, label):
    """Softmax classifier head: returns (prediction, mean cross-entropy, accuracy)."""
    prediction = fluid.layers.fc(input=hidden, size=10, act='softmax')
    loss = fluid.layers.cross_entropy(input=prediction, label=label)
    avg_loss = fluid.layers.mean(loss)
    acc = fluid.layers.accuracy(input=prediction, label=label)
    return prediction, avg_loss, acc
def conv_net(img, label):
    """Two conv+pool stages (LeNet-style) with batch norm, feeding loss_net."""
    conv_pool_1 = fluid.nets.simple_img_conv_pool(
        input=img,
        filter_size=5,
        num_filters=20,
        pool_size=2,
        pool_stride=2,
        act="relu")
    conv_pool_1 = fluid.layers.batch_norm(conv_pool_1)
    conv_pool_2 = fluid.nets.simple_img_conv_pool(
        input=conv_pool_1,
        filter_size=5,
        num_filters=50,
        pool_size=2,
        pool_stride=2,
        act="relu")
    return loss_net(conv_pool_2, label)
def train(use_cuda, role, endpoints, current_endpoint, trainer_id, trainers):
    """Run one side of a parameter-server distributed MNIST training job.

    :param use_cuda: train on GPU (trainer side); bails out if CUDA absent.
    :param role: "pserver" or "trainer".
    :param endpoints: comma-separated list of all pserver endpoints.
    :param current_endpoint: this process's endpoint (pserver role).
    :param trainer_id: zero-based id of this trainer.
    :param trainers: total number of trainer processes.
    """
    if use_cuda and not fluid.core.is_compiled_with_cuda():
        return
    img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32')
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
    prediction, avg_loss, acc = conv_net(img, label)
    # NOTE(review): test_program is cloned but never executed below.
    test_program = fluid.default_main_program().clone(for_test=True)
    optimizer = fluid.optimizer.Adam(learning_rate=0.001)
    optimizer.minimize(avg_loss)
    # Transpile the single-process program into pserver/trainer programs.
    t = fluid.DistributeTranspiler()
    t.transpile(trainer_id, pservers=endpoints, trainers=trainers)
    if role == "pserver":
        prog = t.get_pserver_program(current_endpoint)
        startup = t.get_startup_program(current_endpoint, pserver_program=prog)
        exe = fluid.Executor(fluid.CPUPlace())
        exe.run(startup)
        exe.run(prog)  # blocks, serving parameters to trainers
    elif role == "trainer":
        prog = t.get_trainer_program()
        place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
        exe = fluid.Executor(place)
        train_reader = paddle.batch(
            paddle.reader.shuffle(paddle.dataset.mnist.train(), buf_size=500),
            batch_size=BATCH_SIZE)
        test_reader = paddle.batch(
            paddle.dataset.mnist.test(), batch_size=BATCH_SIZE)
        feeder = fluid.DataFeeder(feed_list=[img, label], place=place)
        exe.run(fluid.default_startup_program())
        for pass_id in range(PASS_NUM):
            for batch_id, data in enumerate(train_reader()):
                acc_np, avg_loss_np = exe.run(
                    prog, feed=feeder.feed(data), fetch_list=[acc, avg_loss])
                if (batch_id + 1) % 10 == 0:
                    print(
                        'PassID {0:1}, BatchID {1:04}, Loss {2:2.2}, Acc {3:2.2}'.
                        format(pass_id, batch_id + 1,
                               float(avg_loss_np.mean()), float(
                                   acc_np.mean())))
if __name__ == '__main__':
    # CLI: python <script> role endpoints current_endpoint trainer_id trainers
    if len(sys.argv) != 6:
        print(
            "Usage: python %s role endpoints current_endpoint trainer_id trainers"
            % sys.argv[0])
        exit(0)
    role, endpoints, current_endpoint, trainer_id, trainers = \
        sys.argv[1:]
    train(True, role, endpoints, current_endpoint,
          int(trainer_id), int(trainers))
| 35.321429 | 82 | 0.65091 |
from __future__ import print_function
import paddle.fluid.core as core
import math
import os
import sys
import numpy
import paddle
import paddle.fluid as fluid
BATCH_SIZE = 64
PASS_NUM = 1
def loss_net(hidden, label):
    """Softmax classifier head: returns (prediction, mean cross-entropy, accuracy)."""
    prediction = fluid.layers.fc(input=hidden, size=10, act='softmax')
    per_sample_loss = fluid.layers.cross_entropy(input=prediction, label=label)
    mean_loss = fluid.layers.mean(per_sample_loss)
    accuracy = fluid.layers.accuracy(input=prediction, label=label)
    return prediction, mean_loss, accuracy
def conv_net(img, label):
    """Two conv+pool stages (LeNet-style) with batch norm, feeding loss_net."""
    stage_one = fluid.nets.simple_img_conv_pool(
        input=img,
        filter_size=5,
        num_filters=20,
        pool_size=2,
        pool_stride=2,
        act="relu")
    normalized = fluid.layers.batch_norm(stage_one)
    stage_two = fluid.nets.simple_img_conv_pool(
        input=normalized,
        filter_size=5,
        num_filters=50,
        pool_size=2,
        pool_stride=2,
        act="relu")
    return loss_net(stage_two, label)
def train(use_cuda, role, endpoints, current_endpoint, trainer_id, trainers):
    """Run one node of a distributed (parameter-server) MNIST training job.

    role is "pserver" (hosts/updates parameters; blocks serving requests) or
    "trainer" (runs forward/backward passes).  endpoints lists all pserver
    addresses; current_endpoint identifies this node.
    """
    # Bail out quietly if CUDA was requested but this Paddle build lacks it.
    if use_cuda and not fluid.core.is_compiled_with_cuda():
        return
    img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32')
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
    prediction, avg_loss, acc = conv_net(img, label)
    # NOTE(review): test_program (and test_reader below) are built but never
    # executed in this function.
    test_program = fluid.default_main_program().clone(for_test=True)
    optimizer = fluid.optimizer.Adam(learning_rate=0.001)
    optimizer.minimize(avg_loss)
    # Split the single-process program into per-role distributed programs.
    t = fluid.DistributeTranspiler()
    t.transpile(trainer_id, pservers=endpoints, trainers=trainers)
    if role == "pserver":
        prog = t.get_pserver_program(current_endpoint)
        startup = t.get_startup_program(current_endpoint, pserver_program=prog)
        exe = fluid.Executor(fluid.CPUPlace())
        exe.run(startup)
        # Serves parameter updates; does not return during training.
        exe.run(prog)
    elif role == "trainer":
        prog = t.get_trainer_program()
        place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
        exe = fluid.Executor(place)
        train_reader = paddle.batch(
            paddle.reader.shuffle(paddle.dataset.mnist.train(), buf_size=500),
            batch_size=BATCH_SIZE)
        test_reader = paddle.batch(
            paddle.dataset.mnist.test(), batch_size=BATCH_SIZE)
        feeder = fluid.DataFeeder(feed_list=[img, label], place=place)
        exe.run(fluid.default_startup_program())
        for pass_id in range(PASS_NUM):
            for batch_id, data in enumerate(train_reader()):
                acc_np, avg_loss_np = exe.run(
                    prog, feed=feeder.feed(data), fetch_list=[acc, avg_loss])
                # Report progress every 10 batches.
                if (batch_id + 1) % 10 == 0:
                    print(
                        'PassID {0:1}, BatchID {1:04}, Loss {2:2.2}, Acc {3:2.2}'.
                        format(pass_id, batch_id + 1,
                               float(avg_loss_np.mean()), float(
                                   acc_np.mean())))
if __name__ == '__main__':
    # Expected CLI: role endpoints current_endpoint trainer_id trainers
    if len(sys.argv) != 6:
        # Usage errors go to stderr and exit non-zero so shell scripts and
        # supervisors can detect the failure (previously printed to stdout
        # and exited 0, signalling success on bad usage).
        print(
            "Usage: python %s role endpoints current_endpoint trainer_id trainers"
            % sys.argv[0],
            file=sys.stderr)
        sys.exit(1)
    role, endpoints, current_endpoint, trainer_id, trainers = sys.argv[1:]
    train(True, role, endpoints, current_endpoint,
          int(trainer_id), int(trainers))
| true | true |
f7209f8d535077c8af62477c05e769c4ef76e2d5 | 9,895 | py | Python | mazda3_joystick.py | moralrecordings/elm327_joystick | edebae95f24913f7c6caed98751fb54477743169 | [
"BSD-3-Clause"
] | 7 | 2017-12-18T13:11:18.000Z | 2022-01-08T21:13:26.000Z | mazda3_joystick.py | moralrecordings/elm327_joystick | edebae95f24913f7c6caed98751fb54477743169 | [
"BSD-3-Clause"
] | null | null | null | mazda3_joystick.py | moralrecordings/elm327_joystick | edebae95f24913f7c6caed98751fb54477743169 | [
"BSD-3-Clause"
] | 3 | 2018-10-27T21:37:01.000Z | 2022-01-08T21:13:24.000Z | #!/usr/bin/env python3
import uinput
from elm327 import ELM327, PROTOCOLS
from mrcrowbar import models as mrc
import math
import time
from optparse import OptionParser
class OptParser( OptionParser ):
    """OptionParser variant that keeps the epilog's line breaks.

    The stock OptionParser re-wraps the whole epilog into one paragraph;
    here each line is wrapped on its own so tabular text (such as the
    ELM327 protocol listing) survives --help output intact.
    """
    def format_epilog( self, formatter ):
        wrapped_lines = [formatter._format_text( line ) for line in self.epilog.split( '\n' )]
        return '\n' + '\n'.join( wrapped_lines ) + '\n'
class Steering( mrc.Block ):
    """Decoder for the steering-angle CAN frame (ID 0x4da; see Mazda3.update)."""
    # Raw travel of the 16-bit value either side of the 0x8000 centre point.
    RANGE = 0x00D2
    axis_raw = mrc.UInt16_BE( 0x00 )
    @property
    def axis( self ):
        # Centre on 0x8000, then scale and clamp to a [-255, 255] joystick axis.
        return min( max( (255*(self.axis_raw - 0x8000)//self.RANGE), -255 ), 255 )
class Accelerator( mrc.Block ):
    """Decoder for the accelerator-pedal CAN frame (ID 0x201; see Mazda3.update)."""
    # Raw span of the pedal byte used for scaling.
    RANGE = 0xC8
    axis_raw = mrc.UInt8( 0x06 )
    @property
    def axis( self ):
        # Scale and clamp the raw pedal byte to a [0, 255] joystick axis.
        return min( max( (255*(self.axis_raw)//self.RANGE), 0 ), 255 )
class Brake( mrc.Block ):
    """Decoder for the brake CAN frame (ID 0x205; see Mazda3.update)."""
    # Single on/off bit: brake pedal pressed.
    button = mrc.Bits( 0x02, 0b01000000 )
class Cruise( mrc.Block ):
    """Decoder for the cruise-control CAN frame (ID 0x4ec; see Mazda3.update)."""
    # Single on/off bit: cruise button pressed.
    button = mrc.Bits( 0x00, 0b10000000 )
class Controls( mrc.Block ):
    """Decoder for the body-controls CAN frame (ID 0x433; see Mazda3.update)."""
    # Driver's door open bit.
    driver_door = mrc.Bits( 0x00, 0b10000000 )
    # High-beam stalk bit.
    high_beams = mrc.Bits( 0x03, 0b01000000 )
class Mazda3:
    """Base controller: decodes Mazda 3 CAN frames and drives a uinput device.

    Subclasses supply NAME plus a uinput capability list (DEVICE) and
    implement set_controls() to map the decoded car state onto input events.
    """
    # How long a momentary input (cruise button, driver's door) stays
    # latched as "pressed" after its last state change.
    LATCH_TIME = 0.1
    # Thresholds used by key-emitting subclasses to binarise the analogue axes.
    PRESS_THRESHOLD = 32
    STEER_THRESHOLD = 64
    SHOVE_THRESHOLD = 128
    def __init__( self, name, mapping ):
        """Create the uinput device and zero all decoded state."""
        print( 'Creating uinput device "{}"...'.format( name ) )
        self.device = uinput.Device( mapping, name )
        self.steering = 0
        self.accelerator = 0
        self.brake = 0
        self.high_beams = 0
        # Start the latch timers already expired-after-LATCH_TIME.
        self.cruise_t = self.driver_door_t = time.time() + self.LATCH_TIME
        self.cruise = 0
        self.driver_door = 0
        self.cruise_prev = 0
        self.driver_door_prev = 0
    def update( self, msg_id, msg_b ):
        """Decode one CAN frame, refresh latch timers, then emit events."""
        t = time.time()
        self.cruise_prev = self.cruise
        self.driver_door_prev = self.driver_door
        # Dispatch on CAN arbitration ID to the matching frame decoder.
        if msg_id == 0x4da:
            self.steering = Steering( msg_b ).axis
        elif msg_id == 0x201:
            self.accelerator = Accelerator( msg_b ).axis
        elif msg_id == 0x205:
            self.brake = Brake( msg_b ).button
        elif msg_id == 0x4ec:
            self.cruise = Cruise( msg_b ).button
        elif msg_id == 0x433:
            obj = Controls( msg_b )
            self.high_beams = obj.high_beams
            self.driver_door = obj.driver_door
        else:
            # Unknown frame: nothing decoded, don't emit.
            return
        # Restart the latch timer on any edge of the momentary inputs.
        if self.cruise != self.cruise_prev:
            self.cruise_t = t
        if self.driver_door != self.driver_door_prev:
            self.driver_door_t = t
        self.set_controls()
        return
    def set_controls( self ):
        """Subclass hook: translate the decoded state into uinput events."""
        pass
class Mazda3Joystick( Mazda3 ):
    """Generic joystick mapping: wheel/gas axes plus four buttons."""
    NAME = 'Mazda 3 Joystick'
    # uinput capabilities; axis tuples are (min, max, fuzz, flat) per the
    # python-uinput convention.
    DEVICE = [
        uinput.ABS_WHEEL + (-255, 255, 0, 0),
        uinput.ABS_GAS + (0, 255, 0, 0),
        uinput.BTN_0,
        uinput.BTN_1,
        uinput.BTN_2,
        uinput.BTN_3
    ]
    def __init__( self ):
        super().__init__( name=self.NAME, mapping=self.DEVICE )
    def set_controls( self ):
        """Emit the latest car state as joystick axes/buttons."""
        t = time.time()
        self.device.emit( uinput.ABS_WHEEL, self.steering )
        self.device.emit( uinput.ABS_GAS, self.accelerator )
        self.device.emit( uinput.BTN_0, self.brake )
        self.device.emit( uinput.BTN_1, self.high_beams )
        # Momentary inputs stay "pressed" until their latch window expires.
        self.device.emit( uinput.BTN_2, 1 if t < (self.cruise_t + self.LATCH_TIME) else 0 )
        self.device.emit( uinput.BTN_3, 1 if t < (self.driver_door_t + self.LATCH_TIME) else 0 )
        return
class Mazda3Doom( Mazda3Joystick ):
    """Joystick variant for Doom: same behaviour, but the gas axis is
    registered with a signed [-255, 255] range (set_controls is inherited)."""
    NAME = 'Mazda 3 Doom'
    DEVICE = [
        uinput.ABS_WHEEL + (-255, 255, 0, 0),
        uinput.ABS_GAS + (-255, 255, 0, 0),
        uinput.BTN_0,
        uinput.BTN_1,
        uinput.BTN_2,
        uinput.BTN_3
    ]
class Mazda3DOS( Mazda3Joystick ):
    """Joystick variant for DOS games: signed gas axis, with the pedal's
    [0, 255] reading remapped onto [-255, 255]."""
    NAME = 'Mazda 3 DOS'
    DEVICE = [
        uinput.ABS_WHEEL + (-255, 255, 0, 0),
        uinput.ABS_GAS + (-255, 255, 0, 0),
        uinput.BTN_0,
        uinput.BTN_1,
        uinput.BTN_2,
        uinput.BTN_3
    ]
    def set_controls( self ):
        """Emit the car state, rescaling the accelerator to the signed axis."""
        t = time.time()
        self.device.emit( uinput.ABS_WHEEL, self.steering )
        # Map accelerator 0..255 -> -255..255 (released pedal = full negative).
        self.device.emit( uinput.ABS_GAS, self.accelerator*2-255 )
        self.device.emit( uinput.BTN_0, self.brake )
        self.device.emit( uinput.BTN_1, self.high_beams )
        # Momentary inputs stay "pressed" until their latch window expires.
        self.device.emit( uinput.BTN_2, 1 if t < (self.cruise_t + self.LATCH_TIME) else 0 )
        self.device.emit( uinput.BTN_3, 1 if t < (self.driver_door_t + self.LATCH_TIME) else 0 )
        return
class Mazda3Descent( Mazda3 ):
    """Descent mapping: joystick axes/buttons plus KEY_UP/KEY_DOWN driven by
    the high-beam stalk, with a double-tap gesture selecting between them."""
    NAME = 'Mazda 3 Descent'
    DEVICE = [
        uinput.ABS_WHEEL + (-255, 255, 0, 0),
        uinput.ABS_GAS + (-255, 255, 0, 0),
        uinput.BTN_0,
        uinput.BTN_1,
        uinput.BTN_2,
        uinput.BTN_3,
        uinput.KEY_UP,
        uinput.KEY_DOWN
    ]
    # Two high-beam presses within this window count as a double tap.
    DOUBLE_TAP = 0.5
    def __init__( self ):
        super().__init__( name=self.NAME, mapping=self.DEVICE )
        self.high_beams_prev = 0
        self.high_beams_t = time.time()
        self.high_beams_key = uinput.KEY_DOWN
    def update( self, msg_id, msg_b ):
        """Decode the frame as usual, then track high-beam edges for keys."""
        t = time.time()
        self.high_beams_prev = self.high_beams
        super().update( msg_id, msg_b )
        if self.high_beams != self.high_beams_prev:
            if self.high_beams:
                # Rising edge: a quick second tap emits KEY_UP instead of
                # KEY_DOWN; the chosen key is held until the stalk releases.
                self.high_beams_key = uinput.KEY_UP if (t - self.high_beams_t < self.DOUBLE_TAP) else uinput.KEY_DOWN
                self.device.emit( self.high_beams_key, 1 )
                self.high_beams_t = t
            else:
                # Falling edge: release whichever key was pressed.
                self.device.emit( self.high_beams_key, 0 )
    def set_controls( self ):
        """Emit axes and latched buttons (high beams handled in update)."""
        t = time.time()
        self.device.emit( uinput.ABS_WHEEL, self.steering )
        self.device.emit( uinput.ABS_GAS, self.accelerator )
        self.device.emit( uinput.BTN_0, self.brake )
        self.device.emit( uinput.BTN_2, 1 if t < (self.cruise_t + self.LATCH_TIME) else 0 )
        self.device.emit( uinput.BTN_3, 1 if t < (self.driver_door_t + self.LATCH_TIME) else 0 )
        return
class Mazda3Grim( Mazda3 ):
    """Grim Fandango mapping: keyboard-only, with analogue axes binarised
    against the class thresholds."""
    NAME = 'Mazda 3 Grim Fandango'
    DEVICE = [
        uinput.KEY_LEFT,
        uinput.KEY_UP,
        uinput.KEY_RIGHT,
        uinput.KEY_U,
        uinput.KEY_LEFTSHIFT,
        uinput.KEY_E,
        uinput.KEY_P,
        uinput.KEY_I
    ]
    def __init__( self ):
        super().__init__( name=self.NAME, mapping=self.DEVICE )
    def set_controls( self ):
        """Translate car state into Grim Fandango key presses."""
        t = time.time()
        # Steering past the threshold turns left/right.
        self.device.emit( uinput.KEY_LEFT, 1 if self.steering < -self.STEER_THRESHOLD else 0 )
        self.device.emit( uinput.KEY_RIGHT, 1 if self.steering > self.STEER_THRESHOLD else 0 )
        # Light pedal walks forward; a deep press also holds shift (run).
        self.device.emit( uinput.KEY_UP, 1 if self.accelerator > self.PRESS_THRESHOLD else 0 )
        self.device.emit( uinput.KEY_LEFTSHIFT, 1 if self.accelerator > self.SHOVE_THRESHOLD else 0 )
        self.device.emit( uinput.KEY_U, self.brake )
        self.device.emit( uinput.KEY_E, self.high_beams )
        # Momentary inputs stay "pressed" until their latch window expires.
        self.device.emit( uinput.KEY_P, 1 if t < self.cruise_t + self.LATCH_TIME else 0 )
        self.device.emit( uinput.KEY_I, 1 if t < self.driver_door_t + self.LATCH_TIME else 0 )
        return
class Mazda3Sonic( Mazda3 ):
    """Sonic mapping: arrow keys, Z to accelerate, Enter latched from cruise."""
    NAME = 'Mazda 3 Sonic'
    DEVICE = [
        uinput.KEY_LEFT,
        uinput.KEY_UP,
        uinput.KEY_RIGHT,
        uinput.KEY_DOWN,
        uinput.KEY_Z,
        uinput.KEY_ENTER
    ]
    def __init__( self ):
        super().__init__( name=self.NAME, mapping=self.DEVICE )
    def set_controls( self ):
        """Translate car state into Sonic key presses."""
        t = time.time()
        # Steering past the threshold turns left/right.
        self.device.emit( uinput.KEY_LEFT, 1 if self.steering < -self.STEER_THRESHOLD else 0 )
        self.device.emit( uinput.KEY_RIGHT, 1 if self.steering > self.STEER_THRESHOLD else 0 )
        self.device.emit( uinput.KEY_Z, 1 if self.accelerator > self.PRESS_THRESHOLD else 0 )
        self.device.emit( uinput.KEY_DOWN, self.brake )
        self.device.emit( uinput.KEY_UP, self.high_beams )
        # Cruise button stays "pressed" until its latch window expires.
        self.device.emit( uinput.KEY_ENTER, 1 if t < self.cruise_t + self.LATCH_TIME else 0 )
        return
# CLI name -> controller class; keys are the valid --game choices.
CONTROLLERS = {
    'joystick': Mazda3Joystick,
    'grim': Mazda3Grim,
    'descent': Mazda3Descent,
    'doom': Mazda3Doom,
    'dos': Mazda3DOS,
    'sonic': Mazda3Sonic,
}
if __name__ == '__main__':
    usage = 'Usage: %prog [options]'
    parser = OptParser( epilog='Protocols supported by the ELM327:\n{}'.format( PROTOCOLS ) )
    parser.add_option( '-g', '--game', dest='game', help='Game configuration to use (choices: {})'.format( ' '.join( CONTROLLERS.keys() ) ) )
    parser.add_option( '-d', '--device', dest='device', help='Path to ELM327 serial device' )
    parser.add_option( '-b', '--baudrate', dest='baud_rate', help='Baud rate' )
    parser.add_option( '-p', '--protocol', dest='protocol', help='ELM327 message protocol to use' )
    (options, argv) = parser.parse_args()
    args = {}
    # Each setting can come from its option flag or, failing that, from the
    # positional arguments: game device baudrate protocol.
    controller_type = 'joystick'
    if options.game and options.game in CONTROLLERS:
        controller_type = options.game
    elif len( argv ) >= 1 and argv[0] in CONTROLLERS:
        controller_type = argv[0]
    controller = CONTROLLERS[controller_type]()
    if options.device:
        args['device'] = options.device
    elif len( argv ) >= 2:
        args['device'] = argv[1]
    if options.baud_rate:
        args['baud_rate'] = options.baud_rate
    elif len( argv ) >= 3:
        args['baud_rate'] = argv[2]
    if options.protocol:
        args['protocol'] = options.protocol
    elif len( argv ) >= 4:
        args['protocol'] = argv[3]
    elm = ELM327( **args )
    elm.reset()
    # Only pass through the CAN IDs that Mazda3.update knows how to decode.
    elm.set_can_whitelist( [0x4da, 0x201, 0x205, 0x4ec, 0x433] )
    elm.start_can()
    try:
        # Main loop: feed every received frame to the controller; negative
        # IDs flag frames that could not be parsed.
        while True:
            msg_id, msg_b = elm.recv_can()
            if msg_id >= 0:
                controller.update( msg_id, msg_b )
            else:
                print('-- Miss: {}'.format( msg_b ))
    except EOFError:
        print('-- Hit the end')
    except KeyboardInterrupt:
        pass
    # Leave the ELM327 back at its command prompt on exit.
    elm.get_prompt()
| 30.167683 | 141 | 0.597979 |
import uinput
from elm327 import ELM327, PROTOCOLS
from mrcrowbar import models as mrc
import math
import time
from optparse import OptionParser
class OptParser( OptionParser ):
def format_epilog( self, formatter ):
return '\n{}\n'.format( '\n'.join( [formatter._format_text( x ) for x in self.epilog.split( '\n' )] ) )
class Steering( mrc.Block ):
RANGE = 0x00D2
axis_raw = mrc.UInt16_BE( 0x00 )
@property
def axis( self ):
return min( max( (255*(self.axis_raw - 0x8000)//self.RANGE), -255 ), 255 )
class Accelerator( mrc.Block ):
RANGE = 0xC8
axis_raw = mrc.UInt8( 0x06 )
@property
def axis( self ):
return min( max( (255*(self.axis_raw)//self.RANGE), 0 ), 255 )
class Brake( mrc.Block ):
button = mrc.Bits( 0x02, 0b01000000 )
class Cruise( mrc.Block ):
button = mrc.Bits( 0x00, 0b10000000 )
class Controls( mrc.Block ):
driver_door = mrc.Bits( 0x00, 0b10000000 )
high_beams = mrc.Bits( 0x03, 0b01000000 )
class Mazda3:
LATCH_TIME = 0.1
PRESS_THRESHOLD = 32
STEER_THRESHOLD = 64
SHOVE_THRESHOLD = 128
def __init__( self, name, mapping ):
print( 'Creating uinput device "{}"...'.format( name ) )
self.device = uinput.Device( mapping, name )
self.steering = 0
self.accelerator = 0
self.brake = 0
self.high_beams = 0
self.cruise_t = self.driver_door_t = time.time() + self.LATCH_TIME
self.cruise = 0
self.driver_door = 0
self.cruise_prev = 0
self.driver_door_prev = 0
def update( self, msg_id, msg_b ):
t = time.time()
self.cruise_prev = self.cruise
self.driver_door_prev = self.driver_door
if msg_id == 0x4da:
self.steering = Steering( msg_b ).axis
elif msg_id == 0x201:
self.accelerator = Accelerator( msg_b ).axis
elif msg_id == 0x205:
self.brake = Brake( msg_b ).button
elif msg_id == 0x4ec:
self.cruise = Cruise( msg_b ).button
elif msg_id == 0x433:
obj = Controls( msg_b )
self.high_beams = obj.high_beams
self.driver_door = obj.driver_door
else:
return
if self.cruise != self.cruise_prev:
self.cruise_t = t
if self.driver_door != self.driver_door_prev:
self.driver_door_t = t
self.set_controls()
return
def set_controls( self ):
pass
class Mazda3Joystick( Mazda3 ):
NAME = 'Mazda 3 Joystick'
DEVICE = [
uinput.ABS_WHEEL + (-255, 255, 0, 0),
uinput.ABS_GAS + (0, 255, 0, 0),
uinput.BTN_0,
uinput.BTN_1,
uinput.BTN_2,
uinput.BTN_3
]
def __init__( self ):
super().__init__( name=self.NAME, mapping=self.DEVICE )
def set_controls( self ):
t = time.time()
self.device.emit( uinput.ABS_WHEEL, self.steering )
self.device.emit( uinput.ABS_GAS, self.accelerator )
self.device.emit( uinput.BTN_0, self.brake )
self.device.emit( uinput.BTN_1, self.high_beams )
self.device.emit( uinput.BTN_2, 1 if t < (self.cruise_t + self.LATCH_TIME) else 0 )
self.device.emit( uinput.BTN_3, 1 if t < (self.driver_door_t + self.LATCH_TIME) else 0 )
return
class Mazda3Doom( Mazda3Joystick ):
NAME = 'Mazda 3 Doom'
DEVICE = [
uinput.ABS_WHEEL + (-255, 255, 0, 0),
uinput.ABS_GAS + (-255, 255, 0, 0),
uinput.BTN_0,
uinput.BTN_1,
uinput.BTN_2,
uinput.BTN_3
]
class Mazda3DOS( Mazda3Joystick ):
NAME = 'Mazda 3 DOS'
DEVICE = [
uinput.ABS_WHEEL + (-255, 255, 0, 0),
uinput.ABS_GAS + (-255, 255, 0, 0),
uinput.BTN_0,
uinput.BTN_1,
uinput.BTN_2,
uinput.BTN_3
]
def set_controls( self ):
t = time.time()
self.device.emit( uinput.ABS_WHEEL, self.steering )
self.device.emit( uinput.ABS_GAS, self.accelerator*2-255 )
self.device.emit( uinput.BTN_0, self.brake )
self.device.emit( uinput.BTN_1, self.high_beams )
self.device.emit( uinput.BTN_2, 1 if t < (self.cruise_t + self.LATCH_TIME) else 0 )
self.device.emit( uinput.BTN_3, 1 if t < (self.driver_door_t + self.LATCH_TIME) else 0 )
return
class Mazda3Descent( Mazda3 ):
NAME = 'Mazda 3 Descent'
DEVICE = [
uinput.ABS_WHEEL + (-255, 255, 0, 0),
uinput.ABS_GAS + (-255, 255, 0, 0),
uinput.BTN_0,
uinput.BTN_1,
uinput.BTN_2,
uinput.BTN_3,
uinput.KEY_UP,
uinput.KEY_DOWN
]
DOUBLE_TAP = 0.5
def __init__( self ):
super().__init__( name=self.NAME, mapping=self.DEVICE )
self.high_beams_prev = 0
self.high_beams_t = time.time()
self.high_beams_key = uinput.KEY_DOWN
def update( self, msg_id, msg_b ):
t = time.time()
self.high_beams_prev = self.high_beams
super().update( msg_id, msg_b )
if self.high_beams != self.high_beams_prev:
if self.high_beams:
self.high_beams_key = uinput.KEY_UP if (t - self.high_beams_t < self.DOUBLE_TAP) else uinput.KEY_DOWN
self.device.emit( self.high_beams_key, 1 )
self.high_beams_t = t
else:
self.device.emit( self.high_beams_key, 0 )
def set_controls( self ):
t = time.time()
self.device.emit( uinput.ABS_WHEEL, self.steering )
self.device.emit( uinput.ABS_GAS, self.accelerator )
self.device.emit( uinput.BTN_0, self.brake )
self.device.emit( uinput.BTN_2, 1 if t < (self.cruise_t + self.LATCH_TIME) else 0 )
self.device.emit( uinput.BTN_3, 1 if t < (self.driver_door_t + self.LATCH_TIME) else 0 )
return
class Mazda3Grim( Mazda3 ):
NAME = 'Mazda 3 Grim Fandango'
DEVICE = [
uinput.KEY_LEFT,
uinput.KEY_UP,
uinput.KEY_RIGHT,
uinput.KEY_U,
uinput.KEY_LEFTSHIFT,
uinput.KEY_E,
uinput.KEY_P,
uinput.KEY_I
]
def __init__( self ):
super().__init__( name=self.NAME, mapping=self.DEVICE )
def set_controls( self ):
t = time.time()
self.device.emit( uinput.KEY_LEFT, 1 if self.steering < -self.STEER_THRESHOLD else 0 )
self.device.emit( uinput.KEY_RIGHT, 1 if self.steering > self.STEER_THRESHOLD else 0 )
self.device.emit( uinput.KEY_UP, 1 if self.accelerator > self.PRESS_THRESHOLD else 0 )
self.device.emit( uinput.KEY_LEFTSHIFT, 1 if self.accelerator > self.SHOVE_THRESHOLD else 0 )
self.device.emit( uinput.KEY_U, self.brake )
self.device.emit( uinput.KEY_E, self.high_beams )
self.device.emit( uinput.KEY_P, 1 if t < self.cruise_t + self.LATCH_TIME else 0 )
self.device.emit( uinput.KEY_I, 1 if t < self.driver_door_t + self.LATCH_TIME else 0 )
return
class Mazda3Sonic( Mazda3 ):
NAME = 'Mazda 3 Sonic'
DEVICE = [
uinput.KEY_LEFT,
uinput.KEY_UP,
uinput.KEY_RIGHT,
uinput.KEY_DOWN,
uinput.KEY_Z,
uinput.KEY_ENTER
]
def __init__( self ):
super().__init__( name=self.NAME, mapping=self.DEVICE )
def set_controls( self ):
t = time.time()
self.device.emit( uinput.KEY_LEFT, 1 if self.steering < -self.STEER_THRESHOLD else 0 )
self.device.emit( uinput.KEY_RIGHT, 1 if self.steering > self.STEER_THRESHOLD else 0 )
self.device.emit( uinput.KEY_Z, 1 if self.accelerator > self.PRESS_THRESHOLD else 0 )
self.device.emit( uinput.KEY_DOWN, self.brake )
self.device.emit( uinput.KEY_UP, self.high_beams )
self.device.emit( uinput.KEY_ENTER, 1 if t < self.cruise_t + self.LATCH_TIME else 0 )
return
CONTROLLERS = {
'joystick': Mazda3Joystick,
'grim': Mazda3Grim,
'descent': Mazda3Descent,
'doom': Mazda3Doom,
'dos': Mazda3DOS,
'sonic': Mazda3Sonic,
}
if __name__ == '__main__':
usage = 'Usage: %prog [options]'
parser = OptParser( epilog='Protocols supported by the ELM327:\n{}'.format( PROTOCOLS ) )
parser.add_option( '-g', '--game', dest='game', help='Game configuration to use (choices: {})'.format( ' '.join( CONTROLLERS.keys() ) ) )
parser.add_option( '-d', '--device', dest='device', help='Path to ELM327 serial device' )
parser.add_option( '-b', '--baudrate', dest='baud_rate', help='Baud rate' )
parser.add_option( '-p', '--protocol', dest='protocol', help='ELM327 message protocol to use' )
(options, argv) = parser.parse_args()
args = {}
controller_type = 'joystick'
if options.game and options.game in CONTROLLERS:
controller_type = options.game
elif len( argv ) >= 1 and argv[0] in CONTROLLERS:
controller_type = argv[0]
controller = CONTROLLERS[controller_type]()
if options.device:
args['device'] = options.device
elif len( argv ) >= 2:
args['device'] = argv[1]
if options.baud_rate:
args['baud_rate'] = options.baud_rate
elif len( argv ) >= 3:
args['baud_rate'] = argv[2]
if options.protocol:
args['protocol'] = options.protocol
elif len( argv ) >= 4:
args['protocol'] = argv[3]
elm = ELM327( **args )
elm.reset()
elm.set_can_whitelist( [0x4da, 0x201, 0x205, 0x4ec, 0x433] )
elm.start_can()
try:
while True:
msg_id, msg_b = elm.recv_can()
if msg_id >= 0:
controller.update( msg_id, msg_b )
else:
print('-- Miss: {}'.format( msg_b ))
except EOFError:
print('-- Hit the end')
except KeyboardInterrupt:
pass
elm.get_prompt()
| true | true |
f720a09f334fcd690a947282fa271733c27fe7d4 | 218 | py | Python | WriteaFunction.py | jibinmathew691993/PythonHackerrank | 14ab5b620435a006d5ccff17536bc01acd7c22dc | [
"MIT"
] | null | null | null | WriteaFunction.py | jibinmathew691993/PythonHackerrank | 14ab5b620435a006d5ccff17536bc01acd7c22dc | [
"MIT"
] | null | null | null | WriteaFunction.py | jibinmathew691993/PythonHackerrank | 14ab5b620435a006d5ccff17536bc01acd7c22dc | [
"MIT"
def is_leap(year):
    """Return True if *year* (1900 or later) is a Gregorian leap year.

    Years below 1900 are outside the problem's constraint and always
    report False, matching the original behaviour.
    """
    if year < 1900:
        return False
    # Multiple of 4, except century years that are not multiples of 400.
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
year = int(input()) | 18.166667 | 47 | 0.463303 | def is_leap(year):
leap = False
if year>=1900:
if year%4 == 0:
leap = True
if year%100 == 0 and year%400 != 0:
leap = False
return leap
year = int(input()) | true | true |
f720a191c0da487593b1eee10de1b8b4af27c2df | 2,276 | py | Python | pyecharts/charts/basic_charts/scatter.py | myqf555/pyecharts | 050309ee3d2016142df3e2265a091e27aa58a027 | [
"MIT"
] | 1 | 2020-02-13T14:48:20.000Z | 2020-02-13T14:48:20.000Z | pyecharts/charts/basic_charts/scatter.py | eclipse2007/pyecharts | 651731a1a5220420a9a03808d2f5eb38ffe18e09 | [
"MIT"
] | null | null | null | pyecharts/charts/basic_charts/scatter.py | eclipse2007/pyecharts | 651731a1a5220420a9a03808d2f5eb38ffe18e09 | [
"MIT"
] | 1 | 2020-09-12T05:55:48.000Z | 2020-09-12T05:55:48.000Z | import itertools
from ... import options as opts
from ... import types
from ...charts.chart import RectChart
from ...globals import ChartType
class Scatter(RectChart):
    """
    <<< Scatter >>>

    Scatter chart on a rectangular coordinate system, showing the
    relationship between the x and y values of the data.  When data items
    carry extra dimensions, those can be rendered through colour via the
    visualmap component.
    """

    def add_yaxis(
        self,
        series_name: str,
        y_axis: types.Sequence,
        *,
        is_selected: bool = True,
        xaxis_index: types.Optional[types.Numeric] = None,
        yaxis_index: types.Optional[types.Numeric] = None,
        color: types.Optional[str] = None,
        symbol: types.Optional[str] = None,
        symbol_size: types.Union[types.Numeric, types.Sequence] = 10,
        symbol_rotate: types.Optional[types.Numeric] = None,
        label_opts: types.Label = opts.LabelOpts(position="right"),
        markpoint_opts: types.MarkPoint = None,
        markline_opts: types.MarkLine = None,
        tooltip_opts: types.Tooltip = None,
        itemstyle_opts: types.ItemStyle = None,
    ):
        """Append one scatter series paired against the stored x-axis data."""
        self._append_color(color)
        self._append_legend(series_name, is_selected)

        # Pair each x value with its y value(s); multi-dimensional y items
        # are flattened into [x, y0, y1, ...] records.
        if len(y_axis) > 0 and isinstance(y_axis[0], types.Sequence):
            data = [[x, *extra] for x, extra in zip(self._xaxis_data, y_axis)]
        else:
            data = [[x, y] for x, y in zip(self._xaxis_data, y_axis)]

        series = {
            "type": ChartType.SCATTER,
            "name": series_name,
            "xAxisIndex": xaxis_index,
            "yAxisIndex": yaxis_index,
            "symbol": symbol,
            "symbolSize": symbol_size,
            "symbolRotate": symbol_rotate,
            "data": data,
            "label": label_opts,
            "markPoint": markpoint_opts,
            "markLine": markline_opts,
            "tooltip": tooltip_opts,
            "itemStyle": itemstyle_opts,
        }
        self.options.get("series").append(series)
        return self
| 35.5625 | 76 | 0.560193 | import itertools
from ... import options as opts
from ... import types
from ...charts.chart import RectChart
from ...globals import ChartType
class Scatter(RectChart):
    """Scatter chart on a rectangular coordinate system; extra data
    dimensions can be mapped to colour via the visualmap component."""
    def add_yaxis(
        self,
        series_name: str,
        y_axis: types.Sequence,
        *,
        is_selected: bool = True,
        xaxis_index: types.Optional[types.Numeric] = None,
        yaxis_index: types.Optional[types.Numeric] = None,
        color: types.Optional[str] = None,
        symbol: types.Optional[str] = None,
        symbol_size: types.Union[types.Numeric, types.Sequence] = 10,
        symbol_rotate: types.Optional[types.Numeric] = None,
        label_opts: types.Label = opts.LabelOpts(position="right"),
        markpoint_opts: types.MarkPoint = None,
        markline_opts: types.MarkLine = None,
        tooltip_opts: types.Tooltip = None,
        itemstyle_opts: types.ItemStyle = None,
    ):
        """Append one scatter series paired against the stored x-axis data."""
        self._append_color(color)
        self._append_legend(series_name, is_selected)
        # Pair each x value with its y value(s); multi-dimensional y items
        # are flattened into [x, y0, y1, ...] records.
        if len(y_axis) > 0 and isinstance(y_axis[0], types.Sequence):
            data = [
                list(itertools.chain(list([x]), y))
                for x, y in zip(self._xaxis_data, y_axis)
            ]
        else:
            data = [list(z) for z in zip(self._xaxis_data, y_axis)]
        self.options.get("series").append(
            {
                "type": ChartType.SCATTER,
                "name": series_name,
                "xAxisIndex": xaxis_index,
                "yAxisIndex": yaxis_index,
                "symbol": symbol,
                "symbolSize": symbol_size,
                "symbolRotate": symbol_rotate,
                "data": data,
                "label": label_opts,
                "markPoint": markpoint_opts,
                "markLine": markline_opts,
                "tooltip": tooltip_opts,
                "itemStyle": itemstyle_opts,
            }
        )
        return self
| true | true |
f720a2030d77959bb04a88ba16fda69fd042f960 | 8,410 | py | Python | LichessBotMain.py | nfeddersen/lichess-bot | 6457b5f66104b59b91316ba3a944b55710ca64e5 | [
"MIT"
] | null | null | null | LichessBotMain.py | nfeddersen/lichess-bot | 6457b5f66104b59b91316ba3a944b55710ca64e5 | [
"MIT"
] | null | null | null | LichessBotMain.py | nfeddersen/lichess-bot | 6457b5f66104b59b91316ba3a944b55710ca64e5 | [
"MIT"
] | 1 | 2021-07-12T14:11:04.000Z | 2021-07-12T14:11:04.000Z | import requests
import json
from stockfish import Stockfish
# Engine used to pick replies; re-tuned per time control inside the game loop.
stockfish = Stockfish('stockfish_20090216_x64_bmi2.exe', parameters={"Threads": 8, "Minimum Thinking Time": 300})
stockfish.set_depth(15)
stockfish.set_skill_level(25)
# Personal Lichess API token for the bot account.
api_key = 'REPLACE_WITH_API_KEY'
headers = {'Authorization': f'Bearer {api_key}'}
# Stream of account events (incoming challenges, game starts).
game_state_url = 'https://lichess.org/api/stream/event'
game_id = 'placeholder'
# NOTE(review): is_checkmate and bot_challenges are assigned here but never
# read anywhere below.
is_checkmate = False
bot_challenges = False
# Main bot loop: wait for challenges on the event stream, accept standard
# games, then play one game to completion before returning to the stream.
while True:
    state_session = requests.Session()
    request = state_session.get(game_state_url, headers=headers, stream=True)
    for line in request.iter_lines():
        # The event stream sends empty keep-alive lines between events.
        if len(line) == 0:
            print('Request response is empty.')
        if len(line) != 0:
            challenge_state_json = json.loads(line)
            if challenge_state_json['type'] == 'challenge':
                print('BOT_NAME has been challenged.')
                challenge_id = challenge_state_json['challenge']['id']
                challenger = challenge_state_json['challenge']['challenger']['id']
                print('Challenge ID is: ' + challenge_id + '. Challenger is: ' + challenger)
                # Only standard chess is supported; decline everything else.
                if challenge_state_json['challenge']['variant']['key'] != 'standard':
                    requests.post('https://lichess.org/api/challenge/' + challenge_id + '/decline', params={
                    },
                        headers={
                            'Authorization': f'Bearer {api_key}'
                        })
                    print('Challenge has been declined for improper variant.')
                    continue
                else:
                    requests.post('https://lichess.org/api/challenge/' + challenge_id + '/accept', params={
                    },
                        headers={
                            'Authorization': f'Bearer {api_key}'
                        })
                # Reset per-game state.  position/black_position hold the
                # UCI move list fed to Stockfish for the white/black side.
                current_move = 'placeholder'
                best_move = 'placeholder'
                position = ['placeholder', 'placeholder']
                black_position = ['placeholder', 'placeholder']
                white = True
                # Read a single event to learn the new game's ID.
                second_session = requests.Session()
                request = second_session.get(game_state_url, headers=headers, stream=True)
                for line in request.iter_lines():
                    game_start_json = json.loads(line)
                    print(game_start_json)
                    game_id = game_start_json['game']['id']
                    print('Game ID is: ' + game_id)
                    break
                game_stream_url = 'https://lichess.org/api/bot/game/stream/' + game_id
                bot_move_url = 'https://lichess.org/api/bot/game/' + game_id + '/move/'
                s = requests.Session()
                r = s.get(game_stream_url, headers=headers, stream=True)
                # i counts non-empty game-stream lines (half-moves, roughly);
                # move_count advances by 0.5 per line for display purposes.
                i = 0
                move_count = 0
                for line in r.iter_lines():
                    if line:
                        i = i + 1
                        move_count = move_count + .5
                        move_count = float(move_count)
                        # On whole move numbers, print the move counter.
                        if move_count.is_integer():
                            move_count = int(move_count)
                            move_count = str(move_count)
                            print('It is move ' + move_count + '.')
                            move_count = float(move_count)
                        start_json = json.loads(line)
                        # First stream line describes the game: learn our colour.
                        if i == 1:
                            if start_json["white"]["id"] == 'REPLACE_WITH_BOT_USERNAME':
                                white = True
                                print('It is white to move. I am white.')
                            else:
                                white = False
                                print('It is white to move. I am black.')
                        # Tune engine strength to the game's time control.
                        if start_json['speed'] == 'bullet' and i == 1:
                            stockfish.set_depth(15)
                            stockfish.set_skill_level(20)
                        elif start_json['speed'] == 'blitz' and i == 1:
                            stockfish.set_depth(15)
                            stockfish.set_skill_level(25)
                        elif start_json['speed'] == 'rapid' and i == 1:
                            stockfish.set_depth(19)
                            stockfish.set_skill_level(30)
                        elif start_json['speed'] == 'classical' and i == 1:
                            stockfish.set_depth(20)
                            stockfish.set_skill_level(30)
                        elif start_json['speed'] == 'correspondence' and i == 1:
                            stockfish.set_depth(20)
                            stockfish.set_skill_level(30)
                        # As white: open the game immediately with our move.
                        if white and i == 1:
                            position.clear()
                            stockfish.set_position()
                            best_move = stockfish.get_best_move()
                            requests.post(bot_move_url + best_move, params={
                            },
                                headers={
                                    'Authorization': f'Bearer {api_key}'
                                })
                            best_move = str(best_move)
                            position.append(best_move)
                            stockfish.set_position(position)
                        if not white and i == 1:
                            print('I am waiting to move.')
                        # As black: answer white's first move.
                        if not white and i == 2:
                            black_position.clear()
                            current_move = start_json["moves"]
                            current_move = str(current_move)
                            current_move = current_move.split()
                            current_move = current_move[-1]
                            black_position.append(current_move)
                            stockfish.set_position(black_position)
                            best_move = stockfish.get_best_move()
                            black_position.append(best_move)
                            requests.post(bot_move_url + best_move, params={
                            },
                                headers={
                                    'Authorization': f'Bearer {api_key}'
                                })
                        if start_json['type'] == 'gameState':
                            # If bot is white and first move has already been played
                            if not i % 2 == 0 and white and i > 1:
                                # Take the opponent's last move from the move
                                # list, feed it to the engine, and reply.
                                current_move = start_json["moves"]
                                current_move = str(current_move)
                                current_move = current_move.split()
                                current_move = current_move[-1]
                                position.append(current_move)
                                stockfish.set_position(position)
                                best_move = stockfish.get_best_move()
                                position.append(best_move)
                                requests.post(bot_move_url + best_move, params={
                                },
                                    headers={
                                        'Authorization': f'Bearer {api_key}'
                                    })
                            # If bot is black and first move has been played
                            if not white and i % 2 == 0 and i > 2:
                                current_move = start_json["moves"]
                                current_move = str(current_move)
                                current_move = current_move.split()
                                current_move = current_move[-1]
                                black_position.append(current_move)
                                stockfish.set_position(black_position)
                                best_move = stockfish.get_best_move()
                                black_position.append(best_move)
                                requests.post(bot_move_url + best_move, params={
                                },
                                    headers={
                                        'Authorization': f'Bearer {api_key}'
                                    })
                    else:
                        # Keep-alive line: it is not our turn yet.
                        print('I am waiting to move.')
                        continue
| 46.464088 | 114 | 0.437574 | import requests
import json
from stockfish import Stockfish
stockfish = Stockfish('stockfish_20090216_x64_bmi2.exe', parameters={"Threads": 8, "Minimum Thinking Time": 300})
stockfish.set_depth(15)
stockfish.set_skill_level(25)
api_key = 'REPLACE_WITH_API_KEY'
headers = {'Authorization': f'Bearer {api_key}'}
game_state_url = 'https://lichess.org/api/stream/event'
game_id = 'placeholder'
is_checkmate = False
bot_challenges = False
while True:
state_session = requests.Session()
request = state_session.get(game_state_url, headers=headers, stream=True)
for line in request.iter_lines():
if len(line) == 0:
print('Request response is empty.')
if len(line) != 0:
challenge_state_json = json.loads(line)
if challenge_state_json['type'] == 'challenge':
print('BOT_NAME has been challenged.')
challenge_id = challenge_state_json['challenge']['id']
challenger = challenge_state_json['challenge']['challenger']['id']
print('Challenge ID is: ' + challenge_id + '. Challenger is: ' + challenger)
if challenge_state_json['challenge']['variant']['key'] != 'standard':
requests.post('https://lichess.org/api/challenge/' + challenge_id + '/decline', params={
},
headers={
'Authorization': f'Bearer {api_key}'
})
print('Challenge has been declined for improper variant.')
continue
else:
requests.post('https://lichess.org/api/challenge/' + challenge_id + '/accept', params={
},
headers={
'Authorization': f'Bearer {api_key}'
})
current_move = 'placeholder'
best_move = 'placeholder'
position = ['placeholder', 'placeholder']
black_position = ['placeholder', 'placeholder']
white = True
second_session = requests.Session()
request = second_session.get(game_state_url, headers=headers, stream=True)
for line in request.iter_lines():
game_start_json = json.loads(line)
print(game_start_json)
game_id = game_start_json['game']['id']
print('Game ID is: ' + game_id)
break
game_stream_url = 'https://lichess.org/api/bot/game/stream/' + game_id
bot_move_url = 'https://lichess.org/api/bot/game/' + game_id + '/move/'
s = requests.Session()
r = s.get(game_stream_url, headers=headers, stream=True)
i = 0
move_count = 0
for line in r.iter_lines():
if line:
i = i + 1
move_count = move_count + .5
move_count = float(move_count)
if move_count.is_integer():
move_count = int(move_count)
move_count = str(move_count)
print('It is move ' + move_count + '.')
move_count = float(move_count)
start_json = json.loads(line)
if i == 1:
if start_json["white"]["id"] == 'REPLACE_WITH_BOT_USERNAME':
white = True
print('It is white to move. I am white.')
else:
white = False
print('It is white to move. I am black.')
if start_json['speed'] == 'bullet' and i == 1:
stockfish.set_depth(15)
stockfish.set_skill_level(20)
elif start_json['speed'] == 'blitz' and i == 1:
stockfish.set_depth(15)
stockfish.set_skill_level(25)
elif start_json['speed'] == 'rapid' and i == 1:
stockfish.set_depth(19)
stockfish.set_skill_level(30)
elif start_json['speed'] == 'classical' and i == 1:
stockfish.set_depth(20)
stockfish.set_skill_level(30)
elif start_json['speed'] == 'correspondence' and i == 1:
stockfish.set_depth(20)
stockfish.set_skill_level(30)
if white and i == 1:
position.clear()
stockfish.set_position()
best_move = stockfish.get_best_move()
requests.post(bot_move_url + best_move, params={
},
headers={
'Authorization': f'Bearer {api_key}'
})
best_move = str(best_move)
position.append(best_move)
stockfish.set_position(position)
if not white and i == 1:
print('I am waiting to move.')
if not white and i == 2:
black_position.clear()
current_move = start_json["moves"]
current_move = str(current_move)
current_move = current_move.split()
current_move = current_move[-1]
black_position.append(current_move)
stockfish.set_position(black_position)
best_move = stockfish.get_best_move()
black_position.append(best_move)
requests.post(bot_move_url + best_move, params={
},
headers={
'Authorization': f'Bearer {api_key}'
})
if start_json['type'] == 'gameState':
if not i % 2 == 0 and white and i > 1:
current_move = start_json["moves"]
current_move = str(current_move)
current_move = current_move.split()
current_move = current_move[-1]
position.append(current_move)
stockfish.set_position(position)
best_move = stockfish.get_best_move()
position.append(best_move)
requests.post(bot_move_url + best_move, params={
},
headers={
'Authorization': f'Bearer {api_key}'
})
if not white and i % 2 == 0 and i > 2:
current_move = start_json["moves"]
current_move = str(current_move)
current_move = current_move.split()
current_move = current_move[-1]
black_position.append(current_move)
stockfish.set_position(black_position)
best_move = stockfish.get_best_move()
black_position.append(best_move)
requests.post(bot_move_url + best_move, params={
},
headers={
'Authorization': f'Bearer {api_key}'
})
else:
print('I am waiting to move.')
continue
| true | true |
f720a21b297857bd9d0f8b0c056695ecb84a54fe | 1,510 | py | Python | pyArango/index.py | jarvisav/pyArango | dc054e2258c9fccbc54443afc244b74ad0abb8b0 | [
"Apache-2.0"
] | null | null | null | pyArango/index.py | jarvisav/pyArango | dc054e2258c9fccbc54443afc244b74ad0abb8b0 | [
"Apache-2.0"
] | null | null | null | pyArango/index.py | jarvisav/pyArango | dc054e2258c9fccbc54443afc244b74ad0abb8b0 | [
"Apache-2.0"
] | null | null | null | import json
from .theExceptions import (CreationError, DeletionError, UpdateError)
class Index(object) :
    """A single index on a collection's fields.

    Instances are normally created through a collection's ensureXXX helpers,
    not directly. All server-side details about the index are kept in the
    ``infos`` dict.
    """

    def __init__(self, collection, infos = None, creationData = None) :
        self.collection = collection
        self.connection = collection.database.connection
        self.indexesURL = f"{collection.database.URL}/index"
        self.infos = None
        if infos :
            self.infos = infos
        elif creationData :
            self._create(creationData)
        if self.infos :
            self.URL = f"{self.indexesURL}/{self.infos['id']}"

    def _create(self, postData) :
        """POST ``postData`` to create the index; no-op when infos are already set."""
        if self.infos is not None :
            return
        payload = json.dumps(postData, default=str)
        response = self.connection.session.post(
            self.indexesURL,
            params = {"collection" : self.collection.name},
            data = payload,
        )
        data = response.json()
        if response.status_code >= 400 or data['error'] :
            raise CreationError(data['errorMessage'], data)
        self.infos = data

    def delete(self) :
        """Delete the index on the server; raises DeletionError on failure."""
        response = self.connection.session.delete(self.URL)
        data = response.json()
        if response.status_code not in (200, 202) or data['error'] :
            raise DeletionError(data['errorMessage'], data)
| 40.810811 | 151 | 0.625828 | import json
from .theExceptions import (CreationError, DeletionError, UpdateError)
class Index(object) :
def __init__(self, collection, infos = None, creationData = None) :
self.collection = collection
self.connection = self.collection.database.connection
self.indexesURL = "%s/index" % self.collection.database.URL
self.infos = None
if infos :
self.infos = infos
elif creationData :
self._create(creationData)
if self.infos :
self.URL = "%s/%s" % (self.indexesURL, self.infos["id"])
def _create(self, postData) :
if self.infos is None :
r = self.connection.session.post(self.indexesURL, params = {"collection" : self.collection.name}, data = json.dumps(postData, default=str))
data = r.json()
if (r.status_code >= 400) or data['error'] :
raise CreationError(data['errorMessage'], data)
self.infos = data
def delete(self) :
r = self.connection.session.delete(self.URL)
data = r.json()
if (r.status_code != 200 and r.status_code != 202) or data['error'] :
raise DeletionError(data['errorMessage'], data)
| true | true |
f720a26b353f6b3ddd1548566f5b0a972c16828a | 584 | py | Python | soleka.py | jshenaop/eko | bb8e96ee9e460ed10505c8046a444a24fdcfcd06 | [
"Apache-2.0"
] | null | null | null | soleka.py | jshenaop/eko | bb8e96ee9e460ed10505c8046a444a24fdcfcd06 | [
"Apache-2.0"
] | null | null | null | soleka.py | jshenaop/eko | bb8e96ee9e460ed10505c8046a444a24fdcfcd06 | [
"Apache-2.0"
] | null | null | null | # coding=utf8
from flask import Flask, render_template
from flask_restful.utils import cors
from flask_cors import CORS, cross_origin
import config
import models
from resources_v1.predictions import predictions_api_v1
from templates.templates import home
# Flask application setup: REST API under /api/v1 plus a single HTML landing page.
app = Flask(__name__)
CORS(app)  # allow cross-origin requests so external front-ends can call the API

app.register_blueprint(predictions_api_v1, url_prefix='/api/v1')


@app.route('/')
def index():
    """Serve the landing page."""
    return render_template("main.html")

if __name__ == '__main__':
    # NOTE(review): "initilize" looks like a typo of "initialize", but the name
    # is defined in the models module — leave the call as-is.
    models.initilize()
    #app.run(host=config.HOST)
    app.run(debug=config.DEBUG, host=config.HOST)
| 22.461538 | 65 | 0.738014 |
from flask import Flask, render_template
from flask_restful.utils import cors
from flask_cors import CORS, cross_origin
import config
import models
from resources_v1.predictions import predictions_api_v1
from templates.templates import home
app = Flask(__name__)
CORS(app)
app.register_blueprint(predictions_api_v1, url_prefix='/api/v1')
@app.route('/')
def index():
return render_template("main.html")
if __name__ == '__main__':
models.initilize()
app.run(debug=config.DEBUG, host=config.HOST)
| true | true |
f720a3476873e92b2ed94d5e14b117589ae55b83 | 23,418 | py | Python | index.py | 201723050210/17wanxiaoCheckin-Actions | 666a2c8473f876a607011021a8be1ee876ce6c34 | [
"MIT"
] | null | null | null | index.py | 201723050210/17wanxiaoCheckin-Actions | 666a2c8473f876a607011021a8be1ee876ce6c34 | [
"MIT"
] | null | null | null | index.py | 201723050210/17wanxiaoCheckin-Actions | 666a2c8473f876a607011021a8be1ee876ce6c34 | [
"MIT"
] | null | null | null | import time
import os
import datetime
import json
import logging
import requests
from utils.server_chan import server_push
from utils.qq_email import qq_email_push
from utils.qmsg import qmsg_push
from login import CampusLogin
def initLogging():
    """Configure root logging: INFO level, "[LEVEL]; message" format."""
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.INFO)
    logging.basicConfig(format="[%(levelname)s]; %(message)s")
def get_token(username, password, device_id):
    """
    Obtain a user token via a simulated app login.
    Login emulation based on: https://github.com/zhongbr/wanmei_campus
    :param device_id: device id used for the simulated login
    :param username: account name (phone number)
    :param password: account password
    :return: token string on success, None on permanent failure or retry exhaustion
    """
    # Up to 3 attempts. Unrecoverable errors (unregistered phone number,
    # wrong password) bail out immediately instead of retrying.
    for _ in range(3):
        try:
            campus_login = CampusLogin(phone_num=username, device_id=device_id)
        except Exception as e:
            # Constructing the login client failed (e.g. network error) — retry.
            logging.warning(e)
            continue
        login_dict = campus_login.pwd_login(password)
        if login_dict["status"]:
            logging.info(f"{username[:4]},{login_dict['msg']}")
            return login_dict["token"]
        elif login_dict['errmsg'] == "该手机号未注册完美校园":
            # Phone number not registered — retrying cannot help.
            logging.warning(f"{username[:4]},{login_dict['errmsg']}")
            return None
        elif login_dict['errmsg'].startswith("密码错误"):
            # Wrong password — retrying cannot help either.
            logging.warning(f"{username[:4]},{login_dict['errmsg']}")
            logging.warning("代码是死的,密码错误了就是错误了,赶紧去查看一下是不是输错了!")
            return None
        else:
            # Transient server-side error: wait, then retry.
            logging.info(f"{username[:4]},{login_dict['errmsg']}")
            logging.warning('正在尝试重新登录......')
            time.sleep(5)
    return None
def get_school_name(token):
    """Return the account's school name via the wanmei campus API.

    :param token: user token from get_token()
    :return: school name string, or a Chinese fallback message on any failure
    """
    post_data = {"token": token, "method": "WX_BASE_INFO", "param": "%7B%7D"}
    headers = {"Content-Type": "application/x-www-form-urlencoded"}
    try:
        res = requests.post(
            "https://server.59wanmei.com/YKT_Interface/xyk",
            data=post_data,
            headers=headers,
        )
        return res.json()["data"]["customerName"]
    # Narrowed from a bare "except:": still best-effort, but no longer
    # swallows KeyboardInterrupt/SystemExit.
    except Exception:
        return "泪目,没获取到学校名字"
def get_user_info(token):
    """Fetch the user's check-in profile (contains customerId and template ids).

    Retries up to 3 times on request/parse failures.

    :param token: user token
    :return: userInfo dict on success, None after all attempts fail
    """
    data = {"appClassify": "DK", "token": token}
    for _ in range(3):
        try:
            res = requests.post(
                "https://reportedh5.17wanxiao.com/api/clock/school/getUserInfo", data=data
            )
            user_info = res.json()["userInfo"]
            logging.info('获取个人信息成功')
            return user_info
        # Narrowed from a bare "except:"; network/JSON errors trigger a retry.
        except Exception:
            logging.warning('获取个人信息失败,正在重试......')
            time.sleep(1)
    return None
def get_post_json(post_json):
    """Fetch and normalize the "epmpics" check-in form data.

    Retries up to 3 times on network errors.

    :param post_json: request payload identifying the template to fetch
    :return: normalized dict of form fields (area, dept, user ids, field
             values in three shapes), or None after all attempts fail
    """
    for _ in range(3):
        try:
            res = requests.post(
                url="https://reportedh5.17wanxiao.com/sass/api/epmpics",
                json=post_json,
                timeout=10,
            ).json()
        # Narrowed from a bare "except:"; network/JSON errors trigger a retry.
        except Exception:
            logging.warning("获取完美校园打卡post参数失败,正在重试...")
            time.sleep(1)
            continue
        if res["code"] != "10000":
            # Log unexpected codes but still try to parse the payload below.
            logging.warning(res)
        data = json.loads(res["data"])
        post_dict = {
            "areaStr": data['areaStr'],
            "deptStr": data['deptStr'],
            "deptid": data['deptStr']['deptid'] if data['deptStr'] else None,
            "customerid": data['customerid'],
            "userid": data['userid'],
            "username": data['username'],
            "stuNo": data['stuNo'],
            "phonenum": data["phonenum"],
            "templateid": data["templateid"],
            # Minimal (name, value) pairs submitted back to the server.
            "updatainfo": [
                {"propertyname": i["propertyname"], "value": i["value"]}
                for i in data["cusTemplateRelations"]
            ],
            # Full field metadata, used by the e-mail push for debugging.
            "updatainfo_detail": [
                {
                    "propertyname": i["propertyname"],
                    "checkValues": i["checkValues"],
                    "description": i["decription"],
                    "value": i["value"],
                }
                for i in data["cusTemplateRelations"]
            ],
            # Human-readable description/value pairs for push messages.
            "checkbox": [
                {"description": i["decription"], "value": i["value"], "propertyname": i["propertyname"]}
                for i in data["cusTemplateRelations"]
            ],
        }
        logging.info("获取完美校园打卡post参数成功")
        return post_dict
    return None
def healthy_check_in(token, username, post_dict):
    """Submit the "epmpics"-style health check-in (flow 1).

    Retries the POST up to 3 times on request errors. (Previously every code
    path returned on the first pass, so the retry loop was dead code.)

    :param token: user token
    :param username: phone number
    :param post_dict: normalized form data from get_post_json()
    :return: {"status": 1, "res": ..., "post_dict": ..., "check_json": ...,
              "type": "healthy"} when the server accepted (or rate-limited)
              the submission; {"status": 0, "errmsg": ...} otherwise
    """
    check_json = {
        "businessType": "epmpics",
        "method": "submitUpInfo",
        "jsonData": {
            "deptStr": post_dict["deptStr"],
            "areaStr": post_dict["areaStr"],
            "reportdate": round(time.time() * 1000),
            "customerid": post_dict["customerid"],
            "deptid": post_dict["deptid"],
            "source": "app",
            "templateid": post_dict["templateid"],
            "stuNo": post_dict["stuNo"],
            "username": post_dict["username"],
            "phonenum": username,
            "userid": post_dict["userid"],
            "updatainfo": post_dict["updatainfo"],
            "gpsType": 1,
            "token": token,
        },
    }
    for _ in range(3):
        try:
            res = requests.post(
                "https://reportedh5.17wanxiao.com/sass/api/epmpics", json=check_json
            ).json()
            if res['code'] == '10000':
                logging.info(res)
                return {
                    "status": 1,
                    "res": res,
                    "post_dict": post_dict,
                    "check_json": check_json,
                    "type": "healthy",
                }
            elif "频繁" in res['data']:
                # "Too frequent" means an earlier submission already went
                # through — treat it as a success.
                logging.info(res)
                return {
                    "status": 1,
                    "res": res,
                    "post_dict": post_dict,
                    "check_json": check_json,
                    "type": "healthy",
                }
            else:
                logging.warning(res)
                return {"status": 0, "errmsg": f"{post_dict['username']}: {res}"}
        except Exception:
            # Request/parse failure: actually retry instead of giving up
            # immediately (the old code returned here, never using the loop).
            logging.warning("健康打卡请求出错")
            time.sleep(1)
    return {"status": 0, "errmsg": "健康打卡请求出错"}
def get_recall_data(token):
    """
    Fetch the previously submitted answers for the "recall/receive"
    check-in flow (flow 2); they are re-submitted by receive_check_in().
    :param token: user token
    :return: dict of previous answers on success, None after retries run out
    """
    # Up to 3 attempts; both request errors and non-zero response codes retry.
    for _ in range(3):
        try:
            res = requests.post(
                url="https://reportedh5.17wanxiao.com/api/reported/recall",
                data={"token": token},
                timeout=10,
            ).json()
        # NOTE(review): bare except also catches KeyboardInterrupt — consider
        # narrowing to Exception.
        except:
            logging.warning("获取完美校园打卡post参数失败,正在重试...")
            time.sleep(1)
            continue
        if res["code"] == 0:
            logging.info("获取完美校园打卡post参数成功")
            return res["data"]
        else:
            logging.warning(res)
    return None
def receive_check_in(token, custom_id, post_dict):
    """
    Submit the "recall/receive"-style health check-in (flow 2).
    Re-submits the answers fetched by get_recall_data(), field by field.
    :param token: user token
    :param custom_id: health check-in template id (customerId from user info)
    :param post_dict: previous answers returned by get_recall_data()
    :return: dict with status/res/post_dict/check_json/type; status 0 only on
             request error (server-side rejections still report status 1)
    """
    check_json = {
        "userId": post_dict["userId"],
        "name": post_dict["name"],
        "stuNo": post_dict["stuNo"],
        "whereabouts": post_dict["whereabouts"],
        "familyWhereabouts": "",
        "beenToWuhan": post_dict["beenToWuhan"],
        "contactWithPatients": post_dict["contactWithPatients"],
        "symptom": post_dict["symptom"],
        "fever": post_dict["fever"],
        "cough": post_dict["cough"],
        "soreThroat": post_dict["soreThroat"],
        "debilitation": post_dict["debilitation"],
        "diarrhea": post_dict["diarrhea"],
        "cold": post_dict["cold"],
        "staySchool": post_dict["staySchool"],
        "contacts": post_dict["contacts"],
        "emergencyPhone": post_dict["emergencyPhone"],
        "address": post_dict["address"],
        "familyForAddress": "",
        "collegeId": post_dict["collegeId"],
        "majorId": post_dict["majorId"],
        "classId": post_dict["classId"],
        "classDescribe": post_dict["classDescribe"],
        "temperature": post_dict["temperature"],
        "confirmed": post_dict["confirmed"],
        "isolated": post_dict["isolated"],
        "passingWuhan": post_dict["passingWuhan"],
        "passingHubei": post_dict["passingHubei"],
        "patientSide": post_dict["patientSide"],
        "patientContact": post_dict["patientContact"],
        "mentalHealth": post_dict["mentalHealth"],
        "wayToSchool": post_dict["wayToSchool"],
        "backToSchool": post_dict["backToSchool"],
        "haveBroadband": post_dict["haveBroadband"],
        "emergencyContactName": post_dict["emergencyContactName"],
        "helpInfo": "",
        "passingCity": "",
        "longitude": "",  # fill in the longitude of the check-in location here if needed
        "latitude": "",  # fill in the latitude of the check-in location here if needed
        "token": token,
    }
    # The referer must match the in-app H5 page URL or the server rejects the post.
    headers = {
        "referer": f"https://reportedh5.17wanxiao.com/nCovReport/index.html?token={token}&customerId={custom_id}",
        "content-type": "application/x-www-form-urlencoded;charset=UTF-8",
    }
    try:
        res = requests.post(
            "https://reportedh5.17wanxiao.com/api/reported/receive",
            headers=headers,
            data=check_json,
        ).json()
        if res["code"] == 0:
            logging.info(res)
            return dict(
                status=1,
                res=res,
                post_dict=post_dict,
                check_json=check_json,
                type="healthy",
            )
        else:
            # Non-zero codes are still reported as status 1; callers inspect res.
            logging.warning(res)
            return dict(
                status=1,
                res=res,
                post_dict=post_dict,
                check_json=check_json,
                type="healthy",
            )
    # NOTE(review): bare except also catches KeyboardInterrupt — consider Exception.
    except:
        errmsg = f"```打卡请求出错```"
        logging.warning("打卡请求出错,网络不稳定")
        return dict(status=0, errmsg=errmsg)
def get_ap():
    """Return [morning, afternoon, evening] booleans for the current Beijing time.

    Morning is 00:00-11:59, afternoon 12:00-16:59, evening 17:00-23:59;
    exactly one flag is True. Used to pick the on-campus clock-in slot.
    """
    beijing_now = datetime.datetime.utcnow() + datetime.timedelta(hours=8)
    hour = beijing_now.hour
    return [hour < 12, 12 <= hour < 17, hour >= 17]
def get_id_list(token, custom_id):
    """Resolve the on-campus clock-in rule list for a template id.

    :param token: user token
    :param custom_id: on-campus clock-in template id (customerAppTypeId)
    :return: list of rule dicts, or None on any failure
    """
    post_data = {
        "customerAppTypeId": custom_id,
        "longitude": "",
        "latitude": "",
        "token": token,
    }
    try:
        res = requests.post(
            "https://reportedh5.17wanxiao.com/api/clock/school/rules", data=post_data
        )
        return res.json()["customerAppTypeDto"]["ruleList"]
    # Narrowed from a bare "except:"; keep the best-effort None on failure.
    except Exception:
        return None
def get_id_list_v1(token):
    """Legacy resolver for on-campus clock-in rule ids (first version, kept for reference).

    :param token: user token
    :return: list of {"id", "templateid"} dicts sorted by rule id, or None on failure
    """
    post_data = {"appClassify": "DK", "token": token}
    try:
        res = requests.post(
            "https://reportedh5.17wanxiao.com/api/clock/school/childApps",
            data=post_data,
        )
        payload = res.json()  # parse once instead of twice
        if payload["appList"]:
            id_list = sorted(
                payload["appList"][-1]["customerAppTypeRuleList"],
                key=lambda x: x["id"],
            )
            # templateid names follow the app convention clockSign1, clockSign2, ...
            return [
                {"id": j["id"], "templateid": f"clockSign{i + 1}"}
                for i, j in enumerate(id_list)
            ]
        return None
    # Narrowed from a bare "except:"; keep the best-effort None on failure.
    except Exception:
        return None
def campus_check_in(username, token, post_dict, id):
    """Submit one on-campus clock-in for a given rule id.

    :param username: phone number
    :param token: user token
    :param post_dict: form data from get_post_json()
    :param id: clock-in rule id (customerAppTypeRuleId)
    :return: dict with status/res/post_dict/check_json/type; status 0 only on
             request error (server-side rejections still report status 1)
    """
    check_json = {
        "businessType": "epmpics",
        "method": "submitUpInfoSchool",
        "jsonData": {
            "deptStr": post_dict["deptStr"],
            "areaStr": post_dict["areaStr"],
            "reportdate": round(time.time() * 1000),
            "customerid": post_dict["customerid"],
            "deptid": post_dict["deptid"],
            "source": "app",
            "templateid": post_dict["templateid"],
            "stuNo": post_dict["stuNo"],
            "username": post_dict["username"],
            "phonenum": username,
            "userid": post_dict["userid"],
            "updatainfo": post_dict["updatainfo"],
            "customerAppTypeRuleId": id,
            "clockState": 0,
            "token": token,
        },
        "token": token,
    }
    try:
        res = requests.post(
            "https://reportedh5.17wanxiao.com/sass/api/epmpics", json=check_json
        ).json()
        # Success and failure codes only differ in log level; both branches
        # previously returned an identical dict, so the return is shared now.
        if res["code"] != "10000":
            logging.warning(res)
        else:
            logging.info(res)
        return dict(
            status=1,
            res=res,
            post_dict=post_dict,
            check_json=check_json,
            type=post_dict["templateid"],
        )
    # Was "except BaseException": no longer swallows SystemExit/KeyboardInterrupt.
    except Exception:
        errmsg = f"```校内打卡请求出错```"
        logging.warning("校内打卡请求出错")
        return dict(status=0, errmsg=errmsg)
def check_in(username, password, device_id):
    """Run the daily health check-in for one account.

    Logs in, fetches the account's template info, then submits whichever of
    the two known health-report flows the school uses: the "epmpics" flow
    when a template is available, else the "recall/receive" flow.

    (The optional on-campus clock-in flow that used to live here as
    commented-out code was removed; see campus_check_in()/get_id_list()
    to re-enable it.)

    :param username: phone number used as the account name
    :param password: account password
    :param device_id: device id used for the simulated app login
    :return: list of per-step result dicts ({"status": ..., ...})
    """
    check_dict_list = []
    # Log in to obtain the token used by all subsequent requests.
    token = get_token(username, password, device_id)
    if not token:
        errmsg = f"{username[:4]},获取token失败,打卡失败"
        logging.warning(errmsg)
        check_dict_list.append({"status": 0, "errmsg": errmsg})
        return check_dict_list
    # Fetch the per-user profile (contains customerId needed by flow 2).
    user_info = get_user_info(token)
    if not user_info:
        errmsg = f"{username[:4]},获取user_info失败,打卡失败"
        logging.warning(errmsg)
        check_dict_list.append({"status": 0, "errmsg": errmsg})
        return check_dict_list
    # Flow 1: "epmpics"-style health report based on the pneumonia template.
    json1 = {
        "businessType": "epmpics",
        "jsonData": {"templateid": "pneumonia", "token": token},
        "method": "userComeApp",
    }
    post_dict = get_post_json(json1)
    if post_dict:
        healthy_check_dict = healthy_check_in(token, username, post_dict)
        check_dict_list.append(healthy_check_dict)
    else:
        # Flow 2: "recall/receive"-style health report.
        post_dict = get_recall_data(token)
        healthy_check_dict = receive_check_in(token, user_info["customerId"], post_dict)
        check_dict_list.append(healthy_check_dict)
    return check_dict_list
def wanxiao_server_push(sckey, check_info_list):
    """Render check-in results as ServerChan markdown and push them.

    :param sckey: ServerChan SCKEY
    :param check_info_list: result dicts produced by check_in()
    :return: whatever server_push() returns
    """
    utc8_time = datetime.datetime.utcnow() + datetime.timedelta(hours=8)  # Beijing time (UTC+8)
    push_list = [f"""
------
#### 现在时间:
```
{utc8_time.strftime("%Y-%m-%d %H:%M:%S %p")}
```"""]
    for check_info in check_info_list:
        if check_info["status"]:
            # Build a markdown table row for every description/value pair,
            # matching checkbox metadata with the submitted updatainfo values.
            if check_info["post_dict"].get("checkbox"):
                post_msg = "\n".join(
                    [
                        f"| {i['description']} | {j['value']} |"
                        for i in check_info["post_dict"].get("checkbox")
                        for j in check_info["post_dict"].get("updatainfo")
                        if i["propertyname"] == j["propertyname"]
                    ]
                )
            else:
                post_msg = "暂无详情"
            # Flow 1 results carry "username", flow 2 results carry "name".
            name = check_info["post_dict"].get("username")
            if not name:
                name = check_info["post_dict"]["name"]
            push_list.append(
                f"""#### {name}{check_info['type']}打卡信息:
```
{json.dumps(check_info['check_json'], sort_keys=True, indent=4, ensure_ascii=False)}
```
------
| Text | Message |
| :----------------------------------- | :--- |
{post_msg}
------
```
{check_info['res']}
```"""
            )
        else:
            push_list.append(
                f"""------
#### {check_info['errmsg']}
------
"""
            )
    push_list.append(
        f"""
>
> [17wanxiaoCheckin-Actions](https://github.com/ReaJason/17wanxiaoCheckin-Actions)
>
>微信消息测试!
"""
    )
    return server_push(sckey, "健康打卡", "\n".join(push_list))
def wanxiao_qq_mail_push(send_email, send_pwd, receive_email, check_info_list):
    """Render check-in results as an HTML e-mail and send it via QQ mail.

    :param send_email: sender address
    :param send_pwd: sender's SMTP auth code/password
    :param receive_email: recipient address
    :param check_info_list: result dicts produced by check_in()
    :return: whatever qq_email_push() returns
    """
    bj_time = datetime.datetime.utcnow() + datetime.timedelta(hours=8)
    # NOTE(review): this strftime result is discarded; bj_time is interpolated
    # below as a raw datetime. Probably meant "bj_time = bj_time.strftime(...)".
    bj_time.strftime("%Y-%m-%d %H:%M:%S %p")
    mail_msg_list = [f"""
<h2><center> >>>> <a href="https://github.com/ReaJason/17wanxiaoCheckin-Actions">17wanxiaoCheckin-Actions</a>
<<<<</center></h2>
<h2><center>微信消息提醒!</center></h2>
<h3><center>打卡时间:{bj_time}</center></h3>
"""
    ]
    for check in check_info_list:
        if check["status"]:
            # Flow 1 results carry "username", flow 2 results carry "name".
            name = check['post_dict'].get('username')
            if not name:
                name = check['post_dict']['name']
            mail_msg_list.append(f"""<hr>
<details>
<summary style="font-family: 'Microsoft YaHei UI',serif; color: deepskyblue;">{name}:{check["type"]} 打卡结果:{check['res']}</summary>
<pre><code>
{json.dumps(check['check_json'], sort_keys=True, indent=4, ensure_ascii=False)}
</code></pre>
</details>
<details>
<summary style="font-family: 'Microsoft YaHei UI',serif; color: black;" >>>>填写数据抓包详情(便于代码的编写)<<<</summary>
<pre><code>
{json.dumps(check['post_dict']['updatainfo_detail'], sort_keys=True, indent=4, ensure_ascii=False)}
</code></pre>
</details>
<details>
<summary style="font-family: 'Microsoft YaHei UI',serif; color: lightskyblue;" >>>>打卡信息数据表格<<<</summary>
<table id="customers">
<tr>
<th>Text</th>
<th>Value</th>
</tr>
"""
            )
            # Alternate plain and highlighted (class="alt") table rows.
            for index, box in enumerate(check["post_dict"]["checkbox"]):
                if index % 2:
                    mail_msg_list.append(
                        f"""<tr>
<td>{box['description']}</td>
<td>{box['value']}</td>
</tr>"""
                    )
                else:
                    mail_msg_list.append(f"""<tr class="alt">
<td>{box['description']}</td>
<td>{box['value']}</td>
</tr>"""
                    )
            mail_msg_list.append(
                f"""
</table></details>"""
            )
        else:
            mail_msg_list.append(
                f"""<hr>
<b style="color: red">{check['errmsg']}</b>"""
            )
    # Inline stylesheet for the results table above.
    css = """<style type="text/css">
#customers
{
font-family:"Trebuchet MS", Arial, Helvetica, sans-serif;
width:100%;
border-collapse:collapse;
}
#customers td, #customers th
{
font-size:1em;
border:1px solid #98bf21;
padding:3px 7px 2px 7px;
}
#customers th
{
font-size:1.1em;
text-align:left;
padding-top:5px;
padding-bottom:4px;
background-color:#A7C942;
color:#ffffff;
}
#customers tr.alt td
{
color:#000000;
background-color:#EAF2D3;
}
</style>"""
    mail_msg_list.append(css)
    return qq_email_push(send_email, send_pwd, receive_email,
                         title="完美校园健康打卡", text="".join(mail_msg_list))
def wanxiao_qmsg_push(key, qq_num, check_info_list, send_type):
    """Render check-in results as a Qmsg text message and push it.

    :param key: Qmsg key
    :param qq_num: destination QQ number
    :param check_info_list: result dicts produced by check_in()
    :param send_type: Qmsg send type (e.g. "send")
    :return: whatever qmsg_push() returns
    """
    utc8_time = datetime.datetime.utcnow() + datetime.timedelta(hours=8)  # Beijing time (UTC+8)
    # "@face=NN@" is Qmsg's inline emoji syntax.
    push_list = [f'@face=74@ {utc8_time.strftime("%Y-%m-%d %H:%M:%S")} @face=74@ ']
    for check_info in check_info_list:
        if check_info["status"]:
            # Flow 1 results carry "username", flow 2 results carry "name".
            name = check_info["post_dict"].get("username")
            if not name:
                name = check_info["post_dict"]["name"]
            push_list.append(f"""\
@face=54@ {name}{check_info['type']} @face=54@
@face=211@
{check_info['res']}
@face=211@""")
        else:
            push_list.append(check_info['errmsg'])
    return qmsg_push(key, qq_num, "\n".join(push_list), send_type)
def main_handler(*args, **kwargs):
    """Entry point (signature also matches serverless/cloud-function handlers).

    Reads account credentials and optional push-channel secrets from
    environment variables, runs the check-in for every account, then pushes
    the aggregated results over each configured channel (ServerChan, QQ
    e-mail, Qmsg).
    """
    initLogging()
    raw_info = []
    # Multiple accounts are supported as comma-separated env values;
    # USERNAME/PASSWORD/DEVICEID are required, the push secrets are optional.
    username_list = os.environ['USERNAME'].split(',')
    password_list = os.environ['PASSWORD'].split(',')
    device_id_list = os.environ['DEVICEID'].split(',')
    sckey = os.environ.get('SCKEY')
    key = os.environ.get('KEY')
    qq_num = os.environ.get('QQ_NUM')
    send_email = os.environ.get('SEND_EMAIL')
    send_pwd = os.environ.get('SEND_PWD')
    receive_email = os.environ.get('RECEIVE_EMAIL')
    # Pair up the three lists positionally, skipping empty entries
    # (e.g. from trailing commas).
    for username, password, device_id in zip(
            [i.strip() for i in username_list if i != ''],
            [i.strip() for i in password_list if i != ''],
            [i.strip() for i in device_id_list if i != '']):
        check_dict = check_in(username, password, device_id)
        raw_info.extend(check_dict)
    # Push over every channel whose secrets are configured.
    if sckey:
        logging.info(wanxiao_server_push(sckey, raw_info))
    if send_email and send_pwd and receive_email:
        logging.info(wanxiao_qq_mail_push(send_email, send_pwd, receive_email, raw_info))
    if key:
        logging.info(wanxiao_qmsg_push(key, qq_num, raw_info, send_type="send"))
if __name__ == "__main__":
main_handler()
| 32.211829 | 130 | 0.530575 | import time
import os
import datetime
import json
import logging
import requests
from utils.server_chan import server_push
from utils.qq_email import qq_email_push
from utils.qmsg import qmsg_push
from login import CampusLogin
def initLogging():
logging.getLogger().setLevel(logging.INFO)
logging.basicConfig(format="[%(levelname)s]; %(message)s")
def get_token(username, password, device_id):
for _ in range(3):
try:
campus_login = CampusLogin(phone_num=username, device_id=device_id)
except Exception as e:
logging.warning(e)
continue
login_dict = campus_login.pwd_login(password)
if login_dict["status"]:
logging.info(f"{username[:4]},{login_dict['msg']}")
return login_dict["token"]
elif login_dict['errmsg'] == "该手机号未注册完美校园":
logging.warning(f"{username[:4]},{login_dict['errmsg']}")
return None
elif login_dict['errmsg'].startswith("密码错误"):
logging.warning(f"{username[:4]},{login_dict['errmsg']}")
logging.warning("代码是死的,密码错误了就是错误了,赶紧去查看一下是不是输错了!")
return None
else:
logging.info(f"{username[:4]},{login_dict['errmsg']}")
logging.warning('正在尝试重新登录......')
time.sleep(5)
return None
def get_school_name(token):
post_data = {"token": token, "method": "WX_BASE_INFO", "param": "%7B%7D"}
headers = {"Content-Type": "application/x-www-form-urlencoded"}
try:
res = requests.post(
"https://server.59wanmei.com/YKT_Interface/xyk",
data=post_data,
headers=headers,
)
return res.json()["data"]["customerName"]
except:
return "泪目,没获取到学校名字"
def get_user_info(token):
data = {"appClassify": "DK", "token": token}
for _ in range(3):
try:
res = requests.post(
"https://reportedh5.17wanxiao.com/api/clock/school/getUserInfo", data=data
)
user_info = res.json()["userInfo"]
logging.info('获取个人信息成功')
return user_info
except:
logging.warning('获取个人信息失败,正在重试......')
time.sleep(1)
return None
def get_post_json(post_json):
for _ in range(3):
try:
res = requests.post(
url="https://reportedh5.17wanxiao.com/sass/api/epmpics",
json=post_json,
timeout=10,
).json()
except:
logging.warning("获取完美校园打卡post参数失败,正在重试...")
time.sleep(1)
continue
if res["code"] != "10000":
logging.warning(res)
data = json.loads(res["data"])
post_dict = {
"areaStr": data['areaStr'],
"deptStr": data['deptStr'],
"deptid": data['deptStr']['deptid'] if data['deptStr'] else None,
"customerid": data['customerid'],
"userid": data['userid'],
"username": data['username'],
"stuNo": data['stuNo'],
"phonenum": data["phonenum"],
"templateid": data["templateid"],
"updatainfo": [
{"propertyname": i["propertyname"], "value": i["value"]}
for i in data["cusTemplateRelations"]
],
"updatainfo_detail": [
{
"propertyname": i["propertyname"],
"checkValues": i["checkValues"],
"description": i["decription"],
"value": i["value"],
}
for i in data["cusTemplateRelations"]
],
"checkbox": [
{"description": i["decription"], "value": i["value"], "propertyname": i["propertyname"]}
for i in data["cusTemplateRelations"]
],
}
logging.info("获取完美校园打卡post参数成功")
return post_dict
return None
def healthy_check_in(token, username, post_dict):
check_json = {
"businessType": "epmpics",
"method": "submitUpInfo",
"jsonData": {
"deptStr": post_dict["deptStr"],
"areaStr": post_dict["areaStr"],
"reportdate": round(time.time() * 1000),
"customerid": post_dict["customerid"],
"deptid": post_dict["deptid"],
"source": "app",
"templateid": post_dict["templateid"],
"stuNo": post_dict["stuNo"],
"username": post_dict["username"],
"phonenum": username,
"userid": post_dict["userid"],
"updatainfo": post_dict["updatainfo"],
"gpsType": 1,
"token": token,
},
}
for _ in range(3):
try:
res = requests.post(
"https://reportedh5.17wanxiao.com/sass/api/epmpics", json=check_json
).json()
if res['code'] == '10000':
logging.info(res)
return {
"status": 1,
"res": res,
"post_dict": post_dict,
"check_json": check_json,
"type": "healthy",
}
elif "频繁" in res['data']:
logging.info(res)
return {
"status": 1,
"res": res,
"post_dict": post_dict,
"check_json": check_json,
"type": "healthy",
}
else:
logging.warning(res)
return {"status": 0, "errmsg": f"{post_dict['username']}: {res}"}
except:
errmsg = f"```打卡请求出错```"
logging.warning("健康打卡请求出错")
return {"status": 0, "errmsg": errmsg}
return {"status": 0, "errmsg": "健康打卡请求出错"}
def get_recall_data(token):
for _ in range(3):
try:
res = requests.post(
url="https://reportedh5.17wanxiao.com/api/reported/recall",
data={"token": token},
timeout=10,
).json()
except:
logging.warning("获取完美校园打卡post参数失败,正在重试...")
time.sleep(1)
continue
if res["code"] == 0:
logging.info("获取完美校园打卡post参数成功")
return res["data"]
else:
logging.warning(res)
return None
def receive_check_in(token, custom_id, post_dict):
check_json = {
"userId": post_dict["userId"],
"name": post_dict["name"],
"stuNo": post_dict["stuNo"],
"whereabouts": post_dict["whereabouts"],
"familyWhereabouts": "",
"beenToWuhan": post_dict["beenToWuhan"],
"contactWithPatients": post_dict["contactWithPatients"],
"symptom": post_dict["symptom"],
"fever": post_dict["fever"],
"cough": post_dict["cough"],
"soreThroat": post_dict["soreThroat"],
"debilitation": post_dict["debilitation"],
"diarrhea": post_dict["diarrhea"],
"cold": post_dict["cold"],
"staySchool": post_dict["staySchool"],
"contacts": post_dict["contacts"],
"emergencyPhone": post_dict["emergencyPhone"],
"address": post_dict["address"],
"familyForAddress": "",
"collegeId": post_dict["collegeId"],
"majorId": post_dict["majorId"],
"classId": post_dict["classId"],
"classDescribe": post_dict["classDescribe"],
"temperature": post_dict["temperature"],
"confirmed": post_dict["confirmed"],
"isolated": post_dict["isolated"],
"passingWuhan": post_dict["passingWuhan"],
"passingHubei": post_dict["passingHubei"],
"patientSide": post_dict["patientSide"],
"patientContact": post_dict["patientContact"],
"mentalHealth": post_dict["mentalHealth"],
"wayToSchool": post_dict["wayToSchool"],
"backToSchool": post_dict["backToSchool"],
"haveBroadband": post_dict["haveBroadband"],
"emergencyContactName": post_dict["emergencyContactName"],
"helpInfo": "",
"passingCity": "",
"longitude": "",
"latitude": "",
"token": token,
}
headers = {
"referer": f"https://reportedh5.17wanxiao.com/nCovReport/index.html?token={token}&customerId={custom_id}",
"content-type": "application/x-www-form-urlencoded;charset=UTF-8",
}
try:
res = requests.post(
"https://reportedh5.17wanxiao.com/api/reported/receive",
headers=headers,
data=check_json,
).json()
if res["code"] == 0:
logging.info(res)
return dict(
status=1,
res=res,
post_dict=post_dict,
check_json=check_json,
type="healthy",
)
else:
logging.warning(res)
return dict(
status=1,
res=res,
post_dict=post_dict,
check_json=check_json,
type="healthy",
)
except:
errmsg = f"```打卡请求出错```"
logging.warning("打卡请求出错,网络不稳定")
return dict(status=0, errmsg=errmsg)
def get_ap():
now_time = datetime.datetime.utcnow() + datetime.timedelta(hours=8)
am = 0 <= now_time.hour < 12
pm = 12 <= now_time.hour < 17
ev = 17 <= now_time.hour <= 23
return [am, pm, ev]
def get_id_list(token, custom_id):
post_data = {
"customerAppTypeId": custom_id,
"longitude": "",
"latitude": "",
"token": token,
}
try:
res = requests.post(
"https://reportedh5.17wanxiao.com/api/clock/school/rules", data=post_data
)
return res.json()["customerAppTypeDto"]["ruleList"]
except:
return None
def get_id_list_v1(token):
post_data = {"appClassify": "DK", "token": token}
try:
res = requests.post(
"https://reportedh5.17wanxiao.com/api/clock/school/childApps",
data=post_data,
)
if res.json()["appList"]:
id_list = sorted(
res.json()["appList"][-1]["customerAppTypeRuleList"],
key=lambda x: x["id"],
)
res_dict = [
{"id": j["id"], "templateid": f"clockSign{i + 1}"}
for i, j in enumerate(id_list)
]
return res_dict
return None
except:
return None
def campus_check_in(username, token, post_dict, id):
check_json = {
"businessType": "epmpics",
"method": "submitUpInfoSchool",
"jsonData": {
"deptStr": post_dict["deptStr"],
"areaStr": post_dict["areaStr"],
"reportdate": round(time.time() * 1000),
"customerid": post_dict["customerid"],
"deptid": post_dict["deptid"],
"source": "app",
"templateid": post_dict["templateid"],
"stuNo": post_dict["stuNo"],
"username": post_dict["username"],
"phonenum": username,
"userid": post_dict["userid"],
"updatainfo": post_dict["updatainfo"],
"customerAppTypeRuleId": id,
"clockState": 0,
"token": token,
},
"token": token,
}
try:
res = requests.post(
"https://reportedh5.17wanxiao.com/sass/api/epmpics", json=check_json
).json()
if res["code"] != "10000":
logging.warning(res)
return dict(
status=1,
res=res,
post_dict=post_dict,
check_json=check_json,
type=post_dict["templateid"],
)
else:
logging.info(res)
return dict(
status=1,
res=res,
post_dict=post_dict,
check_json=check_json,
type=post_dict["templateid"],
)
except BaseException:
errmsg = f"```校内打卡请求出错```"
logging.warning("校内打卡请求出错")
return dict(status=0, errmsg=errmsg)
def check_in(username, password, device_id):
check_dict_list = []
token = get_token(username, password, device_id)
if not token:
errmsg = f"{username[:4]},获取token失败,打卡失败"
logging.warning(errmsg)
check_dict_list.append({"status": 0, "errmsg": errmsg})
return check_dict_list
user_info = get_user_info(token)
if not user_info:
errmsg = f"{username[:4]},获取user_info失败,打卡失败"
logging.warning(errmsg)
check_dict_list.append({"status": 0, "errmsg": errmsg})
return check_dict_list
json1 = {
"businessType": "epmpics",
"jsonData": {"templateid": "pneumonia", "token": token},
"method": "userComeApp",
}
post_dict = get_post_json(json1)
if post_dict:
healthy_check_dict = healthy_check_in(token, username, post_dict)
check_dict_list.append(healthy_check_dict)
else:
post_dict = get_recall_data(token)
healthy_check_dict = receive_check_in(token, user_info["customerId"], post_dict)
check_dict_list.append(healthy_check_dict)
return check_dict_list
def wanxiao_server_push(sckey, check_info_list):
    """Render check-in results as ServerChan markdown and push them."""
    now_utc8 = datetime.datetime.utcnow() + datetime.timedelta(hours=8)
    sections = [f"""
------
#### 现在时间:
```
{now_utc8.strftime("%Y-%m-%d %H:%M:%S %p")}
```"""]
    for info in check_info_list:
        if not info["status"]:
            # Failed step: surface only its error message.
            sections.append(
                f"""------
#### {info['errmsg']}
------
"""
            )
            continue
        # Pair every checkbox entry with the submitted value sharing its
        # propertyname; one markdown table row per match.
        boxes = info["post_dict"].get("checkbox")
        if boxes:
            rows = []
            for box in boxes:
                for submitted in info["post_dict"].get("updatainfo"):
                    if box["propertyname"] == submitted["propertyname"]:
                        rows.append(f"| {box['description']} | {submitted['value']} |")
            post_msg = "\n".join(rows)
        else:
            post_msg = "暂无详情"
        name = info["post_dict"].get("username") or info["post_dict"]["name"]
        sections.append(
            f"""#### {name}{info['type']}打卡信息:
```
{json.dumps(info['check_json'], sort_keys=True, indent=4, ensure_ascii=False)}
```
------
| Text | Message |
| :----------------------------------- | :--- |
{post_msg}
------
```
{info['res']}
```"""
        )
    sections.append(
        f"""
>
> [17wanxiaoCheckin-Actions](https://github.com/ReaJason/17wanxiaoCheckin-Actions)
>
>微信消息测试!
    """
    )
    return server_push(sckey, "健康打卡", "\n".join(sections))
def wanxiao_qq_mail_push(send_email, send_pwd, receive_email, check_info_list):
    """Build an HTML digest of check-in results and send it via QQ mail.

    Args:
        send_email: sender QQ mailbox address.
        send_pwd: SMTP authorization code for the sender mailbox.
        receive_email: recipient mailbox address.
        check_info_list: result dicts from ``check_in()``; each carries a
            ``status`` flag plus either result payloads or an ``errmsg``.

    Returns:
        Whatever ``qq_email_push()`` returns.
    """
    bj_time = datetime.datetime.utcnow() + datetime.timedelta(hours=8)
    # BUGFIX: the strftime() result was previously discarded, so the raw
    # datetime repr leaked into the mail body; keep the formatted string.
    bj_time = bj_time.strftime("%Y-%m-%d %H:%M:%S %p")
    mail_msg_list = [f"""
<h2><center> >>>> <a href="https://github.com/ReaJason/17wanxiaoCheckin-Actions">17wanxiaoCheckin-Actions</a>
<<<<</center></h2>
<h2><center>微信消息提醒!</center></h2>
<h3><center>打卡时间:{bj_time}</center></h3>
    """
    ]
    for check in check_info_list:
        if check["status"]:
            name = check['post_dict'].get('username')
            if not name:
                name = check['post_dict']['name']
            # NOTE(review): assumes post_dict always carries
            # 'updatainfo_detail' on success — confirm against producer.
            mail_msg_list.append(f"""<hr>
<details>
<summary style="font-family: 'Microsoft YaHei UI',serif; color: deepskyblue;">{name}:{check["type"]} 打卡结果:{check['res']}</summary>
<pre><code>
{json.dumps(check['check_json'], sort_keys=True, indent=4, ensure_ascii=False)}
</code></pre>
</details>
<details>
<summary style="font-family: 'Microsoft YaHei UI',serif; color: black;" >>>>填写数据抓包详情(便于代码的编写)<<<</summary>
<pre><code>
{json.dumps(check['post_dict']['updatainfo_detail'], sort_keys=True, indent=4, ensure_ascii=False)}
</code></pre>
</details>
<details>
<summary style="font-family: 'Microsoft YaHei UI',serif; color: lightskyblue;" >>>>打卡信息数据表格<<<</summary>
<table id="customers">
<tr>
<th>Text</th>
<th>Value</th>
</tr>
"""
            )
            # ROBUSTNESS: checkbox may be absent (wanxiao_server_push guards
            # for this case too); skip the table rows instead of raising.
            for index, box in enumerate(check["post_dict"].get("checkbox") or []):
                if index % 2:
                    mail_msg_list.append(
                        f"""<tr>
<td>{box['description']}</td>
<td>{box['value']}</td>
</tr>"""
                    )
                else:
                    # Even rows get the "alt" class for zebra striping.
                    mail_msg_list.append(f"""<tr class="alt">
<td>{box['description']}</td>
<td>{box['value']}</td>
</tr>"""
                    )
            mail_msg_list.append(
                f"""
</table></details>"""
            )
        else:
            mail_msg_list.append(
                f"""<hr>
<b style="color: red">{check['errmsg']}</b>"""
            )
    # Inline CSS for the zebra-striped results table.
    css = """<style type="text/css">
#customers
{
font-family:"Trebuchet MS", Arial, Helvetica, sans-serif;
width:100%;
border-collapse:collapse;
}
#customers td, #customers th
{
font-size:1em;
border:1px solid #98bf21;
padding:3px 7px 2px 7px;
}
#customers th
{
font-size:1.1em;
text-align:left;
padding-top:5px;
padding-bottom:4px;
background-color:#A7C942;
color:#ffffff;
}
#customers tr.alt td
{
color:#000000;
background-color:#EAF2D3;
}
</style>"""
    mail_msg_list.append(css)
    return qq_email_push(send_email, send_pwd, receive_email,
                         title="完美校园健康打卡", text="".join(mail_msg_list))
def wanxiao_qmsg_push(key, qq_num, check_info_list, send_type):
    """Condense check-in results into a Qmsg message and push it."""
    stamp = datetime.datetime.utcnow() + datetime.timedelta(hours=8)
    lines = [f'@face=74@ {stamp.strftime("%Y-%m-%d %H:%M:%S")} @face=74@ ']
    for info in check_info_list:
        if not info["status"]:
            lines.append(info['errmsg'])
            continue
        name = info["post_dict"].get("username") or info["post_dict"]["name"]
        lines.append(f"""\
@face=54@ {name}{info['type']} @face=54@
@face=211@
{info['res']}
@face=211@""")
    return qmsg_push(key, qq_num, "\n".join(lines), send_type)
def main_handler(*args, **kwargs):
    """Entry point: read credentials from env vars, run every account's
    check-in, then fan the results out to each configured push channel."""
    initLogging()
    results = []

    # Comma-separated credential lists; blank entries are dropped, the
    # rest whitespace-trimmed.
    usernames = [item.strip() for item in os.environ['USERNAME'].split(',') if item != '']
    passwords = [item.strip() for item in os.environ['PASSWORD'].split(',') if item != '']
    device_ids = [item.strip() for item in os.environ['DEVICEID'].split(',') if item != '']

    # Optional push-channel configuration.
    sckey = os.environ.get('SCKEY')
    key = os.environ.get('KEY')
    qq_num = os.environ.get('QQ_NUM')
    send_email = os.environ.get('SEND_EMAIL')
    send_pwd = os.environ.get('SEND_PWD')
    receive_email = os.environ.get('RECEIVE_EMAIL')

    for username, password, device_id in zip(usernames, passwords, device_ids):
        results.extend(check_in(username, password, device_id))

    if sckey:
        logging.info(wanxiao_server_push(sckey, results))
    if send_email and send_pwd and receive_email:
        logging.info(wanxiao_qq_mail_push(send_email, send_pwd, receive_email, results))
    if key:
        logging.info(wanxiao_qmsg_push(key, qq_num, results, send_type="send"))
# Allow running this module directly; serverless platforms call
# main_handler() themselves.
if __name__ == "__main__":
    main_handler()
| true | true |
f720a3b2937b6b9bd9e76101573c229ffea21101 | 913 | py | Python | tabu/package_info.py | wbernoudy/dwave-tabu | 793e76405ba60d2da87bc15634adeda821d82564 | [
"Apache-2.0"
] | null | null | null | tabu/package_info.py | wbernoudy/dwave-tabu | 793e76405ba60d2da87bc15634adeda821d82564 | [
"Apache-2.0"
] | null | null | null | tabu/package_info.py | wbernoudy/dwave-tabu | 793e76405ba60d2da87bc15634adeda821d82564 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Package metadata constants, consumed by setup.py and the docs build.
__packagename__ = 'dwave-tabu'
__title__ = 'D-Wave Tabu'
__version__ = '0.1.3'
__author__ = 'D-Wave Systems Inc.'
__authoremail__ = 'tools@dwavesys.com'
__description__ = 'Optimized Tabu solver for QUBOs'
__url__ = 'https://github.com/dwavesystems/dwave-tabu'
__license__ = 'Apache 2.0'
__copyright__ = '2018, D-Wave Systems Inc.'
| 38.041667 | 74 | 0.756846 |
__packagename__ = 'dwave-tabu'
__title__ = 'D-Wave Tabu'
__version__ = '0.1.3'
__author__ = 'D-Wave Systems Inc.'
__authoremail__ = 'tools@dwavesys.com'
__description__ = 'Optimized Tabu solver for QUBOs'
__url__ = 'https://github.com/dwavesystems/dwave-tabu'
__license__ = 'Apache 2.0'
__copyright__ = '2018, D-Wave Systems Inc.'
| true | true |
f720a3be847c19fa4b9c7affc33b5d31a4209713 | 276 | py | Python | EyeTracker/display.py | PoweredByME/SSVEP_FYP | 6839be6a4aeddfa1b29c587b23d64c95a90810f8 | [
"MIT"
] | null | null | null | EyeTracker/display.py | PoweredByME/SSVEP_FYP | 6839be6a4aeddfa1b29c587b23d64c95a90810f8 | [
"MIT"
] | null | null | null | EyeTracker/display.py | PoweredByME/SSVEP_FYP | 6839be6a4aeddfa1b29c587b23d64c95a90810f8 | [
"MIT"
] | null | null | null | import cv2;
class Display(object):
    """Thin wrapper around OpenCV's HighGUI window functions."""

    # IDIOM: dropped the no-op __init__/pass and the non-Python trailing
    # semicolons; behavior and interface are unchanged.

    def showFrame(self, frame, windowName="frame"):
        """Show *frame* in a user-resizable window named *windowName*."""
        # WINDOW_NORMAL lets the user resize the window manually.
        cv2.namedWindow(windowName, cv2.WINDOW_NORMAL)
        cv2.imshow(windowName, frame)

    def end(self):
        """Close every OpenCV window opened by this process."""
        cv2.destroyAllWindows()
class Display(object):
def __init__(self):
pass;
def showFrame(self, frame, windowName = "frame"):
cv2.namedWindow(windowName, cv2.WINDOW_NORMAL);
cv2.imshow(windowName, frame);
def end(self):
cv2.destroyAllWindows(); | true | true |
f720a3c8713f607158450310ae4112fcb026294a | 6,344 | py | Python | simple_history/manager.py | felixschloesser/django-simple-history | 28abacb8a776fbaffcf0a42432a6a88be3561a86 | [
"BSD-3-Clause"
] | 2 | 2021-03-26T09:20:05.000Z | 2021-05-26T13:46:48.000Z | simple_history/manager.py | felixschloesser/django-simple-history | 28abacb8a776fbaffcf0a42432a6a88be3561a86 | [
"BSD-3-Clause"
] | 42 | 2021-03-30T11:12:23.000Z | 2022-03-28T09:20:13.000Z | simple_history/manager.py | hramezani/django-simple-history | 32645206749a1cc68539d9ad6499f1a938b2c9f4 | [
"BSD-3-Clause"
] | 1 | 2021-10-05T10:25:35.000Z | 2021-10-05T10:25:35.000Z | from django.db import connection, models
from django.db.models import OuterRef, Subquery
from django.utils import timezone
from simple_history.utils import get_change_reason_from_object
class HistoryDescriptor:
    """Model attribute that exposes history through a HistoryManager.

    Class-level access yields a manager over the whole historical table;
    instance-level access yields one scoped to that instance.
    """

    def __init__(self, model):
        self.model = model

    def __get__(self, instance, owner):
        # HistoryManager's ``instance`` argument defaults to None, so one
        # constructor call covers both class and instance access paths.
        return HistoryManager(self.model, instance)
class HistoryManager(models.Manager):
    """Manager over a model's historical records.

    Constructed by :class:`HistoryDescriptor`. With an ``instance`` it is
    scoped to that single object's history; without one it spans the
    whole historical table.
    """

    def __init__(self, model, instance=None):
        super(HistoryManager, self).__init__()
        self.model = model
        self.instance = instance

    def get_super_queryset(self):
        # Plain, unscoped queryset straight from models.Manager.
        return super(HistoryManager, self).get_queryset()

    def get_queryset(self):
        # Scope to the bound instance's primary key when one was given.
        qs = self.get_super_queryset()
        if self.instance is None:
            return qs

        if isinstance(self.instance._meta.pk, models.ForeignKey):
            # FK primary keys store the raw value under "<name>_id".
            key_name = self.instance._meta.pk.name + "_id"
        else:
            key_name = self.instance._meta.pk.name
        return self.get_super_queryset().filter(**{key_name: self.instance.pk})

    def most_recent(self):
        """
        Returns the most recent copy of the instance available in the history.
        """
        if not self.instance:
            raise TypeError(
                "Can't use most_recent() without a {} instance.".format(
                    self.model._meta.object_name
                )
            )
        tmp = []
        excluded_fields = getattr(self.model, "_history_excluded_fields", [])
        # Collect concrete column names, using "<name>_id" for FKs so the
        # values() call maps straight onto model constructor kwargs.
        for field in self.instance._meta.fields:
            if field.name in excluded_fields:
                continue
            if isinstance(field, models.ForeignKey):
                tmp.append(field.name + "_id")
            else:
                tmp.append(field.name)
        fields = tuple(tmp)
        try:
            # Index 0 is the newest record (history is ordered latest-first).
            values = self.get_queryset().values(*fields)[0]
        except IndexError:
            raise self.instance.DoesNotExist(
                "%s has no historical record." % self.instance._meta.object_name
            )
        return self.instance.__class__(**values)

    def as_of(self, date):
        """Get a snapshot as of a specific date.

        Returns an instance, or an iterable of the instances, of the
        original model with all the attributes set according to what
        was present on the object on the date provided.
        """
        if not self.instance:
            # Unscoped manager: reconstruct the whole model set at `date`.
            return self._as_of_set(date)
        queryset = self.get_queryset().filter(history_date__lte=date)
        try:
            history_obj = queryset[0]
        except IndexError:
            raise self.instance.DoesNotExist(
                "%s had not yet been created." % self.instance._meta.object_name
            )
        if history_obj.history_type == "-":
            # The latest record at `date` is a deletion marker.
            raise self.instance.DoesNotExist(
                "%s had already been deleted." % self.instance._meta.object_name
            )
        return history_obj.instance

    def _as_of_set(self, date):
        # Yield, per primary key, the instance reconstructed from the newest
        # historical row at or before `date`, skipping deleted objects.
        model = type(self.model().instance)  # a bit of a hack to get the model
        pk_attr = model._meta.pk.name
        queryset = self.get_queryset().filter(history_date__lte=date)
        # If using MySQL, need to get a list of IDs in memory and then use them for the
        # second query.
        # Does mean two loops through the DB to get the full set, but still a speed
        # improvement.
        backend = connection.vendor
        if backend == "mysql":
            history_ids = {}
            for item in queryset.order_by("-history_date", "-pk"):
                if getattr(item, pk_attr) not in history_ids:
                    history_ids[getattr(item, pk_attr)] = item.pk
            latest_historics = queryset.filter(history_id__in=history_ids.values())
        elif backend == "postgresql":
            # PostgreSQL supports DISTINCT ON, so the newest row per pk can
            # be selected in a single pass.
            latest_pk_attr_historic_ids = (
                queryset.order_by(pk_attr, "-history_date", "-pk")
                .distinct(pk_attr)
                .values_list("pk", flat=True)
            )
            latest_historics = queryset.filter(
                history_id__in=latest_pk_attr_historic_ids
            )
        else:
            # Generic backends: correlated subquery picking the newest
            # historical row for each pk.
            latest_pk_attr_historic_ids = (
                queryset.filter(**{pk_attr: OuterRef(pk_attr)})
                .order_by("-history_date", "-pk")
                .values("pk")[:1]
            )
            latest_historics = queryset.filter(
                history_id__in=Subquery(latest_pk_attr_historic_ids)
            )
        adjusted = latest_historics.exclude(history_type="-").order_by(pk_attr)
        for historic_item in adjusted:
            yield historic_item.instance

    def bulk_history_create(
        self,
        objs,
        batch_size=None,
        update=False,
        default_user=None,
        default_change_reason="",
        default_date=None,
    ):
        """
        Bulk create the history for the objects specified by objs.
        If called by bulk_update_with_history, use the update boolean and
        save the history_type accordingly.
        """
        # "+" marks a creation record, "~" an update record.
        history_type = "+"
        if update:
            history_type = "~"

        historical_instances = []
        for instance in objs:
            # Per-instance overrides (_history_user/_history_date) win over
            # the supplied defaults.
            history_user = getattr(
                instance,
                "_history_user",
                default_user or self.model.get_default_history_user(instance),
            )
            row = self.model(
                history_date=getattr(
                    instance, "_history_date", default_date or timezone.now()
                ),
                history_user=history_user,
                history_change_reason=get_change_reason_from_object(instance)
                or default_change_reason,
                history_type=history_type,
                **{
                    field.attname: getattr(instance, field.attname)
                    for field in instance._meta.fields
                    if field.name not in self.model._history_excluded_fields
                },
            )
            if hasattr(self.model, "history_relation"):
                row.history_relation_id = instance.pk
            historical_instances.append(row)

        return self.model.objects.bulk_create(
            historical_instances, batch_size=batch_size
        )
| 36.67052 | 87 | 0.585908 | from django.db import connection, models
from django.db.models import OuterRef, Subquery
from django.utils import timezone
from simple_history.utils import get_change_reason_from_object
class HistoryDescriptor:
def __init__(self, model):
self.model = model
def __get__(self, instance, owner):
if instance is None:
return HistoryManager(self.model)
return HistoryManager(self.model, instance)
class HistoryManager(models.Manager):
def __init__(self, model, instance=None):
super(HistoryManager, self).__init__()
self.model = model
self.instance = instance
def get_super_queryset(self):
return super(HistoryManager, self).get_queryset()
def get_queryset(self):
qs = self.get_super_queryset()
if self.instance is None:
return qs
if isinstance(self.instance._meta.pk, models.ForeignKey):
key_name = self.instance._meta.pk.name + "_id"
else:
key_name = self.instance._meta.pk.name
return self.get_super_queryset().filter(**{key_name: self.instance.pk})
def most_recent(self):
if not self.instance:
raise TypeError(
"Can't use most_recent() without a {} instance.".format(
self.model._meta.object_name
)
)
tmp = []
excluded_fields = getattr(self.model, "_history_excluded_fields", [])
for field in self.instance._meta.fields:
if field.name in excluded_fields:
continue
if isinstance(field, models.ForeignKey):
tmp.append(field.name + "_id")
else:
tmp.append(field.name)
fields = tuple(tmp)
try:
values = self.get_queryset().values(*fields)[0]
except IndexError:
raise self.instance.DoesNotExist(
"%s has no historical record." % self.instance._meta.object_name
)
return self.instance.__class__(**values)
def as_of(self, date):
if not self.instance:
return self._as_of_set(date)
queryset = self.get_queryset().filter(history_date__lte=date)
try:
history_obj = queryset[0]
except IndexError:
raise self.instance.DoesNotExist(
"%s had not yet been created." % self.instance._meta.object_name
)
if history_obj.history_type == "-":
raise self.instance.DoesNotExist(
"%s had already been deleted." % self.instance._meta.object_name
)
return history_obj.instance
def _as_of_set(self, date):
model = type(self.model().instance) # a bit of a hack to get the model
pk_attr = model._meta.pk.name
queryset = self.get_queryset().filter(history_date__lte=date)
# If using MySQL, need to get a list of IDs in memory and then use them for the
# second query.
# Does mean two loops through the DB to get the full set, but still a speed
# improvement.
backend = connection.vendor
if backend == "mysql":
history_ids = {}
for item in queryset.order_by("-history_date", "-pk"):
if getattr(item, pk_attr) not in history_ids:
history_ids[getattr(item, pk_attr)] = item.pk
latest_historics = queryset.filter(history_id__in=history_ids.values())
elif backend == "postgresql":
latest_pk_attr_historic_ids = (
queryset.order_by(pk_attr, "-history_date", "-pk")
.distinct(pk_attr)
.values_list("pk", flat=True)
)
latest_historics = queryset.filter(
history_id__in=latest_pk_attr_historic_ids
)
else:
latest_pk_attr_historic_ids = (
queryset.filter(**{pk_attr: OuterRef(pk_attr)})
.order_by("-history_date", "-pk")
.values("pk")[:1]
)
latest_historics = queryset.filter(
history_id__in=Subquery(latest_pk_attr_historic_ids)
)
adjusted = latest_historics.exclude(history_type="-").order_by(pk_attr)
for historic_item in adjusted:
yield historic_item.instance
def bulk_history_create(
self,
objs,
batch_size=None,
update=False,
default_user=None,
default_change_reason="",
default_date=None,
):
history_type = "+"
if update:
history_type = "~"
historical_instances = []
for instance in objs:
history_user = getattr(
instance,
"_history_user",
default_user or self.model.get_default_history_user(instance),
)
row = self.model(
history_date=getattr(
instance, "_history_date", default_date or timezone.now()
),
history_user=history_user,
history_change_reason=get_change_reason_from_object(instance)
or default_change_reason,
history_type=history_type,
**{
field.attname: getattr(instance, field.attname)
for field in instance._meta.fields
if field.name not in self.model._history_excluded_fields
},
)
if hasattr(self.model, "history_relation"):
row.history_relation_id = instance.pk
historical_instances.append(row)
return self.model.objects.bulk_create(
historical_instances, batch_size=batch_size
)
| true | true |
f720a3de70f0386fe46cbec6ae3539699c8d83d8 | 5,653 | py | Python | sdk/python/pulumi_azure_native/botservice/v20171201/get_bot_connection.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/botservice/v20171201/get_bot_connection.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/botservice/v20171201/get_bot_connection.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetBotConnectionResult',
'AwaitableGetBotConnectionResult',
'get_bot_connection',
]
@pulumi.output_type
class GetBotConnectionResult:
    """
    Bot channel resource definition.

    Generated by the Pulumi SDK generator; do not edit by hand.
    """
    def __init__(__self__, etag=None, id=None, kind=None, location=None, name=None, properties=None, sku=None, tags=None, type=None):
        # Generated pattern: validate each wire value's Python type, then
        # stash it through pulumi.set so the @pulumi.output_type getters
        # can retrieve it.
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if kind and not isinstance(kind, str):
            raise TypeError("Expected argument 'kind' to be a str")
        pulumi.set(__self__, "kind", kind)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if properties and not isinstance(properties, dict):
            raise TypeError("Expected argument 'properties' to be a dict")
        pulumi.set(__self__, "properties", properties)
        if sku and not isinstance(sku, dict):
            raise TypeError("Expected argument 'sku' to be a dict")
        pulumi.set(__self__, "sku", sku)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """
        Entity Tag
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Specifies the resource ID.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def kind(self) -> Optional[str]:
        """
        Required. Gets or sets the Kind of the resource.
        """
        return pulumi.get(self, "kind")

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Specifies the location of the resource.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Specifies the name of the resource.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def properties(self) -> 'outputs.ConnectionSettingPropertiesResponse':
        """
        The set of properties specific to bot channel resource
        """
        return pulumi.get(self, "properties")

    @property
    @pulumi.getter
    def sku(self) -> Optional['outputs.SkuResponse']:
        """
        Gets or sets the SKU of the resource.
        """
        return pulumi.get(self, "sku")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Contains resource tags defined as key/value pairs.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Specifies the type of the resource.
        """
        return pulumi.get(self, "type")
class AwaitableGetBotConnectionResult(GetBotConnectionResult):
    """Awaitable shim over GetBotConnectionResult.

    The unreachable ``yield`` makes ``__await__`` a generator, letting
    callers ``await`` an already-resolved result.
    """
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetBotConnectionResult(
            etag=self.etag,
            id=self.id,
            kind=self.kind,
            location=self.location,
            name=self.name,
            properties=self.properties,
            sku=self.sku,
            tags=self.tags,
            type=self.type)
def get_bot_connection(connection_name: Optional[str] = None,
                       resource_group_name: Optional[str] = None,
                       resource_name: Optional[str] = None,
                       opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetBotConnectionResult:
    """
    Bot channel resource definition


    :param str connection_name: The name of the Bot Service Connection Setting resource
    :param str resource_group_name: The name of the Bot resource group in the user subscription.
    :param str resource_name: The name of the Bot resource.
    """
    # Wire-format argument names expected by the Azure Native provider.
    __args__ = dict()
    __args__['connectionName'] = connection_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['resourceName'] = resource_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to the provider version this SDK was generated against.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:botservice/v20171201:getBotConnection', __args__, opts=opts, typ=GetBotConnectionResult).value

    return AwaitableGetBotConnectionResult(
        etag=__ret__.etag,
        id=__ret__.id,
        kind=__ret__.kind,
        location=__ret__.location,
        name=__ret__.name,
        properties=__ret__.properties,
        sku=__ret__.sku,
        tags=__ret__.tags,
        type=__ret__.type)
| 32.488506 | 144 | 0.618433 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetBotConnectionResult',
'AwaitableGetBotConnectionResult',
'get_bot_connection',
]
@pulumi.output_type
class GetBotConnectionResult:
def __init__(__self__, etag=None, id=None, kind=None, location=None, name=None, properties=None, sku=None, tags=None, type=None):
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def etag(self) -> Optional[str]:
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> str:
return pulumi.get(self, "id")
@property
@pulumi.getter
def kind(self) -> Optional[str]:
return pulumi.get(self, "kind")
@property
@pulumi.getter
def location(self) -> Optional[str]:
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.ConnectionSettingPropertiesResponse':
return pulumi.get(self, "properties")
@property
@pulumi.getter
def sku(self) -> Optional['outputs.SkuResponse']:
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
return pulumi.get(self, "type")
class AwaitableGetBotConnectionResult(GetBotConnectionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetBotConnectionResult(
etag=self.etag,
id=self.id,
kind=self.kind,
location=self.location,
name=self.name,
properties=self.properties,
sku=self.sku,
tags=self.tags,
type=self.type)
def get_bot_connection(connection_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
resource_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetBotConnectionResult:
__args__ = dict()
__args__['connectionName'] = connection_name
__args__['resourceGroupName'] = resource_group_name
__args__['resourceName'] = resource_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:botservice/v20171201:getBotConnection', __args__, opts=opts, typ=GetBotConnectionResult).value
return AwaitableGetBotConnectionResult(
etag=__ret__.etag,
id=__ret__.id,
kind=__ret__.kind,
location=__ret__.location,
name=__ret__.name,
properties=__ret__.properties,
sku=__ret__.sku,
tags=__ret__.tags,
type=__ret__.type)
| true | true |
f720a41fe1294d6a6a4f0a5fcf613dac72c9f8da | 9,425 | py | Python | pulsar-functions/instance/src/main/python/function_stats.py | bruth/pulsar | fe2c8ee4d37e2a45dfb528592915746827416e18 | [
"Apache-2.0"
] | null | null | null | pulsar-functions/instance/src/main/python/function_stats.py | bruth/pulsar | fe2c8ee4d37e2a45dfb528592915746827416e18 | [
"Apache-2.0"
] | null | null | null | pulsar-functions/instance/src/main/python/function_stats.py | bruth/pulsar | fe2c8ee4d37e2a45dfb528592915746827416e18 | [
"Apache-2.0"
] | 1 | 2019-03-15T09:34:50.000Z | 2019-03-15T09:34:50.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import traceback
import time
import util
from prometheus_client import Counter, Summary, Gauge
# We keep track of the following metrics
class Stats(object):
metrics_label_names = ['tenant', 'namespace', 'function', 'instance_id', 'cluster']
PULSAR_FUNCTION_METRICS_PREFIX = "pulsar_function_"
USER_METRIC_PREFIX = "user_metric_";
TOTAL_PROCESSED = 'processed_total'
TOTAL_SUCCESSFULLY_PROCESSED = 'processed_successfully_total'
TOTAL_SYSTEM_EXCEPTIONS = 'system_exceptions_total'
TOTAL_USER_EXCEPTIONS = 'user_exceptions_total'
PROCESS_LATENCY_MS = 'process_latency_ms'
LAST_INVOCATION = 'last_invocation'
TOTAL_RECEIVED = 'received_total'
TOTAL_PROCESSED_1min = 'processed_total_1min'
TOTAL_SUCCESSFULLY_PROCESSED_1min = 'processed_successfully_total_1min'
TOTAL_SYSTEM_EXCEPTIONS_1min = 'system_exceptions_total_1min'
TOTAL_USER_EXCEPTIONS_1min = 'user_exceptions_total_1min'
PROCESS_LATENCY_MS_1min = 'process_latency_ms_1min'
TOTAL_RECEIVED_1min = 'received_total_1min'
# Declare Prometheus
stat_total_processed = Counter(PULSAR_FUNCTION_METRICS_PREFIX + TOTAL_PROCESSED, 'Total number of messages processed.', metrics_label_names)
stat_total_processed_successfully = Counter(PULSAR_FUNCTION_METRICS_PREFIX + TOTAL_SUCCESSFULLY_PROCESSED,
'Total number of messages processed successfully.', metrics_label_names)
stat_total_sys_exceptions = Counter(PULSAR_FUNCTION_METRICS_PREFIX+ TOTAL_SYSTEM_EXCEPTIONS, 'Total number of system exceptions.',
metrics_label_names)
stat_total_user_exceptions = Counter(PULSAR_FUNCTION_METRICS_PREFIX + TOTAL_USER_EXCEPTIONS, 'Total number of user exceptions.',
metrics_label_names)
stat_process_latency_ms = Summary(PULSAR_FUNCTION_METRICS_PREFIX + PROCESS_LATENCY_MS, 'Process latency in milliseconds.', metrics_label_names)
stat_last_invocation = Gauge(PULSAR_FUNCTION_METRICS_PREFIX + LAST_INVOCATION, 'The timestamp of the last invocation of the function.', metrics_label_names)
stat_total_received = Counter(PULSAR_FUNCTION_METRICS_PREFIX + TOTAL_RECEIVED, 'Total number of messages received from source.', metrics_label_names)
# 1min windowed metrics
stat_total_processed_1min = Counter(PULSAR_FUNCTION_METRICS_PREFIX + TOTAL_PROCESSED_1min,
'Total number of messages processed in the last 1 minute.', metrics_label_names)
stat_total_processed_successfully_1min = Counter(PULSAR_FUNCTION_METRICS_PREFIX + TOTAL_SUCCESSFULLY_PROCESSED_1min,
'Total number of messages processed successfully in the last 1 minute.', metrics_label_names)
stat_total_sys_exceptions_1min = Counter(PULSAR_FUNCTION_METRICS_PREFIX + TOTAL_SYSTEM_EXCEPTIONS_1min,
'Total number of system exceptions in the last 1 minute.',
metrics_label_names)
stat_total_user_exceptions_1min = Counter(PULSAR_FUNCTION_METRICS_PREFIX + TOTAL_USER_EXCEPTIONS_1min,
'Total number of user exceptions in the last 1 minute.',
metrics_label_names)
stat_process_latency_ms_1min = Summary(PULSAR_FUNCTION_METRICS_PREFIX + PROCESS_LATENCY_MS_1min,
'Process latency in milliseconds in the last 1 minute.', metrics_label_names)
stat_total_received_1min = Counter(PULSAR_FUNCTION_METRICS_PREFIX + TOTAL_RECEIVED_1min,
'Total number of messages received from source in the last 1 minute.', metrics_label_names)
latest_user_exception = []
latest_sys_exception = []
def __init__(self, metrics_labels):
self.metrics_labels = metrics_labels;
self.process_start_time = None
# start time for windowed metrics
util.FixedTimer(60, self.reset).start()
def get_total_received(self):
return self.stat_total_received.labels(*self.metrics_labels)._value.get();
def get_total_processed(self):
return self.stat_total_processed.labels(*self.metrics_labels)._value.get();
def get_total_processed_successfully(self):
return self.stat_total_processed_successfully.labels(*self.metrics_labels)._value.get();
def get_total_sys_exceptions(self):
return self.stat_total_sys_exceptions.labels(*self.metrics_labels)._value.get();
def get_total_user_exceptions(self):
return self.stat_total_user_exceptions.labels(*self.metrics_labels)._value.get();
def get_avg_process_latency(self):
process_latency_ms_count = self.stat_process_latency_ms.labels(*self.metrics_labels)._count.get()
process_latency_ms_sum = self.stat_process_latency_ms.labels(*self.metrics_labels)._sum.get()
return 0.0 \
if process_latency_ms_count <= 0.0 \
else process_latency_ms_sum / process_latency_ms_count
def get_total_received_1min(self):
return self.stat_total_received_1min.labels(*self.metrics_labels)._value.get();
def get_total_processed_1min(self):
return self.stat_total_processed_1min.labels(*self.metrics_labels)._value.get();
def get_total_processed_successfully_1min(self):
return self.stat_total_processed_successfully_1min.labels(*self.metrics_labels)._value.get();
def get_total_sys_exceptions_1min(self):
return self.stat_total_sys_exceptions_1min.labels(*self.metrics_labels)._value.get();
def get_total_user_exceptions_1min(self):
return self.stat_total_user_exceptions_1min.labels(*self.metrics_labels)._value.get();
def get_avg_process_latency_1min(self):
process_latency_ms_count = self.stat_process_latency_ms_1min.labels(*self.metrics_labels)._count.get()
process_latency_ms_sum = self.stat_process_latency_ms_1min.labels(*self.metrics_labels)._sum.get()
return 0.0 \
if process_latency_ms_count <= 0.0 \
else process_latency_ms_sum / process_latency_ms_count
def get_last_invocation(self):
return self.stat_last_invocation.labels(*self.metrics_labels)._value.get()
def incr_total_processed(self):
self.stat_total_processed.labels(*self.metrics_labels).inc()
self.stat_total_processed_1min.labels(*self.metrics_labels).inc()
def incr_total_processed_successfully(self):
self.stat_total_processed_successfully.labels(*self.metrics_labels).inc()
self.stat_total_processed_successfully_1min.labels(*self.metrics_labels).inc()
def incr_total_sys_exceptions(self):
self.stat_total_sys_exceptions.labels(*self.metrics_labels).inc()
self.stat_total_sys_exceptions_1min.labels(*self.metrics_labels).inc()
self.add_sys_exception()
def incr_total_user_exceptions(self):
self.stat_total_user_exceptions.labels(*self.metrics_labels).inc()
self.stat_total_user_exceptions_1min.labels(*self.metrics_labels).inc()
self.add_user_exception()
def incr_total_received(self):
self.stat_total_received.labels(*self.metrics_labels).inc()
self.stat_total_received_1min.labels(*self.metrics_labels).inc()
def process_time_start(self):
self.process_start_time = time.time();
def process_time_end(self):
if self.process_start_time:
duration = (time.time() - self.process_start_time) * 1000.0
self.stat_process_latency_ms.labels(*self.metrics_labels).observe(duration)
self.stat_process_latency_ms_1min.labels(*self.metrics_labels).observe(duration)
def set_last_invocation(self, time):
self.stat_last_invocation.labels(*self.metrics_labels).set(time * 1000.0)
def add_user_exception(self):
self.latest_sys_exception.append((traceback.format_exc(), int(time.time() * 1000)))
if len(self.latest_sys_exception) > 10:
self.latest_sys_exception.pop(0)
def add_sys_exception(self):
self.latest_sys_exception.append((traceback.format_exc(), int(time.time() * 1000)))
if len(self.latest_sys_exception) > 10:
self.latest_sys_exception.pop(0)
def reset(self):
self.latest_user_exception = []
self.latest_sys_exception = []
self.stat_total_processed_1min.labels(*self.metrics_labels)._value.set(0.0)
self.stat_total_processed_successfully_1min.labels(*self.metrics_labels)._value.set(0.0)
self.stat_total_user_exceptions_1min.labels(*self.metrics_labels)._value.set(0.0)
self.stat_total_sys_exceptions_1min.labels(*self.metrics_labels)._value.set(0.0)
self.stat_process_latency_ms_1min.labels(*self.metrics_labels)._sum.set(0.0)
self.stat_process_latency_ms_1min.labels(*self.metrics_labels)._count.set(0.0)
self.stat_total_received_1min.labels(*self.metrics_labels)._value.set(0.0) | 49.088542 | 158 | 0.766154 |
import traceback
import time
import util
from prometheus_client import Counter, Summary, Gauge
class Stats(object):
metrics_label_names = ['tenant', 'namespace', 'function', 'instance_id', 'cluster']
PULSAR_FUNCTION_METRICS_PREFIX = "pulsar_function_"
USER_METRIC_PREFIX = "user_metric_";
TOTAL_PROCESSED = 'processed_total'
TOTAL_SUCCESSFULLY_PROCESSED = 'processed_successfully_total'
TOTAL_SYSTEM_EXCEPTIONS = 'system_exceptions_total'
TOTAL_USER_EXCEPTIONS = 'user_exceptions_total'
PROCESS_LATENCY_MS = 'process_latency_ms'
LAST_INVOCATION = 'last_invocation'
TOTAL_RECEIVED = 'received_total'
TOTAL_PROCESSED_1min = 'processed_total_1min'
TOTAL_SUCCESSFULLY_PROCESSED_1min = 'processed_successfully_total_1min'
TOTAL_SYSTEM_EXCEPTIONS_1min = 'system_exceptions_total_1min'
TOTAL_USER_EXCEPTIONS_1min = 'user_exceptions_total_1min'
PROCESS_LATENCY_MS_1min = 'process_latency_ms_1min'
TOTAL_RECEIVED_1min = 'received_total_1min'
stat_total_processed = Counter(PULSAR_FUNCTION_METRICS_PREFIX + TOTAL_PROCESSED, 'Total number of messages processed.', metrics_label_names)
stat_total_processed_successfully = Counter(PULSAR_FUNCTION_METRICS_PREFIX + TOTAL_SUCCESSFULLY_PROCESSED,
'Total number of messages processed successfully.', metrics_label_names)
stat_total_sys_exceptions = Counter(PULSAR_FUNCTION_METRICS_PREFIX+ TOTAL_SYSTEM_EXCEPTIONS, 'Total number of system exceptions.',
metrics_label_names)
stat_total_user_exceptions = Counter(PULSAR_FUNCTION_METRICS_PREFIX + TOTAL_USER_EXCEPTIONS, 'Total number of user exceptions.',
metrics_label_names)
stat_process_latency_ms = Summary(PULSAR_FUNCTION_METRICS_PREFIX + PROCESS_LATENCY_MS, 'Process latency in milliseconds.', metrics_label_names)
stat_last_invocation = Gauge(PULSAR_FUNCTION_METRICS_PREFIX + LAST_INVOCATION, 'The timestamp of the last invocation of the function.', metrics_label_names)
stat_total_received = Counter(PULSAR_FUNCTION_METRICS_PREFIX + TOTAL_RECEIVED, 'Total number of messages received from source.', metrics_label_names)
stat_total_processed_1min = Counter(PULSAR_FUNCTION_METRICS_PREFIX + TOTAL_PROCESSED_1min,
'Total number of messages processed in the last 1 minute.', metrics_label_names)
stat_total_processed_successfully_1min = Counter(PULSAR_FUNCTION_METRICS_PREFIX + TOTAL_SUCCESSFULLY_PROCESSED_1min,
'Total number of messages processed successfully in the last 1 minute.', metrics_label_names)
stat_total_sys_exceptions_1min = Counter(PULSAR_FUNCTION_METRICS_PREFIX + TOTAL_SYSTEM_EXCEPTIONS_1min,
'Total number of system exceptions in the last 1 minute.',
metrics_label_names)
stat_total_user_exceptions_1min = Counter(PULSAR_FUNCTION_METRICS_PREFIX + TOTAL_USER_EXCEPTIONS_1min,
'Total number of user exceptions in the last 1 minute.',
metrics_label_names)
stat_process_latency_ms_1min = Summary(PULSAR_FUNCTION_METRICS_PREFIX + PROCESS_LATENCY_MS_1min,
'Process latency in milliseconds in the last 1 minute.', metrics_label_names)
stat_total_received_1min = Counter(PULSAR_FUNCTION_METRICS_PREFIX + TOTAL_RECEIVED_1min,
'Total number of messages received from source in the last 1 minute.', metrics_label_names)
latest_user_exception = []
latest_sys_exception = []
def __init__(self, metrics_labels):
self.metrics_labels = metrics_labels;
self.process_start_time = None
util.FixedTimer(60, self.reset).start()
def get_total_received(self):
return self.stat_total_received.labels(*self.metrics_labels)._value.get();
def get_total_processed(self):
return self.stat_total_processed.labels(*self.metrics_labels)._value.get();
def get_total_processed_successfully(self):
return self.stat_total_processed_successfully.labels(*self.metrics_labels)._value.get();
def get_total_sys_exceptions(self):
return self.stat_total_sys_exceptions.labels(*self.metrics_labels)._value.get();
def get_total_user_exceptions(self):
return self.stat_total_user_exceptions.labels(*self.metrics_labels)._value.get();
def get_avg_process_latency(self):
process_latency_ms_count = self.stat_process_latency_ms.labels(*self.metrics_labels)._count.get()
process_latency_ms_sum = self.stat_process_latency_ms.labels(*self.metrics_labels)._sum.get()
return 0.0 \
if process_latency_ms_count <= 0.0 \
else process_latency_ms_sum / process_latency_ms_count
def get_total_received_1min(self):
return self.stat_total_received_1min.labels(*self.metrics_labels)._value.get();
def get_total_processed_1min(self):
return self.stat_total_processed_1min.labels(*self.metrics_labels)._value.get();
def get_total_processed_successfully_1min(self):
return self.stat_total_processed_successfully_1min.labels(*self.metrics_labels)._value.get();
def get_total_sys_exceptions_1min(self):
return self.stat_total_sys_exceptions_1min.labels(*self.metrics_labels)._value.get();
def get_total_user_exceptions_1min(self):
return self.stat_total_user_exceptions_1min.labels(*self.metrics_labels)._value.get();
def get_avg_process_latency_1min(self):
process_latency_ms_count = self.stat_process_latency_ms_1min.labels(*self.metrics_labels)._count.get()
process_latency_ms_sum = self.stat_process_latency_ms_1min.labels(*self.metrics_labels)._sum.get()
return 0.0 \
if process_latency_ms_count <= 0.0 \
else process_latency_ms_sum / process_latency_ms_count
def get_last_invocation(self):
return self.stat_last_invocation.labels(*self.metrics_labels)._value.get()
def incr_total_processed(self):
self.stat_total_processed.labels(*self.metrics_labels).inc()
self.stat_total_processed_1min.labels(*self.metrics_labels).inc()
def incr_total_processed_successfully(self):
self.stat_total_processed_successfully.labels(*self.metrics_labels).inc()
self.stat_total_processed_successfully_1min.labels(*self.metrics_labels).inc()
def incr_total_sys_exceptions(self):
self.stat_total_sys_exceptions.labels(*self.metrics_labels).inc()
self.stat_total_sys_exceptions_1min.labels(*self.metrics_labels).inc()
self.add_sys_exception()
def incr_total_user_exceptions(self):
self.stat_total_user_exceptions.labels(*self.metrics_labels).inc()
self.stat_total_user_exceptions_1min.labels(*self.metrics_labels).inc()
self.add_user_exception()
def incr_total_received(self):
self.stat_total_received.labels(*self.metrics_labels).inc()
self.stat_total_received_1min.labels(*self.metrics_labels).inc()
def process_time_start(self):
self.process_start_time = time.time();
def process_time_end(self):
if self.process_start_time:
duration = (time.time() - self.process_start_time) * 1000.0
self.stat_process_latency_ms.labels(*self.metrics_labels).observe(duration)
self.stat_process_latency_ms_1min.labels(*self.metrics_labels).observe(duration)
def set_last_invocation(self, time):
self.stat_last_invocation.labels(*self.metrics_labels).set(time * 1000.0)
def add_user_exception(self):
self.latest_sys_exception.append((traceback.format_exc(), int(time.time() * 1000)))
if len(self.latest_sys_exception) > 10:
self.latest_sys_exception.pop(0)
def add_sys_exception(self):
self.latest_sys_exception.append((traceback.format_exc(), int(time.time() * 1000)))
if len(self.latest_sys_exception) > 10:
self.latest_sys_exception.pop(0)
def reset(self):
self.latest_user_exception = []
self.latest_sys_exception = []
self.stat_total_processed_1min.labels(*self.metrics_labels)._value.set(0.0)
self.stat_total_processed_successfully_1min.labels(*self.metrics_labels)._value.set(0.0)
self.stat_total_user_exceptions_1min.labels(*self.metrics_labels)._value.set(0.0)
self.stat_total_sys_exceptions_1min.labels(*self.metrics_labels)._value.set(0.0)
self.stat_process_latency_ms_1min.labels(*self.metrics_labels)._sum.set(0.0)
self.stat_process_latency_ms_1min.labels(*self.metrics_labels)._count.set(0.0)
self.stat_total_received_1min.labels(*self.metrics_labels)._value.set(0.0) | true | true |
f720a49b1ccb9afbd2e211fb293d03f55301e147 | 2,226 | py | Python | Numpy/code.py | JayeshSukhija/ga-learner-dsmp-repo | 4c05d980462dde423b6be41cca1218d6d98e8e48 | [
"MIT"
] | null | null | null | Numpy/code.py | JayeshSukhija/ga-learner-dsmp-repo | 4c05d980462dde423b6be41cca1218d6d98e8e48 | [
"MIT"
] | null | null | null | Numpy/code.py | JayeshSukhija/ga-learner-dsmp-repo | 4c05d980462dde423b6be41cca1218d6d98e8e48 | [
"MIT"
] | null | null | null | # --------------
# Importing header files
import numpy as np
#New record
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
#Code starts here
#Loading data file and saving it into a new numpy array
data = np.genfromtxt(path, delimiter=",", skip_header=1)
print(data.shape)
#Concatenating the new record to the existing numpy array
census=np.concatenate((data, new_record),axis = 0)
print(census.shape)
#Code ends here
# --------------
#Code starts here
import numpy as np
age=census[:,0]
print (age)
print ('='*50)
max_age=np.max(age)
print (max_age)
print ('='*50)
min_age=np.min(age)
print (min_age)
print ('='*50)
age_mean=np.mean(age)
print (age_mean)
print ('='*50)
age_std=np.std(age)
print (age_std)
print('='*50)
# --------------
#Code starts here
#Creating new subsets based on 'Age'
race_0=census[census[:,2]==0]
race_1=census[census[:,2]==1]
race_2=census[census[:,2]==2]
race_3=census[census[:,2]==3]
race_4=census[census[:,2]==4]
#Finding the length of the above created subsets
len_0=len(race_0)
len_1=len(race_1)
len_2=len(race_2)
len_3=len(race_3)
len_4=len(race_4)
#Printing the length of the above created subsets
print('Race_0: ', len_0)
print('Race_1: ', len_1)
print('Race_2: ', len_2)
print('Race_3: ', len_3)
print('Race_4: ', len_4)
#Storing the different race lengths with appropriate indexes
race_list=[len_0, len_1,len_2, len_3, len_4]
#Storing the race with minimum length into a variable
minority_race=race_list.index(min(race_list))
print ('minority_race:',minority_race)
#Code ends here
# --------------
#Code starts here
import numpy as np
senior_citizens=census[census[:,0 ]>60]
working_hours_sum=senior_citizens.sum(axis=0)[6]
senior_citizens_len=len(senior_citizens)
avg_working_hours=(working_hours_sum/senior_citizens_len)
print (avg_working_hours)
# --------------
#Code starts here
import numpy as np
high=census[census[:,1 ]>10]
low=census[census[:,1]<=10]
avg_pay_high=high.mean(axis=0)[7]
avg_pay_low=low.mean(axis=0)[7]
if (avg_pay_high>avg_pay_low):
print ("Better Education leads to better pay")
else:
print ("Better Education does not leads to better pay")
| 20.611111 | 61 | 0.676101 |
import numpy as np
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
data = np.genfromtxt(path, delimiter=",", skip_header=1)
print(data.shape)
census=np.concatenate((data, new_record),axis = 0)
print(census.shape)
import numpy as np
age=census[:,0]
print (age)
print ('='*50)
max_age=np.max(age)
print (max_age)
print ('='*50)
min_age=np.min(age)
print (min_age)
print ('='*50)
age_mean=np.mean(age)
print (age_mean)
print ('='*50)
age_std=np.std(age)
print (age_std)
print('='*50)
race_0=census[census[:,2]==0]
race_1=census[census[:,2]==1]
race_2=census[census[:,2]==2]
race_3=census[census[:,2]==3]
race_4=census[census[:,2]==4]
len_0=len(race_0)
len_1=len(race_1)
len_2=len(race_2)
len_3=len(race_3)
len_4=len(race_4)
print('Race_0: ', len_0)
print('Race_1: ', len_1)
print('Race_2: ', len_2)
print('Race_3: ', len_3)
print('Race_4: ', len_4)
race_list=[len_0, len_1,len_2, len_3, len_4]
minority_race=race_list.index(min(race_list))
print ('minority_race:',minority_race)
import numpy as np
senior_citizens=census[census[:,0 ]>60]
working_hours_sum=senior_citizens.sum(axis=0)[6]
senior_citizens_len=len(senior_citizens)
avg_working_hours=(working_hours_sum/senior_citizens_len)
print (avg_working_hours)
import numpy as np
high=census[census[:,1 ]>10]
low=census[census[:,1]<=10]
avg_pay_high=high.mean(axis=0)[7]
avg_pay_low=low.mean(axis=0)[7]
if (avg_pay_high>avg_pay_low):
print ("Better Education leads to better pay")
else:
print ("Better Education does not leads to better pay")
| true | true |
f720a7182cc9382555831934ef834b1a0aab840c | 473 | py | Python | enaml/qt/__init__.py | dandycheung/enaml | 1a7d9c95717a359bb2a8435c597eda36c9235fab | [
"BSD-3-Clause-Clear"
] | null | null | null | enaml/qt/__init__.py | dandycheung/enaml | 1a7d9c95717a359bb2a8435c597eda36c9235fab | [
"BSD-3-Clause-Clear"
] | null | null | null | enaml/qt/__init__.py | dandycheung/enaml | 1a7d9c95717a359bb2a8435c597eda36c9235fab | [
"BSD-3-Clause-Clear"
] | null | null | null | # ------------------------------------------------------------------------------
# Copyright (c) 2013-2022, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ------------------------------------------------------------------------------
from qtpy import (
API as QT_API,
PYQT5_API,
PYSIDE2_API,
PYQT6_API,
PYSIDE6_API,
QT_VERSION,
)
| 29.5625 | 80 | 0.46723 |
from qtpy import (
API as QT_API,
PYQT5_API,
PYSIDE2_API,
PYQT6_API,
PYSIDE6_API,
QT_VERSION,
)
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.