body_hash
stringlengths 64
64
| body
stringlengths 23
109k
| docstring
stringlengths 1
57k
| path
stringlengths 4
198
| name
stringlengths 1
115
| repository_name
stringlengths 7
111
| repository_stars
float64 0
191k
| lang
stringclasses 1
value | body_without_docstring
stringlengths 14
108k
| unified
stringlengths 45
133k
|
|---|---|---|---|---|---|---|---|---|---|
33f9b461408d489867fd217b8e491e61a110d0e621dc0d36459c6fa641da5894
|
def processDataGUI(self, inputfile=None, data3d=None, metadata=None, crgui=True):
'\n GUI version of histology analysation algorithm\n '
self.inputfile = inputfile
self.data3d = data3d
self.masked = None
self.metadata = metadata
self.crgui = crgui
if (self.crgui is True):
logger.debug('Gui data crop')
self.data3d = self.showCropDialog(self.data3d)
logger.debug('Init HistologyAnalyser object')
self.ha = HA.HistologyAnalyser(self.data3d, self.metadata, nogui=False, qapp=self.qapp, aggregate_near_nodes_distance=self.args.aggregatenearnodes, hist_length_range=self.args.hist_length_range, hist_radius_range=self.args.hist_radius_range, binaryClosing=self.args.binaryclosing, binaryOpening=self.args.binaryopening)
self.ha.set_anotation(inputfile)
logger.debug('Remove area')
bad_mask = True
if (self.args.maskfile is not None):
logger.debug('Loading mask from file...')
try:
mask = misc.obj_from_file(filename=self.args.maskfile, filetype='pickle')
if (self.ha.data3d.shape == mask.shape):
self.ha.data3d_masked = mask
self.ha.data3d[(mask == 0)] = np.min(self.ha.data3d)
bad_mask = False
else:
logger.error(('Mask file has wrong dimensions ' + str(mask.shape)))
except Exception as e:
logger.error(('Error when processing mask file: ' + str(e)))
if (bad_mask == True):
logger.debug('Falling back to GUI mask mode')
if (bad_mask == True):
self.setStatusBarText('Remove area')
self.showRemoveDialog(self.ha.data3d)
self.ha.data3d_masked = self.masked
if (self.args.savemask and (bad_mask == True)):
self.save_mask()
self.showSegmQueryDialog()
|
GUI version of histology analysation algorithm
|
quantan/histology_analyser_gui.py
|
processDataGUI
|
mjirik/quanta
| 0
|
python
|
def processDataGUI(self, inputfile=None, data3d=None, metadata=None, crgui=True):
'\n \n '
self.inputfile = inputfile
self.data3d = data3d
self.masked = None
self.metadata = metadata
self.crgui = crgui
if (self.crgui is True):
logger.debug('Gui data crop')
self.data3d = self.showCropDialog(self.data3d)
logger.debug('Init HistologyAnalyser object')
self.ha = HA.HistologyAnalyser(self.data3d, self.metadata, nogui=False, qapp=self.qapp, aggregate_near_nodes_distance=self.args.aggregatenearnodes, hist_length_range=self.args.hist_length_range, hist_radius_range=self.args.hist_radius_range, binaryClosing=self.args.binaryclosing, binaryOpening=self.args.binaryopening)
self.ha.set_anotation(inputfile)
logger.debug('Remove area')
bad_mask = True
if (self.args.maskfile is not None):
logger.debug('Loading mask from file...')
try:
mask = misc.obj_from_file(filename=self.args.maskfile, filetype='pickle')
if (self.ha.data3d.shape == mask.shape):
self.ha.data3d_masked = mask
self.ha.data3d[(mask == 0)] = np.min(self.ha.data3d)
bad_mask = False
else:
logger.error(('Mask file has wrong dimensions ' + str(mask.shape)))
except Exception as e:
logger.error(('Error when processing mask file: ' + str(e)))
if (bad_mask == True):
logger.debug('Falling back to GUI mask mode')
if (bad_mask == True):
self.setStatusBarText('Remove area')
self.showRemoveDialog(self.ha.data3d)
self.ha.data3d_masked = self.masked
if (self.args.savemask and (bad_mask == True)):
self.save_mask()
self.showSegmQueryDialog()
|
def processDataGUI(self, inputfile=None, data3d=None, metadata=None, crgui=True):
'\n \n '
self.inputfile = inputfile
self.data3d = data3d
self.masked = None
self.metadata = metadata
self.crgui = crgui
if (self.crgui is True):
logger.debug('Gui data crop')
self.data3d = self.showCropDialog(self.data3d)
logger.debug('Init HistologyAnalyser object')
self.ha = HA.HistologyAnalyser(self.data3d, self.metadata, nogui=False, qapp=self.qapp, aggregate_near_nodes_distance=self.args.aggregatenearnodes, hist_length_range=self.args.hist_length_range, hist_radius_range=self.args.hist_radius_range, binaryClosing=self.args.binaryclosing, binaryOpening=self.args.binaryopening)
self.ha.set_anotation(inputfile)
logger.debug('Remove area')
bad_mask = True
if (self.args.maskfile is not None):
logger.debug('Loading mask from file...')
try:
mask = misc.obj_from_file(filename=self.args.maskfile, filetype='pickle')
if (self.ha.data3d.shape == mask.shape):
self.ha.data3d_masked = mask
self.ha.data3d[(mask == 0)] = np.min(self.ha.data3d)
bad_mask = False
else:
logger.error(('Mask file has wrong dimensions ' + str(mask.shape)))
except Exception as e:
logger.error(('Error when processing mask file: ' + str(e)))
if (bad_mask == True):
logger.debug('Falling back to GUI mask mode')
if (bad_mask == True):
self.setStatusBarText('Remove area')
self.showRemoveDialog(self.ha.data3d)
self.ha.data3d_masked = self.masked
if (self.args.savemask and (bad_mask == True)):
self.save_mask()
self.showSegmQueryDialog()<|docstring|>GUI version of histology analysation algorithm<|endoftext|>
|
60642e1fa007173bd5ab04a92de769a62d3524a8e7cf0792faa6c0f1a6800071
|
def setStatusBarText(self, text=''):
'\n Changes status bar text\n '
self.statusBar().showMessage(text)
QtCore.QCoreApplication.processEvents()
|
Changes status bar text
|
quantan/histology_analyser_gui.py
|
setStatusBarText
|
mjirik/quanta
| 0
|
python
|
def setStatusBarText(self, text=):
'\n \n '
self.statusBar().showMessage(text)
QtCore.QCoreApplication.processEvents()
|
def setStatusBarText(self, text=):
'\n \n '
self.statusBar().showMessage(text)
QtCore.QCoreApplication.processEvents()<|docstring|>Changes status bar text<|endoftext|>
|
a5f38214c549360b75d91e421e14b356011bd76e657e3ab5efdc46404e29e35a
|
def fixWindow(self, width=None, height=None):
'\n Resets Main window size, and makes sure all events (gui changes) were processed\n '
if (width is None):
width = self.WIDTH
if (height is None):
height = self.HEIGHT
self.resize(width, height)
QtCore.QCoreApplication.processEvents()
|
Resets Main window size, and makes sure all events (gui changes) were processed
|
quantan/histology_analyser_gui.py
|
fixWindow
|
mjirik/quanta
| 0
|
python
|
def fixWindow(self, width=None, height=None):
'\n \n '
if (width is None):
width = self.WIDTH
if (height is None):
height = self.HEIGHT
self.resize(width, height)
QtCore.QCoreApplication.processEvents()
|
def fixWindow(self, width=None, height=None):
'\n \n '
if (width is None):
width = self.WIDTH
if (height is None):
height = self.HEIGHT
self.resize(width, height)
QtCore.QCoreApplication.processEvents()<|docstring|>Resets Main window size, and makes sure all events (gui changes) were processed<|endoftext|>
|
27f6084510524a6ee25a5e022f0faac3bd9b931558141a732fe3045d10ccff6e
|
def embedWidget(self, widget=None):
'\n Replaces widget embedded that is in gui\n '
self.ui_gridLayout.removeWidget(self.ui_embeddedAppWindow)
self.ui_embeddedAppWindow.close()
if (widget is None):
self.ui_embeddedAppWindow = QLabel()
else:
self.ui_embeddedAppWindow = widget
self.ui_gridLayout.addWidget(self.ui_embeddedAppWindow, self.ui_embeddedAppWindow_pos, 1)
self.ui_gridLayout.update()
self.fixWindow()
|
Replaces widget embedded that is in gui
|
quantan/histology_analyser_gui.py
|
embedWidget
|
mjirik/quanta
| 0
|
python
|
def embedWidget(self, widget=None):
'\n \n '
self.ui_gridLayout.removeWidget(self.ui_embeddedAppWindow)
self.ui_embeddedAppWindow.close()
if (widget is None):
self.ui_embeddedAppWindow = QLabel()
else:
self.ui_embeddedAppWindow = widget
self.ui_gridLayout.addWidget(self.ui_embeddedAppWindow, self.ui_embeddedAppWindow_pos, 1)
self.ui_gridLayout.update()
self.fixWindow()
|
def embedWidget(self, widget=None):
'\n \n '
self.ui_gridLayout.removeWidget(self.ui_embeddedAppWindow)
self.ui_embeddedAppWindow.close()
if (widget is None):
self.ui_embeddedAppWindow = QLabel()
else:
self.ui_embeddedAppWindow = widget
self.ui_gridLayout.addWidget(self.ui_embeddedAppWindow, self.ui_embeddedAppWindow_pos, 1)
self.ui_gridLayout.update()
self.fixWindow()<|docstring|>Replaces widget embedded that is in gui<|endoftext|>
|
967bb4e16f02a4c6abb4751ec3cc794dbadc4f9c42bdf0607ba7a2d9243d3fb9
|
def save_segmentation(self):
'\n save segmentation dialog\n :return:\n '
logger.debug('save segmentation')
fn = self.getSavePath('segmentation', 'dcm')
self.ha.save_segmentation(fn)
|
save segmentation dialog
:return:
|
quantan/histology_analyser_gui.py
|
save_segmentation
|
mjirik/quanta
| 0
|
python
|
def save_segmentation(self):
'\n save segmentation dialog\n :return:\n '
logger.debug('save segmentation')
fn = self.getSavePath('segmentation', 'dcm')
self.ha.save_segmentation(fn)
|
def save_segmentation(self):
'\n save segmentation dialog\n :return:\n '
logger.debug('save segmentation')
fn = self.getSavePath('segmentation', 'dcm')
self.ha.save_segmentation(fn)<|docstring|>save segmentation dialog
:return:<|endoftext|>
|
e948614b54e10e047e4810bfbb148821d8302554df06d258d0e7a669545ae183
|
def save_skeleton(self):
'\n save labeled skeleton dialog\n :return:\n '
logger.debug('save skeleton')
fn = self.getSavePath('skeleton', 'dcm')
self.ha.save_skeleton(fn)
|
save labeled skeleton dialog
:return:
|
quantan/histology_analyser_gui.py
|
save_skeleton
|
mjirik/quanta
| 0
|
python
|
def save_skeleton(self):
'\n save labeled skeleton dialog\n :return:\n '
logger.debug('save skeleton')
fn = self.getSavePath('skeleton', 'dcm')
self.ha.save_skeleton(fn)
|
def save_skeleton(self):
'\n save labeled skeleton dialog\n :return:\n '
logger.debug('save skeleton')
fn = self.getSavePath('skeleton', 'dcm')
self.ha.save_skeleton(fn)<|docstring|>save labeled skeleton dialog
:return:<|endoftext|>
|
cd02494f573942d51b5327df8e118a6af9ac6fa344519a5e2507895cc0bd90f4
|
def save_labeled_skeleton(self):
'\n save labeled skeleton dialog\n :return:\n '
logger.debug('save labeled skeleton')
fn = self.getSavePath('labeled_skeleton', 'dcm')
self.ha.save_labeled_skeleton(fn)
|
save labeled skeleton dialog
:return:
|
quantan/histology_analyser_gui.py
|
save_labeled_skeleton
|
mjirik/quanta
| 0
|
python
|
def save_labeled_skeleton(self):
'\n save labeled skeleton dialog\n :return:\n '
logger.debug('save labeled skeleton')
fn = self.getSavePath('labeled_skeleton', 'dcm')
self.ha.save_labeled_skeleton(fn)
|
def save_labeled_skeleton(self):
'\n save labeled skeleton dialog\n :return:\n '
logger.debug('save labeled skeleton')
fn = self.getSavePath('labeled_skeleton', 'dcm')
self.ha.save_labeled_skeleton(fn)<|docstring|>save labeled skeleton dialog
:return:<|endoftext|>
|
b3a2cf531479b45edfc13f2571889787f07089dde8704eaefa108c8c4e10553d
|
def __get_datafile(self, app=False, directory=''):
'\n Draw a dialog for file selection.\n '
from PyQt4.QtGui import QFileDialog
if app:
dcmdir = QFileDialog.getOpenFileName(caption='Select Data File', directory=directory)
else:
app = QApplication(sys.argv)
dcmdir = QFileDialog.getOpenFileName(caption='Select Data File', directory=directory)
app.exit(0)
if (len(dcmdir) > 0):
dcmdir = ('%s' % dcmdir)
dcmdir = dcmdir.encode('utf8')
else:
dcmdir = None
return dcmdir
|
Draw a dialog for file selection.
|
quantan/histology_analyser_gui.py
|
__get_datafile
|
mjirik/quanta
| 0
|
python
|
def __get_datafile(self, app=False, directory=):
'\n \n '
from PyQt4.QtGui import QFileDialog
if app:
dcmdir = QFileDialog.getOpenFileName(caption='Select Data File', directory=directory)
else:
app = QApplication(sys.argv)
dcmdir = QFileDialog.getOpenFileName(caption='Select Data File', directory=directory)
app.exit(0)
if (len(dcmdir) > 0):
dcmdir = ('%s' % dcmdir)
dcmdir = dcmdir.encode('utf8')
else:
dcmdir = None
return dcmdir
|
def __get_datafile(self, app=False, directory=):
'\n \n '
from PyQt4.QtGui import QFileDialog
if app:
dcmdir = QFileDialog.getOpenFileName(caption='Select Data File', directory=directory)
else:
app = QApplication(sys.argv)
dcmdir = QFileDialog.getOpenFileName(caption='Select Data File', directory=directory)
app.exit(0)
if (len(dcmdir) > 0):
dcmdir = ('%s' % dcmdir)
dcmdir = dcmdir.encode('utf8')
else:
dcmdir = None
return dcmdir<|docstring|>Draw a dialog for file selection.<|endoftext|>
|
218b7c5e0280c967cd371a34c2b770d706072c3267ddaf0cf60658108e422750
|
def __get_datadir(self, app=False, directory=''):
'\n Draw a dialog for directory selection.\n '
from PyQt4.QtGui import QFileDialog
if app:
dcmdir = QFileDialog.getExistingDirectory(caption='Select Folder', options=QFileDialog.ShowDirsOnly, directory=directory)
else:
app = QApplication(sys.argv)
dcmdir = QFileDialog.getExistingDirectory(caption='Select Folder', options=QFileDialog.ShowDirsOnly, directory=directory)
app.exit(0)
if (len(dcmdir) > 0):
dcmdir = ('%s' % dcmdir)
dcmdir = dcmdir.encode('utf8')
else:
dcmdir = None
return dcmdir
|
Draw a dialog for directory selection.
|
quantan/histology_analyser_gui.py
|
__get_datadir
|
mjirik/quanta
| 0
|
python
|
def __get_datadir(self, app=False, directory=):
'\n \n '
from PyQt4.QtGui import QFileDialog
if app:
dcmdir = QFileDialog.getExistingDirectory(caption='Select Folder', options=QFileDialog.ShowDirsOnly, directory=directory)
else:
app = QApplication(sys.argv)
dcmdir = QFileDialog.getExistingDirectory(caption='Select Folder', options=QFileDialog.ShowDirsOnly, directory=directory)
app.exit(0)
if (len(dcmdir) > 0):
dcmdir = ('%s' % dcmdir)
dcmdir = dcmdir.encode('utf8')
else:
dcmdir = None
return dcmdir
|
def __get_datadir(self, app=False, directory=):
'\n \n '
from PyQt4.QtGui import QFileDialog
if app:
dcmdir = QFileDialog.getExistingDirectory(caption='Select Folder', options=QFileDialog.ShowDirsOnly, directory=directory)
else:
app = QApplication(sys.argv)
dcmdir = QFileDialog.getExistingDirectory(caption='Select Folder', options=QFileDialog.ShowDirsOnly, directory=directory)
app.exit(0)
if (len(dcmdir) > 0):
dcmdir = ('%s' % dcmdir)
dcmdir = dcmdir.encode('utf8')
else:
dcmdir = None
return dcmdir<|docstring|>Draw a dialog for directory selection.<|endoftext|>
|
4809348303dc7610a0b6204aea352bfe4f953d3e56a49cedd582c0da2b2096ec
|
def query_sip_indicator(indicator_id):
'Queries SIP for indicator details. Returns the dictionary containing the information \n (see the SIP documenation for dictionary schema.)'
assert isinstance(indicator_id, int)
import pysip
sip_client = pysip.Client(saq.CONFIG['sip']['remote_address'], saq.CONFIG['sip']['api_key'], verify=False)
return sip_client.get(f'indicators/{indicator_id}')
|
Queries SIP for indicator details. Returns the dictionary containing the information
(see the SIP documenation for dictionary schema.)
|
lib/saq/intel.py
|
query_sip_indicator
|
krayzpipes/ACE-1
| 28
|
python
|
def query_sip_indicator(indicator_id):
'Queries SIP for indicator details. Returns the dictionary containing the information \n (see the SIP documenation for dictionary schema.)'
assert isinstance(indicator_id, int)
import pysip
sip_client = pysip.Client(saq.CONFIG['sip']['remote_address'], saq.CONFIG['sip']['api_key'], verify=False)
return sip_client.get(f'indicators/{indicator_id}')
|
def query_sip_indicator(indicator_id):
'Queries SIP for indicator details. Returns the dictionary containing the information \n (see the SIP documenation for dictionary schema.)'
assert isinstance(indicator_id, int)
import pysip
sip_client = pysip.Client(saq.CONFIG['sip']['remote_address'], saq.CONFIG['sip']['api_key'], verify=False)
return sip_client.get(f'indicators/{indicator_id}')<|docstring|>Queries SIP for indicator details. Returns the dictionary containing the information
(see the SIP documenation for dictionary schema.)<|endoftext|>
|
ac37bfdf3fda4ed778806b627d138b067a21257e1e6043eef77611ad5ec71dba
|
def set_sip_indicator_status(indicator_id, status):
'Sets the given indicator to the given status. Returns True if the operation succeeded.'
assert isinstance(indicator_id, int)
assert isinstance(status, str)
import pysip
sip_client = pysip.Client(saq.CONFIG['sip']['remote_address'], saq.CONFIG['sip']['api_key'], verify=False)
return sip_client.put(f'indicators/{indicator_id}', data={'status': status})
|
Sets the given indicator to the given status. Returns True if the operation succeeded.
|
lib/saq/intel.py
|
set_sip_indicator_status
|
krayzpipes/ACE-1
| 28
|
python
|
def set_sip_indicator_status(indicator_id, status):
assert isinstance(indicator_id, int)
assert isinstance(status, str)
import pysip
sip_client = pysip.Client(saq.CONFIG['sip']['remote_address'], saq.CONFIG['sip']['api_key'], verify=False)
return sip_client.put(f'indicators/{indicator_id}', data={'status': status})
|
def set_sip_indicator_status(indicator_id, status):
assert isinstance(indicator_id, int)
assert isinstance(status, str)
import pysip
sip_client = pysip.Client(saq.CONFIG['sip']['remote_address'], saq.CONFIG['sip']['api_key'], verify=False)
return sip_client.put(f'indicators/{indicator_id}', data={'status': status})<|docstring|>Sets the given indicator to the given status. Returns True if the operation succeeded.<|endoftext|>
|
05cea368b83ededdd59b9852357fd6b0ad0d1aac8a62463adc785a942d2220dd
|
def tearDown(self):
'\n tear down method that cleans up after each test case is run\n '
User.user_List = []
|
tear down method that cleans up after each test case is run
|
locker_test.py
|
tearDown
|
Tu276/Password_locker
| 0
|
python
|
def tearDown(self):
'\n \n '
User.user_List = []
|
def tearDown(self):
'\n \n '
User.user_List = []<|docstring|>tear down method that cleans up after each test case is run<|endoftext|>
|
9e5ac67ad117b4b7a08af507de07f94ee531a1e28fbb8ec4a485bc7214bba79d
|
def setUp(self):
'\n Set up method to run before each test cases.\n '
self.new_user = User('tu276', 'nathan')
|
Set up method to run before each test cases.
|
locker_test.py
|
setUp
|
Tu276/Password_locker
| 0
|
python
|
def setUp(self):
'\n \n '
self.new_user = User('tu276', 'nathan')
|
def setUp(self):
'\n \n '
self.new_user = User('tu276', 'nathan')<|docstring|>Set up method to run before each test cases.<|endoftext|>
|
9cf565390f3fdc60eeff3327a6298ec9335cc659ee84fd11327754a6ed3e5bb6
|
def test_init(self):
'\n test_init test case to test if the object is initialized properly\n '
self.assertEqual(self.new_user.login_username, 'tu276')
self.assertEqual(self.new_user.user_password, 'nathan')
|
test_init test case to test if the object is initialized properly
|
locker_test.py
|
test_init
|
Tu276/Password_locker
| 0
|
python
|
def test_init(self):
'\n \n '
self.assertEqual(self.new_user.login_username, 'tu276')
self.assertEqual(self.new_user.user_password, 'nathan')
|
def test_init(self):
'\n \n '
self.assertEqual(self.new_user.login_username, 'tu276')
self.assertEqual(self.new_user.user_password, 'nathan')<|docstring|>test_init test case to test if the object is initialized properly<|endoftext|>
|
6952a4278857c053620aa2bd66a61dff921224c3af5cb369b497b32da8d66a63
|
def test_save_user(self):
'\n test case to see if user ogject is saved into \n\n '
self.new_user.save_user()
self.assertEqual(len(User.user_List), 1)
|
test case to see if user ogject is saved into
|
locker_test.py
|
test_save_user
|
Tu276/Password_locker
| 0
|
python
|
def test_save_user(self):
'\n \n\n '
self.new_user.save_user()
self.assertEqual(len(User.user_List), 1)
|
def test_save_user(self):
'\n \n\n '
self.new_user.save_user()
self.assertEqual(len(User.user_List), 1)<|docstring|>test case to see if user ogject is saved into<|endoftext|>
|
6f940f8332afa6f21f37be46dbed37898b62f46ad7914e896a161466dbc9a9e2
|
def test_users_exists(self):
'\n returns boolean if users not found test\n '
self.new_user.save_user()
test_user = User('test_user', 'password')
test_user.save_user()
user_exists = User.user_exist('test')
self.assertTrue(user_exists)
|
returns boolean if users not found test
|
locker_test.py
|
test_users_exists
|
Tu276/Password_locker
| 0
|
python
|
def test_users_exists(self):
'\n \n '
self.new_user.save_user()
test_user = User('test_user', 'password')
test_user.save_user()
user_exists = User.user_exist('test')
self.assertTrue(user_exists)
|
def test_users_exists(self):
'\n \n '
self.new_user.save_user()
test_user = User('test_user', 'password')
test_user.save_user()
user_exists = User.user_exist('test')
self.assertTrue(user_exists)<|docstring|>returns boolean if users not found test<|endoftext|>
|
3310d07e8b0bbbc295ca16259148c925d4b166a52e56de35a6c29f6a02085789
|
def setUp(self):
'\n Set up method to run before each test cases.\n '
self.new_credentials = Credentials('facebook', 'tu276', 'nathan')
'\n test_init test case to test if the object is initialized properly\n '
self.assertEqual(self.new_credentials.account_name, 'facebook')
self.assertEqual(self.new_credentials.account_username, 'tu276')
self.assertEqual(self.new_credentials.account_password, 'nathan')
|
Set up method to run before each test cases.
|
locker_test.py
|
setUp
|
Tu276/Password_locker
| 0
|
python
|
def setUp(self):
'\n \n '
self.new_credentials = Credentials('facebook', 'tu276', 'nathan')
'\n test_init test case to test if the object is initialized properly\n '
self.assertEqual(self.new_credentials.account_name, 'facebook')
self.assertEqual(self.new_credentials.account_username, 'tu276')
self.assertEqual(self.new_credentials.account_password, 'nathan')
|
def setUp(self):
'\n \n '
self.new_credentials = Credentials('facebook', 'tu276', 'nathan')
'\n test_init test case to test if the object is initialized properly\n '
self.assertEqual(self.new_credentials.account_name, 'facebook')
self.assertEqual(self.new_credentials.account_username, 'tu276')
self.assertEqual(self.new_credentials.account_password, 'nathan')<|docstring|>Set up method to run before each test cases.<|endoftext|>
|
218803b1f25824d7073e4e1359a995c185da86311dd94c3aa4b2fb7aa2e2fac9
|
def test_save_credentials(self):
'\n test case to see if user ogject is saved into \n\n '
self.new_credentials.save_credentials()
self.assertEqual(len(Credentials.credentials_List), 1)
|
test case to see if user ogject is saved into
|
locker_test.py
|
test_save_credentials
|
Tu276/Password_locker
| 0
|
python
|
def test_save_credentials(self):
'\n \n\n '
self.new_credentials.save_credentials()
self.assertEqual(len(Credentials.credentials_List), 1)
|
def test_save_credentials(self):
'\n \n\n '
self.new_credentials.save_credentials()
self.assertEqual(len(Credentials.credentials_List), 1)<|docstring|>test case to see if user ogject is saved into<|endoftext|>
|
aa4b6e8586b0ffb644c757a0fff4ec414d2daf0a1389c473f8b2bea3d66c823d
|
def test_credentials_exists(self):
'\n returns boolean if credentials not found test\n '
self.new_credentials.save_credentials()
test_credentials = Credentials('test', 'testusername', 'testpassword')
test_credentials.save_credentials()
credentials_exists = Credentials.credentials_exist('test')
self.assertTrue(credentials_exists)
|
returns boolean if credentials not found test
|
locker_test.py
|
test_credentials_exists
|
Tu276/Password_locker
| 0
|
python
|
def test_credentials_exists(self):
'\n \n '
self.new_credentials.save_credentials()
test_credentials = Credentials('test', 'testusername', 'testpassword')
test_credentials.save_credentials()
credentials_exists = Credentials.credentials_exist('test')
self.assertTrue(credentials_exists)
|
def test_credentials_exists(self):
'\n \n '
self.new_credentials.save_credentials()
test_credentials = Credentials('test', 'testusername', 'testpassword')
test_credentials.save_credentials()
credentials_exists = Credentials.credentials_exist('test')
self.assertTrue(credentials_exists)<|docstring|>returns boolean if credentials not found test<|endoftext|>
|
6e4c04f6a728e40d975b6f3c294e4acc36eb1c05a13d71cf01b4ad482c2122ea
|
def test_display_all_credentials(self):
'\n meothod that returns list of saved credentials\n '
self.assertEqual(Credentials.display_credentials(), Credentials.credentials_List)
|
meothod that returns list of saved credentials
|
locker_test.py
|
test_display_all_credentials
|
Tu276/Password_locker
| 0
|
python
|
def test_display_all_credentials(self):
'\n \n '
self.assertEqual(Credentials.display_credentials(), Credentials.credentials_List)
|
def test_display_all_credentials(self):
'\n \n '
self.assertEqual(Credentials.display_credentials(), Credentials.credentials_List)<|docstring|>meothod that returns list of saved credentials<|endoftext|>
|
f4ed5648c95db2fdb33ee6e868d48bc4162a01d121a132cab6e29d0245b832ca
|
@property
def text_cleaned(self):
'\n Will append a TSEK to every syllable except syllables that host\n an affix.\n\n '
if self.syls:
cleaned = TSEK.join([''.join(syl) for syl in self.syls])
if (self.affix_host and (not self.affix)):
return cleaned
else:
return (cleaned + TSEK)
else:
return ''
|
Will append a TSEK to every syllable except syllables that host
an affix.
|
botok/tokenizers/token.py
|
text_cleaned
|
Esukhia/botok
| 17
|
python
|
@property
def text_cleaned(self):
'\n Will append a TSEK to every syllable except syllables that host\n an affix.\n\n '
if self.syls:
cleaned = TSEK.join([.join(syl) for syl in self.syls])
if (self.affix_host and (not self.affix)):
return cleaned
else:
return (cleaned + TSEK)
else:
return
|
@property
def text_cleaned(self):
'\n Will append a TSEK to every syllable except syllables that host\n an affix.\n\n '
if self.syls:
cleaned = TSEK.join([.join(syl) for syl in self.syls])
if (self.affix_host and (not self.affix)):
return cleaned
else:
return (cleaned + TSEK)
else:
return <|docstring|>Will append a TSEK to every syllable except syllables that host
an affix.<|endoftext|>
|
ed6e737f6d6d8d1ae512671ae112db8e4e45f3bf945dd54be2f7bbf11db438f2
|
def update_coords(self):
'Redraw edit handle based on changes to shape'
bbox = self.shape.elem.bbox_int()
(xi, yi) = self.COORD_MAP[self.tag]
x = (((bbox[0] + bbox[2]) / 2) if (xi is None) else bbox[xi])
y = (((bbox[1] + bbox[3]) / 2) if (yi is None) else bbox[yi])
with self.shape.draw_space() as xform:
(x, y) = xform.transform(x, y)
rad = 5
self.handle.update((x - rad), (y - rad), (x + rad), (y + rad))
|
Redraw edit handle based on changes to shape
|
x7/view/shapes/rect.py
|
update_coords
|
gribbg/x7-view
| 0
|
python
|
def update_coords(self):
bbox = self.shape.elem.bbox_int()
(xi, yi) = self.COORD_MAP[self.tag]
x = (((bbox[0] + bbox[2]) / 2) if (xi is None) else bbox[xi])
y = (((bbox[1] + bbox[3]) / 2) if (yi is None) else bbox[yi])
with self.shape.draw_space() as xform:
(x, y) = xform.transform(x, y)
rad = 5
self.handle.update((x - rad), (y - rad), (x + rad), (y + rad))
|
def update_coords(self):
bbox = self.shape.elem.bbox_int()
(xi, yi) = self.COORD_MAP[self.tag]
x = (((bbox[0] + bbox[2]) / 2) if (xi is None) else bbox[xi])
y = (((bbox[1] + bbox[3]) / 2) if (yi is None) else bbox[yi])
with self.shape.draw_space() as xform:
(x, y) = xform.transform(x, y)
rad = 5
self.handle.update((x - rad), (y - rad), (x + rad), (y + rad))<|docstring|>Redraw edit handle based on changes to shape<|endoftext|>
|
e9932ca87ae37ae76d5fbb4c92589496cb4502a5cac1ac93127e253506e94e65
|
def mouse_button2(self, event):
'Handle mouse_button2, usually via self.context_menu()'
self.context_menu(event, [('what?', None), None, ('bye', None)])
|
Handle mouse_button2, usually via self.context_menu()
|
x7/view/shapes/rect.py
|
mouse_button2
|
gribbg/x7-view
| 0
|
python
|
def mouse_button2(self, event):
self.context_menu(event, [('what?', None), None, ('bye', None)])
|
def mouse_button2(self, event):
self.context_menu(event, [('what?', None), None, ('bye', None)])<|docstring|>Handle mouse_button2, usually via self.context_menu()<|endoftext|>
|
62acf176ce60b078a788b221d1f32b73da790ac0bbbee6c56452024c9d504221
|
def process(self, chat_components: list):
'\n Returns\n ----------\n dict :\n save_path : str :\n Actual save path of file.\n total_lines : int :\n count of total lines written to the file.\n '
if (chat_components is None):
return
with open(self.save_path, mode='a', encoding='utf-8') as f:
for component in chat_components:
if (component is None):
continue
chatdata = component.get('chatdata')
if (chatdata is None):
continue
for action in chatdata:
if (action is None):
continue
json_line = json.dumps(action, ensure_ascii=False)
f.writelines((json_line + '\n'))
self.line_counter += 1
return {'save_path': self.save_path, 'total_lines': self.line_counter}
|
Returns
----------
dict :
save_path : str :
Actual save path of file.
total_lines : int :
count of total lines written to the file.
|
pytchat/processors/jsonfile_archiver.py
|
process
|
pedrohbtp/pytchat
| 246
|
python
|
def process(self, chat_components: list):
'\n Returns\n ----------\n dict :\n save_path : str :\n Actual save path of file.\n total_lines : int :\n count of total lines written to the file.\n '
if (chat_components is None):
return
with open(self.save_path, mode='a', encoding='utf-8') as f:
for component in chat_components:
if (component is None):
continue
chatdata = component.get('chatdata')
if (chatdata is None):
continue
for action in chatdata:
if (action is None):
continue
json_line = json.dumps(action, ensure_ascii=False)
f.writelines((json_line + '\n'))
self.line_counter += 1
return {'save_path': self.save_path, 'total_lines': self.line_counter}
|
def process(self, chat_components: list):
'\n Returns\n ----------\n dict :\n save_path : str :\n Actual save path of file.\n total_lines : int :\n count of total lines written to the file.\n '
if (chat_components is None):
return
with open(self.save_path, mode='a', encoding='utf-8') as f:
for component in chat_components:
if (component is None):
continue
chatdata = component.get('chatdata')
if (chatdata is None):
continue
for action in chatdata:
if (action is None):
continue
json_line = json.dumps(action, ensure_ascii=False)
f.writelines((json_line + '\n'))
self.line_counter += 1
return {'save_path': self.save_path, 'total_lines': self.line_counter}<|docstring|>Returns
----------
dict :
save_path : str :
Actual save path of file.
total_lines : int :
count of total lines written to the file.<|endoftext|>
|
611da98ff4b8a909a7f7ed52895570df299e7fcbb8541e8307b76488430c4bcf
|
@classmethod
def can_translate(cls, header, filename=None):
'Indicate whether this translation class can translate the\n supplied header.\n\n Checks the INSTRUME and FILTER headers.\n\n Parameters\n ----------\n header : `dict`-like\n Header to convert to standardized form.\n filename : `str`, optional\n Name of file being translated.\n\n Returns\n -------\n can : `bool`\n `True` if the header is recognized by this class. `False`\n otherwise.\n '
if ('INSTRUME' in header):
via_instrume = super().can_translate(header, filename=filename)
if via_instrume:
return via_instrume
if (cls.is_keyword_defined(header, 'FILTER') and ('DECam' in header['FILTER'])):
return True
return False
|
Indicate whether this translation class can translate the
supplied header.
Checks the INSTRUME and FILTER headers.
Parameters
----------
header : `dict`-like
Header to convert to standardized form.
filename : `str`, optional
Name of file being translated.
Returns
-------
can : `bool`
`True` if the header is recognized by this class. `False`
otherwise.
|
python/astro_metadata_translator/translators/decam.py
|
can_translate
|
HyperSuprime-Cam/astro_metadata_translator
| 0
|
python
|
@classmethod
def can_translate(cls, header, filename=None):
'Indicate whether this translation class can translate the\n supplied header.\n\n Checks the INSTRUME and FILTER headers.\n\n Parameters\n ----------\n header : `dict`-like\n Header to convert to standardized form.\n filename : `str`, optional\n Name of file being translated.\n\n Returns\n -------\n can : `bool`\n `True` if the header is recognized by this class. `False`\n otherwise.\n '
if ('INSTRUME' in header):
via_instrume = super().can_translate(header, filename=filename)
if via_instrume:
return via_instrume
if (cls.is_keyword_defined(header, 'FILTER') and ('DECam' in header['FILTER'])):
return True
return False
|
@classmethod
def can_translate(cls, header, filename=None):
'Indicate whether this translation class can translate the\n supplied header.\n\n Checks the INSTRUME and FILTER headers.\n\n Parameters\n ----------\n header : `dict`-like\n Header to convert to standardized form.\n filename : `str`, optional\n Name of file being translated.\n\n Returns\n -------\n can : `bool`\n `True` if the header is recognized by this class. `False`\n otherwise.\n '
if ('INSTRUME' in header):
via_instrume = super().can_translate(header, filename=filename)
if via_instrume:
return via_instrume
if (cls.is_keyword_defined(header, 'FILTER') and ('DECam' in header['FILTER'])):
return True
return False<|docstring|>Indicate whether this translation class can translate the
supplied header.
Checks the INSTRUME and FILTER headers.
Parameters
----------
header : `dict`-like
Header to convert to standardized form.
filename : `str`, optional
Name of file being translated.
Returns
-------
can : `bool`
`True` if the header is recognized by this class. `False`
otherwise.<|endoftext|>
|
6fec2de64c2d28f17baf007e368bd63986242e77d71cf6eb55d2bb31db6d48cf
|
@cache_translation
def to_exposure_id(self):
'Calculate exposure ID solely for science observations.\n\n Returns\n -------\n id : `int`\n ID of exposure.\n '
if (self.to_observation_type() != 'science'):
return None
value = self._header['EXPNUM']
self._used_these_cards('EXPNUM')
return value
|
Calculate exposure ID solely for science observations.
Returns
-------
id : `int`
ID of exposure.
|
python/astro_metadata_translator/translators/decam.py
|
to_exposure_id
|
HyperSuprime-Cam/astro_metadata_translator
| 0
|
python
|
@cache_translation
def to_exposure_id(self):
'Calculate exposure ID solely for science observations.\n\n Returns\n -------\n id : `int`\n ID of exposure.\n '
if (self.to_observation_type() != 'science'):
return None
value = self._header['EXPNUM']
self._used_these_cards('EXPNUM')
return value
|
@cache_translation
def to_exposure_id(self):
'Calculate exposure ID solely for science observations.\n\n Returns\n -------\n id : `int`\n ID of exposure.\n '
if (self.to_observation_type() != 'science'):
return None
value = self._header['EXPNUM']
self._used_these_cards('EXPNUM')
return value<|docstring|>Calculate exposure ID solely for science observations.
Returns
-------
id : `int`
ID of exposure.<|endoftext|>
|
6b946c8de1dfe85e52145c8796c609922e2b0653628f6765850872b270931d6b
|
def _translate_from_calib_id(self, field):
'Fetch the ID from the CALIB_ID header.\n\n Calibration products made with constructCalibs have some metadata\n saved in its FITS header CALIB_ID.\n '
data = self._header['CALIB_ID']
match = re.search(('.*%s=(\\S+)' % field), data)
self._used_these_cards('CALIB_ID')
return match.groups()[0]
|
Fetch the ID from the CALIB_ID header.
Calibration products made with constructCalibs have some metadata
saved in its FITS header CALIB_ID.
|
python/astro_metadata_translator/translators/decam.py
|
_translate_from_calib_id
|
HyperSuprime-Cam/astro_metadata_translator
| 0
|
python
|
def _translate_from_calib_id(self, field):
'Fetch the ID from the CALIB_ID header.\n\n Calibration products made with constructCalibs have some metadata\n saved in its FITS header CALIB_ID.\n '
data = self._header['CALIB_ID']
match = re.search(('.*%s=(\\S+)' % field), data)
self._used_these_cards('CALIB_ID')
return match.groups()[0]
|
def _translate_from_calib_id(self, field):
'Fetch the ID from the CALIB_ID header.\n\n Calibration products made with constructCalibs have some metadata\n saved in its FITS header CALIB_ID.\n '
data = self._header['CALIB_ID']
match = re.search(('.*%s=(\\S+)' % field), data)
self._used_these_cards('CALIB_ID')
return match.groups()[0]<|docstring|>Fetch the ID from the CALIB_ID header.
Calibration products made with constructCalibs have some metadata
saved in its FITS header CALIB_ID.<|endoftext|>
|
3bd7c638dee1f69625cb2ca360c6c8107c41252a1123db679aebab58c12b6a30
|
@cache_translation
def to_physical_filter(self):
'Calculate physical filter.\n\n Return `None` if the keyword FILTER does not exist in the header,\n which can happen for some valid Community Pipeline products.\n\n Returns\n -------\n filter : `str`\n The full filter name.\n '
if self.is_key_ok('FILTER'):
value = self._header['FILTER'].strip()
self._used_these_cards('FILTER')
return value
elif self.is_key_ok('CALIB_ID'):
return self._translate_from_calib_id('filter')
else:
return None
|
Calculate physical filter.
Return `None` if the keyword FILTER does not exist in the header,
which can happen for some valid Community Pipeline products.
Returns
-------
filter : `str`
The full filter name.
|
python/astro_metadata_translator/translators/decam.py
|
to_physical_filter
|
HyperSuprime-Cam/astro_metadata_translator
| 0
|
python
|
@cache_translation
def to_physical_filter(self):
'Calculate physical filter.\n\n Return `None` if the keyword FILTER does not exist in the header,\n which can happen for some valid Community Pipeline products.\n\n Returns\n -------\n filter : `str`\n The full filter name.\n '
if self.is_key_ok('FILTER'):
value = self._header['FILTER'].strip()
self._used_these_cards('FILTER')
return value
elif self.is_key_ok('CALIB_ID'):
return self._translate_from_calib_id('filter')
else:
return None
|
@cache_translation
def to_physical_filter(self):
'Calculate physical filter.\n\n Return `None` if the keyword FILTER does not exist in the header,\n which can happen for some valid Community Pipeline products.\n\n Returns\n -------\n filter : `str`\n The full filter name.\n '
if self.is_key_ok('FILTER'):
value = self._header['FILTER'].strip()
self._used_these_cards('FILTER')
return value
elif self.is_key_ok('CALIB_ID'):
return self._translate_from_calib_id('filter')
else:
return None<|docstring|>Calculate physical filter.
Return `None` if the keyword FILTER does not exist in the header,
which can happen for some valid Community Pipeline products.
Returns
-------
filter : `str`
The full filter name.<|endoftext|>
|
4c3f23bee5f2c43af5ce1cbba6e3a1fefa0a767978d8a871d67728b368df6677
|
@cache_translation
def to_location(self):
'Calculate the observatory location.\n\n Returns\n -------\n location : `astropy.coordinates.EarthLocation`\n An object representing the location of the telescope.\n '
if self.is_key_ok('OBS-LONG'):
lon = (self._header['OBS-LONG'] * (- 1.0))
value = EarthLocation.from_geodetic(lon, self._header['OBS-LAT'], self._header['OBS-ELEV'])
self._used_these_cards('OBS-LONG', 'OBS-LAT', 'OBS-ELEV')
else:
value = EarthLocation.of_site('ctio')
return value
|
Calculate the observatory location.
Returns
-------
location : `astropy.coordinates.EarthLocation`
An object representing the location of the telescope.
|
python/astro_metadata_translator/translators/decam.py
|
to_location
|
HyperSuprime-Cam/astro_metadata_translator
| 0
|
python
|
@cache_translation
def to_location(self):
'Calculate the observatory location.\n\n Returns\n -------\n location : `astropy.coordinates.EarthLocation`\n An object representing the location of the telescope.\n '
if self.is_key_ok('OBS-LONG'):
lon = (self._header['OBS-LONG'] * (- 1.0))
value = EarthLocation.from_geodetic(lon, self._header['OBS-LAT'], self._header['OBS-ELEV'])
self._used_these_cards('OBS-LONG', 'OBS-LAT', 'OBS-ELEV')
else:
value = EarthLocation.of_site('ctio')
return value
|
@cache_translation
def to_location(self):
'Calculate the observatory location.\n\n Returns\n -------\n location : `astropy.coordinates.EarthLocation`\n An object representing the location of the telescope.\n '
if self.is_key_ok('OBS-LONG'):
lon = (self._header['OBS-LONG'] * (- 1.0))
value = EarthLocation.from_geodetic(lon, self._header['OBS-LAT'], self._header['OBS-ELEV'])
self._used_these_cards('OBS-LONG', 'OBS-LAT', 'OBS-ELEV')
else:
value = EarthLocation.of_site('ctio')
return value<|docstring|>Calculate the observatory location.
Returns
-------
location : `astropy.coordinates.EarthLocation`
An object representing the location of the telescope.<|endoftext|>
|
01ceccd3f69c7c724f016f67c0985fcefaa4378f707c8cae6fab9a8ca992a811
|
@cache_translation
def to_observation_type(self):
'Calculate the observation type.\n\n Returns\n -------\n typ : `str`\n Observation type. Normalized to standard set.\n '
if (not self.is_key_ok('OBSTYPE')):
return 'none'
obstype = self._header['OBSTYPE'].strip().lower()
self._used_these_cards('OBSTYPE')
if (obstype == 'object'):
return 'science'
return obstype
|
Calculate the observation type.
Returns
-------
typ : `str`
Observation type. Normalized to standard set.
|
python/astro_metadata_translator/translators/decam.py
|
to_observation_type
|
HyperSuprime-Cam/astro_metadata_translator
| 0
|
python
|
@cache_translation
def to_observation_type(self):
'Calculate the observation type.\n\n Returns\n -------\n typ : `str`\n Observation type. Normalized to standard set.\n '
if (not self.is_key_ok('OBSTYPE')):
return 'none'
obstype = self._header['OBSTYPE'].strip().lower()
self._used_these_cards('OBSTYPE')
if (obstype == 'object'):
return 'science'
return obstype
|
@cache_translation
def to_observation_type(self):
'Calculate the observation type.\n\n Returns\n -------\n typ : `str`\n Observation type. Normalized to standard set.\n '
if (not self.is_key_ok('OBSTYPE')):
return 'none'
obstype = self._header['OBSTYPE'].strip().lower()
self._used_these_cards('OBSTYPE')
if (obstype == 'object'):
return 'science'
return obstype<|docstring|>Calculate the observation type.
Returns
-------
typ : `str`
Observation type. Normalized to standard set.<|endoftext|>
|
cfb9787e2f8a96c90d4ee8f53a8a7d30ddf4ccdb58356f832991a55a87d3bb97
|
def expand(arational):
'\n Return an iterator of a regular continued fraction expansion of\n given rational number.\n '
floor = real.floor
element = floor(arational)
(yield element)
(p0, p1) = (1, element)
(q0, q1) = (0, 1)
rest = (arational - element)
assert (0 <= rest < 1)
while rest:
element = floor(rest.inverse())
(yield element)
(p0, p1) = (p1, ((element * p1) + p0))
(q0, q1) = (q1, ((element * q1) + q0))
rest = (rest.inverse() - element)
|
Return an iterator of a regular continued fraction expansion of
given rational number.
|
sandbox/cf.py
|
expand
|
turkeydonkey/nzmath3
| 1
|
python
|
def expand(arational):
'\n Return an iterator of a regular continued fraction expansion of\n given rational number.\n '
floor = real.floor
element = floor(arational)
(yield element)
(p0, p1) = (1, element)
(q0, q1) = (0, 1)
rest = (arational - element)
assert (0 <= rest < 1)
while rest:
element = floor(rest.inverse())
(yield element)
(p0, p1) = (p1, ((element * p1) + p0))
(q0, q1) = (q1, ((element * q1) + q0))
rest = (rest.inverse() - element)
|
def expand(arational):
'\n Return an iterator of a regular continued fraction expansion of\n given rational number.\n '
floor = real.floor
element = floor(arational)
(yield element)
(p0, p1) = (1, element)
(q0, q1) = (0, 1)
rest = (arational - element)
assert (0 <= rest < 1)
while rest:
element = floor(rest.inverse())
(yield element)
(p0, p1) = (p1, ((element * p1) + p0))
(q0, q1) = (q1, ((element * q1) + q0))
rest = (rest.inverse() - element)<|docstring|>Return an iterator of a regular continued fraction expansion of
given rational number.<|endoftext|>
|
8bdab709503d2504ebf764b4a10baea3659a07a4d2325dd3e6a97c730023c9ce
|
def __init__(self, expansion):
'\n ContinuedFraction(expansion) defines a number.\n\n expansion is an iterator generating integer series:\n [a0; a1, a2, ...]\n It can be either finite or infinite.\n '
self._expansion = iter(expansion)
self.numerator = 0
self.denominator = 1
self._numerator_old = 0
self._denominator_old = 0
self._counter = (- 1)
self._exhausted = False
try:
initial_term = next(self._expansion)
self.numerator = initial_term
self._counter = 0
except StopIteration:
self._exhausted = True
if (not self._exhausted):
try:
first_term = next(self._expansion)
(self.denominator, self._denominator_old) = (first_term, 1)
(self.numerator, self._numerator_old) = (((first_term * self.numerator) + 1), self.numerator)
self._counter = 1
except StopIteration:
self._exhausted = True
|
ContinuedFraction(expansion) defines a number.
expansion is an iterator generating integer series:
[a0; a1, a2, ...]
It can be either finite or infinite.
|
sandbox/cf.py
|
__init__
|
turkeydonkey/nzmath3
| 1
|
python
|
def __init__(self, expansion):
'\n ContinuedFraction(expansion) defines a number.\n\n expansion is an iterator generating integer series:\n [a0; a1, a2, ...]\n It can be either finite or infinite.\n '
self._expansion = iter(expansion)
self.numerator = 0
self.denominator = 1
self._numerator_old = 0
self._denominator_old = 0
self._counter = (- 1)
self._exhausted = False
try:
initial_term = next(self._expansion)
self.numerator = initial_term
self._counter = 0
except StopIteration:
self._exhausted = True
if (not self._exhausted):
try:
first_term = next(self._expansion)
(self.denominator, self._denominator_old) = (first_term, 1)
(self.numerator, self._numerator_old) = (((first_term * self.numerator) + 1), self.numerator)
self._counter = 1
except StopIteration:
self._exhausted = True
|
def __init__(self, expansion):
'\n ContinuedFraction(expansion) defines a number.\n\n expansion is an iterator generating integer series:\n [a0; a1, a2, ...]\n It can be either finite or infinite.\n '
self._expansion = iter(expansion)
self.numerator = 0
self.denominator = 1
self._numerator_old = 0
self._denominator_old = 0
self._counter = (- 1)
self._exhausted = False
try:
initial_term = next(self._expansion)
self.numerator = initial_term
self._counter = 0
except StopIteration:
self._exhausted = True
if (not self._exhausted):
try:
first_term = next(self._expansion)
(self.denominator, self._denominator_old) = (first_term, 1)
(self.numerator, self._numerator_old) = (((first_term * self.numerator) + 1), self.numerator)
self._counter = 1
except StopIteration:
self._exhausted = True<|docstring|>ContinuedFraction(expansion) defines a number.
expansion is an iterator generating integer series:
[a0; a1, a2, ...]
It can be either finite or infinite.<|endoftext|>
|
5a9accd64587b8c48954dca38ff08edaafd45d2707196b803d9c7af69b1b0694
|
def convergent(self, atleast):
"\n Return an n-th convergent, where n >= 'atleast' if available.\n "
while ((not self._exhausted) and (self._counter < atleast)):
try:
element = next(self._expansion)
except StopIteration:
self._exhausted = True
break
(self.numerator, self._numerator_old) = (((element * self.numerator) + self._numerator_old), self.numerator)
(self.denominator, self._denominator_old) = (((element * self.denominator) + self._denominator_old), self.denominator)
self._counter += 1
return rational.Rational(self.numerator, self.denominator)
|
Return an n-th convergent, where n >= 'atleast' if available.
|
sandbox/cf.py
|
convergent
|
turkeydonkey/nzmath3
| 1
|
python
|
def convergent(self, atleast):
"\n \n "
while ((not self._exhausted) and (self._counter < atleast)):
try:
element = next(self._expansion)
except StopIteration:
self._exhausted = True
break
(self.numerator, self._numerator_old) = (((element * self.numerator) + self._numerator_old), self.numerator)
(self.denominator, self._denominator_old) = (((element * self.denominator) + self._denominator_old), self.denominator)
self._counter += 1
return rational.Rational(self.numerator, self.denominator)
|
def convergent(self, atleast):
"\n \n "
while ((not self._exhausted) and (self._counter < atleast)):
try:
element = next(self._expansion)
except StopIteration:
self._exhausted = True
break
(self.numerator, self._numerator_old) = (((element * self.numerator) + self._numerator_old), self.numerator)
(self.denominator, self._denominator_old) = (((element * self.denominator) + self._denominator_old), self.denominator)
self._counter += 1
return rational.Rational(self.numerator, self.denominator)<|docstring|>Return an n-th convergent, where n >= 'atleast' if available.<|endoftext|>
|
d018b9707bdeb0cf288de7a2eea92a9118c6cbbe9ff8a9766732928af0485e60
|
def full_process(program: MPQP_Program, active_set: List[int]):
'\n This is the function block that is executed in parallel. This takes a MPQP program as well as an active set combination, and \\\n checks the feasibility of all super sets of cardinality + 1. This is done without using a pruning list as in the other\\\n parallel combinatorial algorithm. This is suited for particularly large problems where an exponential number of pruned\\\n active sets are stored, causing a large memory overhead.\n\n\n :param program:\n :param active_set:\n :return:\n '
feasible_children = []
valid_critical_regions = []
children = generate_children_sets(active_set, program.num_constraints())
for child in children:
if program.check_feasibility(child):
feasible_children.append(child)
else:
continue
if program.check_optimality(child):
region = gen_cr_from_active_set(program, child)
if (region is not None):
valid_critical_regions.append(region)
is_max_depth = ((len(active_set) + 1) == max(program.num_t(), program.num_x()))
if is_max_depth:
feasible_children = []
return [feasible_children, valid_critical_regions]
|
This is the function block that is executed in parallel. This takes a MPQP program as well as an active set combination, and \
checks the feasibility of all super sets of cardinality + 1. This is done without using a pruning list as in the other\
parallel combinatorial algorithm. This is suited for particularly large problems where an exponential number of pruned\
active sets are stored, causing a large memory overhead.
:param program:
:param active_set:
:return:
|
src/ppopt/mp_solvers/mpqp_parrallel_combinatorial_exp.py
|
full_process
|
TAMUparametric/PPOPT
| 9
|
python
|
def full_process(program: MPQP_Program, active_set: List[int]):
'\n This is the function block that is executed in parallel. This takes a MPQP program as well as an active set combination, and \\\n checks the feasibility of all super sets of cardinality + 1. This is done without using a pruning list as in the other\\\n parallel combinatorial algorithm. This is suited for particularly large problems where an exponential number of pruned\\\n active sets are stored, causing a large memory overhead.\n\n\n :param program:\n :param active_set:\n :return:\n '
feasible_children = []
valid_critical_regions = []
children = generate_children_sets(active_set, program.num_constraints())
for child in children:
if program.check_feasibility(child):
feasible_children.append(child)
else:
continue
if program.check_optimality(child):
region = gen_cr_from_active_set(program, child)
if (region is not None):
valid_critical_regions.append(region)
is_max_depth = ((len(active_set) + 1) == max(program.num_t(), program.num_x()))
if is_max_depth:
feasible_children = []
return [feasible_children, valid_critical_regions]
|
def full_process(program: MPQP_Program, active_set: List[int]):
'\n This is the function block that is executed in parallel. This takes a MPQP program as well as an active set combination, and \\\n checks the feasibility of all super sets of cardinality + 1. This is done without using a pruning list as in the other\\\n parallel combinatorial algorithm. This is suited for particularly large problems where an exponential number of pruned\\\n active sets are stored, causing a large memory overhead.\n\n\n :param program:\n :param active_set:\n :return:\n '
feasible_children = []
valid_critical_regions = []
children = generate_children_sets(active_set, program.num_constraints())
for child in children:
if program.check_feasibility(child):
feasible_children.append(child)
else:
continue
if program.check_optimality(child):
region = gen_cr_from_active_set(program, child)
if (region is not None):
valid_critical_regions.append(region)
is_max_depth = ((len(active_set) + 1) == max(program.num_t(), program.num_x()))
if is_max_depth:
feasible_children = []
return [feasible_children, valid_critical_regions]<|docstring|>This is the function block that is executed in parallel. This takes a MPQP program as well as an active set combination, and \
checks the feasibility of all super sets of cardinality + 1. This is done without using a pruning list as in the other\
parallel combinatorial algorithm. This is suited for particularly large problems where an exponential number of pruned\
active sets are stored, causing a large memory overhead.
:param program:
:param active_set:
:return:<|endoftext|>
|
0dcc3e7054548e9a209ca4f7e7384308ea5ad28b6f428455f7866fbee69da1f3
|
def solve(program: MPQP_Program, num_cores=(- 1)) -> Solution:
'\n Solves the MPQP program with a modified algorithm described in Gupta et al. 2011\n\n This is the parallel version of the combinatorial.\n\n url: https://www.sciencedirect.com/science/article/pii/S0005109811003190\n\n :param num_cores: Sets the number of cores that are allocated to run this algorithm\n :param program: MPQP to be solved\n :return: the solution of the MPQP\n '
start = time.time()
if (num_cores == (- 1)):
num_cores = num_cpu_cores()
print(f'Spawned threads across {num_cores}')
pool = Pool(num_cores)
to_check = list()
solution = Solution(program, [])
max_depth = (max(program.num_x(), program.num_t()) - len(program.equality_indices))
if (not program.check_feasibility(program.equality_indices)):
return solution
if program.check_optimality(program.equality_indices):
region = gen_cr_from_active_set(program, program.equality_indices)
if (region is not None):
solution.add_region(region)
to_check.append(program.equality_indices)
for i in range(max_depth):
print(f'Time at depth test {(i + 1)}, {(time.time() - start)}')
print(f'Number of active sets to be considered is {len(to_check)}')
depth_time = time.time()
f = (lambda x: full_process(program, x))
future_list = list()
shuffle(to_check)
outputs = pool.map(f, to_check)
print(f'Time to run all tasks in parallel {(time.time() - depth_time)}')
depth_time = time.time()
for output in outputs:
if (len(output[0]) != 0):
future_list.extend(output[0])
if (len(output[1]) != 0):
for region in output[1]:
solution.add_region(region)
print(f'Time to process all depth outputs {(time.time() - depth_time)}')
to_check = future_list
if (len(to_check) == 0):
break
pool.clear()
return solution
|
Solves the MPQP program with a modified algorithm described in Gupta et al. 2011
This is the parallel version of the combinatorial.
url: https://www.sciencedirect.com/science/article/pii/S0005109811003190
:param num_cores: Sets the number of cores that are allocated to run this algorithm
:param program: MPQP to be solved
:return: the solution of the MPQP
|
src/ppopt/mp_solvers/mpqp_parrallel_combinatorial_exp.py
|
solve
|
TAMUparametric/PPOPT
| 9
|
python
|
def solve(program: MPQP_Program, num_cores=(- 1)) -> Solution:
'\n Solves the MPQP program with a modified algorithm described in Gupta et al. 2011\n\n This is the parallel version of the combinatorial.\n\n url: https://www.sciencedirect.com/science/article/pii/S0005109811003190\n\n :param num_cores: Sets the number of cores that are allocated to run this algorithm\n :param program: MPQP to be solved\n :return: the solution of the MPQP\n '
start = time.time()
if (num_cores == (- 1)):
num_cores = num_cpu_cores()
print(f'Spawned threads across {num_cores}')
pool = Pool(num_cores)
to_check = list()
solution = Solution(program, [])
max_depth = (max(program.num_x(), program.num_t()) - len(program.equality_indices))
if (not program.check_feasibility(program.equality_indices)):
return solution
if program.check_optimality(program.equality_indices):
region = gen_cr_from_active_set(program, program.equality_indices)
if (region is not None):
solution.add_region(region)
to_check.append(program.equality_indices)
for i in range(max_depth):
print(f'Time at depth test {(i + 1)}, {(time.time() - start)}')
print(f'Number of active sets to be considered is {len(to_check)}')
depth_time = time.time()
f = (lambda x: full_process(program, x))
future_list = list()
shuffle(to_check)
outputs = pool.map(f, to_check)
print(f'Time to run all tasks in parallel {(time.time() - depth_time)}')
depth_time = time.time()
for output in outputs:
if (len(output[0]) != 0):
future_list.extend(output[0])
if (len(output[1]) != 0):
for region in output[1]:
solution.add_region(region)
print(f'Time to process all depth outputs {(time.time() - depth_time)}')
to_check = future_list
if (len(to_check) == 0):
break
pool.clear()
return solution
|
def solve(program: MPQP_Program, num_cores=(- 1)) -> Solution:
'\n Solves the MPQP program with a modified algorithm described in Gupta et al. 2011\n\n This is the parallel version of the combinatorial.\n\n url: https://www.sciencedirect.com/science/article/pii/S0005109811003190\n\n :param num_cores: Sets the number of cores that are allocated to run this algorithm\n :param program: MPQP to be solved\n :return: the solution of the MPQP\n '
start = time.time()
if (num_cores == (- 1)):
num_cores = num_cpu_cores()
print(f'Spawned threads across {num_cores}')
pool = Pool(num_cores)
to_check = list()
solution = Solution(program, [])
max_depth = (max(program.num_x(), program.num_t()) - len(program.equality_indices))
if (not program.check_feasibility(program.equality_indices)):
return solution
if program.check_optimality(program.equality_indices):
region = gen_cr_from_active_set(program, program.equality_indices)
if (region is not None):
solution.add_region(region)
to_check.append(program.equality_indices)
for i in range(max_depth):
print(f'Time at depth test {(i + 1)}, {(time.time() - start)}')
print(f'Number of active sets to be considered is {len(to_check)}')
depth_time = time.time()
f = (lambda x: full_process(program, x))
future_list = list()
shuffle(to_check)
outputs = pool.map(f, to_check)
print(f'Time to run all tasks in parallel {(time.time() - depth_time)}')
depth_time = time.time()
for output in outputs:
if (len(output[0]) != 0):
future_list.extend(output[0])
if (len(output[1]) != 0):
for region in output[1]:
solution.add_region(region)
print(f'Time to process all depth outputs {(time.time() - depth_time)}')
to_check = future_list
if (len(to_check) == 0):
break
pool.clear()
return solution<|docstring|>Solves the MPQP program with a modified algorithm described in Gupta et al. 2011
This is the parallel version of the combinatorial.
url: https://www.sciencedirect.com/science/article/pii/S0005109811003190
:param num_cores: Sets the number of cores that are allocated to run this algorithm
:param program: MPQP to be solved
:return: the solution of the MPQP<|endoftext|>
|
80ee772e533daa8098aee58d8c5b2c791328087253406379f94af1ad4eadf9ad
|
def sparse_to_tuple(sparse_mx):
'Convert sparse matrix to tuple representation.'
def to_tuple(mx):
if (not sp.isspmatrix_coo(mx)):
mx = mx.tocoo()
coords = np.vstack((mx.row, mx.col)).transpose()
values = mx.data
shape = mx.shape
return (coords, values, shape)
if isinstance(sparse_mx, list):
for i in range(len(sparse_mx)):
sparse_mx[i] = to_tuple(sparse_mx[i])
else:
sparse_mx = to_tuple(sparse_mx)
return sparse_mx
|
Convert sparse matrix to tuple representation.
|
gcn/utils.py
|
sparse_to_tuple
|
mrkidney/crystal_classification
| 0
|
python
|
def sparse_to_tuple(sparse_mx):
def to_tuple(mx):
if (not sp.isspmatrix_coo(mx)):
mx = mx.tocoo()
coords = np.vstack((mx.row, mx.col)).transpose()
values = mx.data
shape = mx.shape
return (coords, values, shape)
if isinstance(sparse_mx, list):
for i in range(len(sparse_mx)):
sparse_mx[i] = to_tuple(sparse_mx[i])
else:
sparse_mx = to_tuple(sparse_mx)
return sparse_mx
|
def sparse_to_tuple(sparse_mx):
def to_tuple(mx):
if (not sp.isspmatrix_coo(mx)):
mx = mx.tocoo()
coords = np.vstack((mx.row, mx.col)).transpose()
values = mx.data
shape = mx.shape
return (coords, values, shape)
if isinstance(sparse_mx, list):
for i in range(len(sparse_mx)):
sparse_mx[i] = to_tuple(sparse_mx[i])
else:
sparse_mx = to_tuple(sparse_mx)
return sparse_mx<|docstring|>Convert sparse matrix to tuple representation.<|endoftext|>
|
f547ba728b06c34ddd014197712e0514cea23ae088a9a92b815a99ae62451a24
|
def preprocess_features(features):
'Row-normalize feature matrix and convert to tuple representation'
rowsum = np.array(features.sum(1))
r_inv = np.power(rowsum, (- 1)).flatten()
r_inv[np.isinf(r_inv)] = 0.0
r_mat_inv = sp.diags(r_inv)
features = r_mat_inv.dot(features)
return sparse_to_tuple(features)
|
Row-normalize feature matrix and convert to tuple representation
|
gcn/utils.py
|
preprocess_features
|
mrkidney/crystal_classification
| 0
|
python
|
def preprocess_features(features):
rowsum = np.array(features.sum(1))
r_inv = np.power(rowsum, (- 1)).flatten()
r_inv[np.isinf(r_inv)] = 0.0
r_mat_inv = sp.diags(r_inv)
features = r_mat_inv.dot(features)
return sparse_to_tuple(features)
|
def preprocess_features(features):
rowsum = np.array(features.sum(1))
r_inv = np.power(rowsum, (- 1)).flatten()
r_inv[np.isinf(r_inv)] = 0.0
r_mat_inv = sp.diags(r_inv)
features = r_mat_inv.dot(features)
return sparse_to_tuple(features)<|docstring|>Row-normalize feature matrix and convert to tuple representation<|endoftext|>
|
48c0e63e9804d6a15b8a2572d5b772d4538afb997f25171c98ca8514833e7dab
|
def normalize_adj(adj):
'Symmetrically normalize adjacency matrix.'
adj = sp.coo_matrix(adj)
rowsum = np.array(adj.sum(1))
d_inv_sqrt = np.power(rowsum, (- 0.5)).flatten()
d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.0
d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt)
|
Symmetrically normalize adjacency matrix.
|
gcn/utils.py
|
normalize_adj
|
mrkidney/crystal_classification
| 0
|
python
|
def normalize_adj(adj):
adj = sp.coo_matrix(adj)
rowsum = np.array(adj.sum(1))
d_inv_sqrt = np.power(rowsum, (- 0.5)).flatten()
d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.0
d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt)
|
def normalize_adj(adj):
adj = sp.coo_matrix(adj)
rowsum = np.array(adj.sum(1))
d_inv_sqrt = np.power(rowsum, (- 0.5)).flatten()
d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.0
d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt)<|docstring|>Symmetrically normalize adjacency matrix.<|endoftext|>
|
272e416b60aeb909a1ed4302312e0ed4e9e8b533b069bf5a2af24991488e385a
|
def preprocess_adj(adj):
'Preprocessing of adjacency matrix for simple GCN model and conversion to tuple representation.'
adj_normalized = normalize_adj((adj + sp.eye(adj.shape[0])))
return adj_normalized
|
Preprocessing of adjacency matrix for simple GCN model and conversion to tuple representation.
|
gcn/utils.py
|
preprocess_adj
|
mrkidney/crystal_classification
| 0
|
python
|
def preprocess_adj(adj):
adj_normalized = normalize_adj((adj + sp.eye(adj.shape[0])))
return adj_normalized
|
def preprocess_adj(adj):
adj_normalized = normalize_adj((adj + sp.eye(adj.shape[0])))
return adj_normalized<|docstring|>Preprocessing of adjacency matrix for simple GCN model and conversion to tuple representation.<|endoftext|>
|
386fa2fa776e6baa70a032424754171a5966b35b8b4caa68036194006ead062c
|
def construct_feed_dict(features, adj_norm, adj_orig, labels, labels_mask, placeholders):
'Construct feed dictionary.'
feed_dict = dict()
feed_dict.update({placeholders['labels']: labels})
feed_dict.update({placeholders['labels_mask']: labels_mask})
feed_dict.update({placeholders['features']: features})
feed_dict.update({placeholders['adj_norm']: adj_norm})
feed_dict.update({placeholders['adj_orig']: adj_orig})
feed_dict.update({placeholders['num_features_nonzero']: features[1].shape})
return feed_dict
|
Construct feed dictionary.
|
gcn/utils.py
|
construct_feed_dict
|
mrkidney/crystal_classification
| 0
|
python
|
def construct_feed_dict(features, adj_norm, adj_orig, labels, labels_mask, placeholders):
feed_dict = dict()
feed_dict.update({placeholders['labels']: labels})
feed_dict.update({placeholders['labels_mask']: labels_mask})
feed_dict.update({placeholders['features']: features})
feed_dict.update({placeholders['adj_norm']: adj_norm})
feed_dict.update({placeholders['adj_orig']: adj_orig})
feed_dict.update({placeholders['num_features_nonzero']: features[1].shape})
return feed_dict
|
def construct_feed_dict(features, adj_norm, adj_orig, labels, labels_mask, placeholders):
feed_dict = dict()
feed_dict.update({placeholders['labels']: labels})
feed_dict.update({placeholders['labels_mask']: labels_mask})
feed_dict.update({placeholders['features']: features})
feed_dict.update({placeholders['adj_norm']: adj_norm})
feed_dict.update({placeholders['adj_orig']: adj_orig})
feed_dict.update({placeholders['num_features_nonzero']: features[1].shape})
return feed_dict<|docstring|>Construct feed dictionary.<|endoftext|>
|
46ff389c78933fc25d76f513aebde968cd403e93a042fa95b2ec9fc18a722a62
|
def add_element(self, element_type: str, element_id: str, position: Tuple[(int, int)], **kwargs):
'\n Validates element_id and element_type, then if they are valid,\n adds new element to the scheme at specified position\n '
elem_type_to_class_dct = {'multiplexer': elements.Multiplexer, 'and': elements.AndGate, 'or': elements.OrGate, 'not': elements.NotGate, 'nor': elements.NorGate, 'xor': elements.XorGate, 'nand': elements.NandGate, 'constant': elements.Constant, 'variable': elements.Variable, 'decoder': elements.Decoder, 'encoder': elements.Encoder, 'fulladder': elements.FullAdder, 'addersubtractor': elements.AdderSubtractor, 'shifter': elements.RightShifter, 'srflipflop': elements.GatedSRFlipFlop, 'dflipflop': elements.GatedDFlipFlop}
if (not self._validate_id(element_id)):
raise IdIsAlreadyTakenError(element_id)
try:
new_element = elem_type_to_class_dct[element_type.lower()](element_id, position, **kwargs)
except KeyError as keyerror:
raise WrongElementTypeError(element_type) from keyerror
self._elements[element_id] = new_element
|
Validates element_id and element_type, then if they are valid,
adds new element to the scheme at specified position
|
src/scheme.py
|
add_element
|
archy-co/artilife
| 0
|
python
|
def add_element(self, element_type: str, element_id: str, position: Tuple[(int, int)], **kwargs):
'\n Validates element_id and element_type, then if they are valid,\n adds new element to the scheme at specified position\n '
elem_type_to_class_dct = {'multiplexer': elements.Multiplexer, 'and': elements.AndGate, 'or': elements.OrGate, 'not': elements.NotGate, 'nor': elements.NorGate, 'xor': elements.XorGate, 'nand': elements.NandGate, 'constant': elements.Constant, 'variable': elements.Variable, 'decoder': elements.Decoder, 'encoder': elements.Encoder, 'fulladder': elements.FullAdder, 'addersubtractor': elements.AdderSubtractor, 'shifter': elements.RightShifter, 'srflipflop': elements.GatedSRFlipFlop, 'dflipflop': elements.GatedDFlipFlop}
if (not self._validate_id(element_id)):
raise IdIsAlreadyTakenError(element_id)
try:
new_element = elem_type_to_class_dct[element_type.lower()](element_id, position, **kwargs)
except KeyError as keyerror:
raise WrongElementTypeError(element_type) from keyerror
self._elements[element_id] = new_element
|
def add_element(self, element_type: str, element_id: str, position: Tuple[(int, int)], **kwargs):
'\n Validates element_id and element_type, then if they are valid,\n adds new element to the scheme at specified position\n '
elem_type_to_class_dct = {'multiplexer': elements.Multiplexer, 'and': elements.AndGate, 'or': elements.OrGate, 'not': elements.NotGate, 'nor': elements.NorGate, 'xor': elements.XorGate, 'nand': elements.NandGate, 'constant': elements.Constant, 'variable': elements.Variable, 'decoder': elements.Decoder, 'encoder': elements.Encoder, 'fulladder': elements.FullAdder, 'addersubtractor': elements.AdderSubtractor, 'shifter': elements.RightShifter, 'srflipflop': elements.GatedSRFlipFlop, 'dflipflop': elements.GatedDFlipFlop}
if (not self._validate_id(element_id)):
raise IdIsAlreadyTakenError(element_id)
try:
new_element = elem_type_to_class_dct[element_type.lower()](element_id, position, **kwargs)
except KeyError as keyerror:
raise WrongElementTypeError(element_type) from keyerror
self._elements[element_id] = new_element<|docstring|>Validates element_id and element_type, then if they are valid,
adds new element to the scheme at specified position<|endoftext|>
|
01ce2d2de6feb568d5a66d81417c80f12a1add419ac4aba46c5b6c7366483393
|
def _validate_id(self, id_: str) -> bool:
'\n Checks if the <id> is already assigned to an element in <self._elements> (there is\n an element with such id as key in the self._elements dictionary)\n Return: True if id is available\n False if id is already taken\n '
return (not (id_ in self._elements.keys()))
|
Checks if the <id> is already assigned to an element in <self._elements> (there is
an element with such id as key in the self._elements dictionary)
Return: True if id is available
False if id is already taken
|
src/scheme.py
|
_validate_id
|
archy-co/artilife
| 0
|
python
|
def _validate_id(self, id_: str) -> bool:
'\n Checks if the <id> is already assigned to an element in <self._elements> (there is\n an element with such id as key in the self._elements dictionary)\n Return: True if id is available\n False if id is already taken\n '
return (not (id_ in self._elements.keys()))
|
def _validate_id(self, id_: str) -> bool:
'\n Checks if the <id> is already assigned to an element in <self._elements> (there is\n an element with such id as key in the self._elements dictionary)\n Return: True if id is available\n False if id is already taken\n '
return (not (id_ in self._elements.keys()))<|docstring|>Checks if the <id> is already assigned to an element in <self._elements> (there is
an element with such id as key in the self._elements dictionary)
Return: True if id is available
False if id is already taken<|endoftext|>
|
a20df111c7f4985e29240507e87b487088d1ce446a39dc16f5faac50d4ddc859
|
def add_connection(self, source_id, output_label, destination_id, input_label):
'\n Add connection from *output_label* output of element with id *source_id*\n to *input_label* input of element with id *destination_id* if validation\n is successful\n\n If there is no such output label / input label, corresponding Exception\n will be raised\n '
source = self._elements[source_id]
destination = self._elements[destination_id]
connection = elements.Connection(source, output_label, destination, input_label)
self._validate_connection(connection)
try:
source.set_output_connection(connection)
except KeyError as keyerror:
raise NoSuchOutputLabelError(output_label) from keyerror
destination.set_input_connection(connection)
|
Add connection from *output_label* output of element with id *source_id*
to *input_label* input of element with id *destination_id* if validation
is successful
If there is no such output label / input label, corresponding Exception
will be raised
|
src/scheme.py
|
add_connection
|
archy-co/artilife
| 0
|
python
|
def add_connection(self, source_id, output_label, destination_id, input_label):
'\n Add connection from *output_label* output of element with id *source_id*\n to *input_label* input of element with id *destination_id* if validation\n is successful\n\n If there is no such output label / input label, corresponding Exception\n will be raised\n '
source = self._elements[source_id]
destination = self._elements[destination_id]
connection = elements.Connection(source, output_label, destination, input_label)
self._validate_connection(connection)
try:
source.set_output_connection(connection)
except KeyError as keyerror:
raise NoSuchOutputLabelError(output_label) from keyerror
destination.set_input_connection(connection)
|
def add_connection(self, source_id, output_label, destination_id, input_label):
'\n Add connection from *output_label* output of element with id *source_id*\n to *input_label* input of element with id *destination_id* if validation\n is successful\n\n If there is no such output label / input label, corresponding Exception\n will be raised\n '
source = self._elements[source_id]
destination = self._elements[destination_id]
connection = elements.Connection(source, output_label, destination, input_label)
self._validate_connection(connection)
try:
source.set_output_connection(connection)
except KeyError as keyerror:
raise NoSuchOutputLabelError(output_label) from keyerror
destination.set_input_connection(connection)<|docstring|>Add connection from *output_label* output of element with id *source_id*
to *input_label* input of element with id *destination_id* if validation
is successful
If there is no such output label / input label, corresponding Exception
will be raised<|endoftext|>
|
9c6c52497b9ef658064fac5694657ffeb07c1c0a9dc9f22260a383db4c0ededf
|
def delete_element(self, element_id: str):
'\n Deletes element from scheme with all conections. Corresponding connections\n of connected elements are set to None\n '
if (element_id not in self._elements.keys()):
raise NoSuchIdError(element_id)
element = self._elements[element_id]
for _out in element.outs:
for out_connection in element.outs[_out]:
out_connection.source.delete_output_connection(out_connection.output_label)
out_connection.destination.delete_input_connection(out_connection.input_label)
for _in in element.ins:
in_connection = element.ins[_in]
if (in_connection is None):
continue
in_connection.source.delete_output_connection(in_connection.output_label)
in_connection.destination.delete_input_connection(in_connection.input_label)
self._elements.pop(element_id)
|
Deletes element from scheme with all conections. Corresponding connections
of connected elements are set to None
|
src/scheme.py
|
delete_element
|
archy-co/artilife
| 0
|
python
|
def delete_element(self, element_id: str):
'\n Deletes element from scheme with all conections. Corresponding connections\n of connected elements are set to None\n '
if (element_id not in self._elements.keys()):
raise NoSuchIdError(element_id)
element = self._elements[element_id]
for _out in element.outs:
for out_connection in element.outs[_out]:
out_connection.source.delete_output_connection(out_connection.output_label)
out_connection.destination.delete_input_connection(out_connection.input_label)
for _in in element.ins:
in_connection = element.ins[_in]
if (in_connection is None):
continue
in_connection.source.delete_output_connection(in_connection.output_label)
in_connection.destination.delete_input_connection(in_connection.input_label)
self._elements.pop(element_id)
|
def delete_element(self, element_id: str):
'\n Deletes element from scheme with all conections. Corresponding connections\n of connected elements are set to None\n '
if (element_id not in self._elements.keys()):
raise NoSuchIdError(element_id)
element = self._elements[element_id]
for _out in element.outs:
for out_connection in element.outs[_out]:
out_connection.source.delete_output_connection(out_connection.output_label)
out_connection.destination.delete_input_connection(out_connection.input_label)
for _in in element.ins:
in_connection = element.ins[_in]
if (in_connection is None):
continue
in_connection.source.delete_output_connection(in_connection.output_label)
in_connection.destination.delete_input_connection(in_connection.input_label)
self._elements.pop(element_id)<|docstring|>Deletes element from scheme with all conections. Corresponding connections
of connected elements are set to None<|endoftext|>
|
41af5e7a5ab0a245ae9932fcf6b2c2e9967e5ca8b3a208f5923b88c9f9754117
|
def delete_connection(self, source_id: str, output_label: str, destination_id: str, input_label: str):
'\n Deletes connection between elements by deliting source output and destination input\n '
source = self._elements[source_id]
destination = self._elements[destination_id]
source.delete_output_connection(output_label)
destination.delete_input_connection(input_label)
|
Deletes connection between elements by deliting source output and destination input
|
src/scheme.py
|
delete_connection
|
archy-co/artilife
| 0
|
python
|
def delete_connection(self, source_id: str, output_label: str, destination_id: str, input_label: str):
'\n \n '
source = self._elements[source_id]
destination = self._elements[destination_id]
source.delete_output_connection(output_label)
destination.delete_input_connection(input_label)
|
def delete_connection(self, source_id: str, output_label: str, destination_id: str, input_label: str):
'\n \n '
source = self._elements[source_id]
destination = self._elements[destination_id]
source.delete_output_connection(output_label)
destination.delete_input_connection(input_label)<|docstring|>Deletes connection between elements by deliting source output and destination input<|endoftext|>
|
ee4186ef193f10336e87683bf798cc29936f5703d32294add2aa0ddd828e6bdf
|
def move(self, element_id, new_position):
'\n Moves element with element_id to new_position\n '
self._elements[element_id].position = new_position
|
Moves element with element_id to new_position
|
src/scheme.py
|
move
|
archy-co/artilife
| 0
|
python
|
def move(self, element_id, new_position):
'\n \n '
self._elements[element_id].position = new_position
|
def move(self, element_id, new_position):
'\n \n '
self._elements[element_id].position = new_position<|docstring|>Moves element with element_id to new_position<|endoftext|>
|
58ab992a9a055cccb56844e89a0e389ead4257463fb9889ded4c93e1c65a1003
|
def clear(self):
'\n Deletes all elements from scheme and their connections\n '
iter_elements = self._elements.copy()
for elem_id in iter_elements.keys():
self.delete_element(elem_id)
|
Deletes all elements from scheme and their connections
|
src/scheme.py
|
clear
|
archy-co/artilife
| 0
|
python
|
def clear(self):
'\n \n '
iter_elements = self._elements.copy()
for elem_id in iter_elements.keys():
self.delete_element(elem_id)
|
def clear(self):
'\n \n '
iter_elements = self._elements.copy()
for elem_id in iter_elements.keys():
self.delete_element(elem_id)<|docstring|>Deletes all elements from scheme and their connections<|endoftext|>
|
f42f2be9e4817820163beac5bc9839dbbd785d32a77c4fd479bd6fa51e76d6e3
|
def process_args(args):
'Parse arguments from the command line\n\n Parameters\n ----------\n args : list of str\n Command-line arguments, i.e., sys.argv[1:]\n\n Returns\n -------\n args_parsed : dict\n Dictionary of parsed arguments\n '
parser = argparse.ArgumentParser(description='Split OpenMIC-2018 data into train and test')
parser.add_argument('metadata', help='Path to metadata.csv', type=str)
parser.add_argument('labels', help='Path to sparse-labels.csv', type=str)
parser.add_argument('--dupes', dest='dupe_file', type=str, help='Path to track de-duplication index')
parser.add_argument('-s', '--seed', dest='seed', default=20180903, help='Random seed', type=int)
parser.add_argument('-n', '--num-splits', dest='num_splits', default=1, help='Number of splits to generate', type=int)
parser.add_argument('-r', '--split-ratio', dest='ratio', default=0.75, help='Fraction of data for training', type=float)
parser.add_argument('-p', '--probability-ratio', dest='prob_ratio', default=0.875, type=float, help='Max/min allowable deviation of p(Y | train) / p(Y)')
return vars(parser.parse_args(args))
|
Parse arguments from the command line
Parameters
----------
args : list of str
Command-line arguments, i.e., sys.argv[1:]
Returns
-------
args_parsed : dict
Dictionary of parsed arguments
|
scripts/openmic_split.py
|
process_args
|
cagnolone/openmic-2018
| 56
|
python
|
def process_args(args):
'Parse arguments from the command line\n\n Parameters\n ----------\n args : list of str\n Command-line arguments, i.e., sys.argv[1:]\n\n Returns\n -------\n args_parsed : dict\n Dictionary of parsed arguments\n '
parser = argparse.ArgumentParser(description='Split OpenMIC-2018 data into train and test')
parser.add_argument('metadata', help='Path to metadata.csv', type=str)
parser.add_argument('labels', help='Path to sparse-labels.csv', type=str)
parser.add_argument('--dupes', dest='dupe_file', type=str, help='Path to track de-duplication index')
parser.add_argument('-s', '--seed', dest='seed', default=20180903, help='Random seed', type=int)
parser.add_argument('-n', '--num-splits', dest='num_splits', default=1, help='Number of splits to generate', type=int)
parser.add_argument('-r', '--split-ratio', dest='ratio', default=0.75, help='Fraction of data for training', type=float)
parser.add_argument('-p', '--probability-ratio', dest='prob_ratio', default=0.875, type=float, help='Max/min allowable deviation of p(Y | train) / p(Y)')
return vars(parser.parse_args(args))
|
def process_args(args):
'Parse arguments from the command line\n\n Parameters\n ----------\n args : list of str\n Command-line arguments, i.e., sys.argv[1:]\n\n Returns\n -------\n args_parsed : dict\n Dictionary of parsed arguments\n '
parser = argparse.ArgumentParser(description='Split OpenMIC-2018 data into train and test')
parser.add_argument('metadata', help='Path to metadata.csv', type=str)
parser.add_argument('labels', help='Path to sparse-labels.csv', type=str)
parser.add_argument('--dupes', dest='dupe_file', type=str, help='Path to track de-duplication index')
parser.add_argument('-s', '--seed', dest='seed', default=20180903, help='Random seed', type=int)
parser.add_argument('-n', '--num-splits', dest='num_splits', default=1, help='Number of splits to generate', type=int)
parser.add_argument('-r', '--split-ratio', dest='ratio', default=0.75, help='Fraction of data for training', type=float)
parser.add_argument('-p', '--probability-ratio', dest='prob_ratio', default=0.875, type=float, help='Max/min allowable deviation of p(Y | train) / p(Y)')
return vars(parser.parse_args(args))<|docstring|>Parse arguments from the command line
Parameters
----------
args : list of str
Command-line arguments, i.e., sys.argv[1:]
Returns
-------
args_parsed : dict
Dictionary of parsed arguments<|endoftext|>
|
c7e86db2f5af016ec529caf21c4fa1c327c109383d89342e10df09e2092e0b6a
|
def load_label_matrix(metadata_file, label_file, dupe_file=None):
'Load metadata and sparse labels from CSV\n\n Parameters\n ----------\n metadata_file : str\n label_file : str\n Paths to CSV files storing the openmic metadata and sparse label assignments\n\n dupe_file : str\n Path to CSV file storing a de-duplication mapping of sample keys to artist ids\n\n Returns\n -------\n sample_keys : pd.DataFrame\n Ordered array matching row numbers to sample keys and artist ids\n\n artist_labels : pd.DataFrame\n Sparse (nan-populated) array matching artists to instrument relevance scores\n\n label_matrix : pd.DataFrame\n Sparse (nan-populated array matching sample keys to instrument relevance scores\n '
meta = pd.read_csv(metadata_file)
labels = pd.read_csv(label_file)
if dupe_file:
dedupe = pd.read_csv(dupe_file)
meta = meta.merge(dedupe, on='sample_key', suffixes=('_orig', ''))
skey = meta[['sample_key', 'artist_id']].reset_index()
skm = pd.merge(skey, labels, how='inner')
label_matrix = skm.pivot_table(columns='instrument', values='relevance', index='index')
artist_labels = pd.merge(label_matrix, skm[['artist_id']], left_index=True, right_index=True, how='right').groupby('artist_id').mean()
artist_labels['_negative'] = ((artist_labels.max(axis=1) < 0) * 1.0)
label_matrix.index = meta['sample_key']
return (skey, artist_labels, label_matrix)
|
Load metadata and sparse labels from CSV
Parameters
----------
metadata_file : str
label_file : str
Paths to CSV files storing the openmic metadata and sparse label assignments
dupe_file : str
Path to CSV file storing a de-duplication mapping of sample keys to artist ids
Returns
-------
sample_keys : pd.DataFrame
Ordered array matching row numbers to sample keys and artist ids
artist_labels : pd.DataFrame
Sparse (nan-populated) array matching artists to instrument relevance scores
label_matrix : pd.DataFrame
Sparse (nan-populated array matching sample keys to instrument relevance scores
|
scripts/openmic_split.py
|
load_label_matrix
|
cagnolone/openmic-2018
| 56
|
python
|
def load_label_matrix(metadata_file, label_file, dupe_file=None):
'Load metadata and sparse labels from CSV\n\n Parameters\n ----------\n metadata_file : str\n label_file : str\n Paths to CSV files storing the openmic metadata and sparse label assignments\n\n dupe_file : str\n Path to CSV file storing a de-duplication mapping of sample keys to artist ids\n\n Returns\n -------\n sample_keys : pd.DataFrame\n Ordered array matching row numbers to sample keys and artist ids\n\n artist_labels : pd.DataFrame\n Sparse (nan-populated) array matching artists to instrument relevance scores\n\n label_matrix : pd.DataFrame\n Sparse (nan-populated array matching sample keys to instrument relevance scores\n '
meta = pd.read_csv(metadata_file)
labels = pd.read_csv(label_file)
if dupe_file:
dedupe = pd.read_csv(dupe_file)
meta = meta.merge(dedupe, on='sample_key', suffixes=('_orig', ))
skey = meta[['sample_key', 'artist_id']].reset_index()
skm = pd.merge(skey, labels, how='inner')
label_matrix = skm.pivot_table(columns='instrument', values='relevance', index='index')
artist_labels = pd.merge(label_matrix, skm[['artist_id']], left_index=True, right_index=True, how='right').groupby('artist_id').mean()
artist_labels['_negative'] = ((artist_labels.max(axis=1) < 0) * 1.0)
label_matrix.index = meta['sample_key']
return (skey, artist_labels, label_matrix)
|
def load_label_matrix(metadata_file, label_file, dupe_file=None):
'Load metadata and sparse labels from CSV\n\n Parameters\n ----------\n metadata_file : str\n label_file : str\n Paths to CSV files storing the openmic metadata and sparse label assignments\n\n dupe_file : str\n Path to CSV file storing a de-duplication mapping of sample keys to artist ids\n\n Returns\n -------\n sample_keys : pd.DataFrame\n Ordered array matching row numbers to sample keys and artist ids\n\n artist_labels : pd.DataFrame\n Sparse (nan-populated) array matching artists to instrument relevance scores\n\n label_matrix : pd.DataFrame\n Sparse (nan-populated array matching sample keys to instrument relevance scores\n '
meta = pd.read_csv(metadata_file)
labels = pd.read_csv(label_file)
if dupe_file:
dedupe = pd.read_csv(dupe_file)
meta = meta.merge(dedupe, on='sample_key', suffixes=('_orig', ))
skey = meta[['sample_key', 'artist_id']].reset_index()
skm = pd.merge(skey, labels, how='inner')
label_matrix = skm.pivot_table(columns='instrument', values='relevance', index='index')
artist_labels = pd.merge(label_matrix, skm[['artist_id']], left_index=True, right_index=True, how='right').groupby('artist_id').mean()
artist_labels['_negative'] = ((artist_labels.max(axis=1) < 0) * 1.0)
label_matrix.index = meta['sample_key']
return (skey, artist_labels, label_matrix)<|docstring|>Load metadata and sparse labels from CSV
Parameters
----------
metadata_file : str
label_file : str
Paths to CSV files storing the openmic metadata and sparse label assignments
dupe_file : str
Path to CSV file storing a de-duplication mapping of sample keys to artist ids
Returns
-------
sample_keys : pd.DataFrame
Ordered array matching row numbers to sample keys and artist ids
artist_labels : pd.DataFrame
Sparse (nan-populated) array matching artists to instrument relevance scores
label_matrix : pd.DataFrame
Sparse (nan-populated array matching sample keys to instrument relevance scores<|endoftext|>
|
e813be8a95cdfff46c3a2b8c4d7586bcad3ba01ba68a319d9860bae1faf0e206
|
def check_prob(label_matrix, idx, prob_ratio):
'Check that the probabilities in a sub-sample\n are within a tolerance of the full population.\n\n Parameters\n ----------\n label_matrix : pd.DataFrame\n Array of label assignments\n\n idx : iterable\n Indices of the target sub-sample\n\n prob_ratio:\n The target probability ratio\n\n Returns\n -------\n check_passed : bool\n True if the sub-sampled distribution is within tolerance\n False otherwise\n '
(min_prob, max_prob) = sorted([prob_ratio, (1.0 / prob_ratio)])
all_dist_p = ((label_matrix > 0).sum() / label_matrix.count())
all_dist_n = ((label_matrix <= 0).sum() / label_matrix.count())
sub_dist_p = ((label_matrix.loc[idx] > 0).sum() / label_matrix.loc[idx].count())
sub_dist_n = ((label_matrix.loc[idx] <= 0).sum() / label_matrix.loc[idx].count())
return (np.all(((min_prob * all_dist_p.values) <= sub_dist_p.values)) and np.all((sub_dist_p.values <= (max_prob * all_dist_p.values))) and np.all(((min_prob * all_dist_n.values) <= sub_dist_n.values)) and np.all((sub_dist_n.values <= (max_prob * all_dist_n.values))))
|
Check that the probabilities in a sub-sample
are within a tolerance of the full population.
Parameters
----------
label_matrix : pd.DataFrame
Array of label assignments
idx : iterable
Indices of the target sub-sample
prob_ratio:
The target probability ratio
Returns
-------
check_passed : bool
True if the sub-sampled distribution is within tolerance
False otherwise
|
scripts/openmic_split.py
|
check_prob
|
cagnolone/openmic-2018
| 56
|
python
|
def check_prob(label_matrix, idx, prob_ratio):
'Check that the probabilities in a sub-sample\n are within a tolerance of the full population.\n\n Parameters\n ----------\n label_matrix : pd.DataFrame\n Array of label assignments\n\n idx : iterable\n Indices of the target sub-sample\n\n prob_ratio:\n The target probability ratio\n\n Returns\n -------\n check_passed : bool\n True if the sub-sampled distribution is within tolerance\n False otherwise\n '
(min_prob, max_prob) = sorted([prob_ratio, (1.0 / prob_ratio)])
all_dist_p = ((label_matrix > 0).sum() / label_matrix.count())
all_dist_n = ((label_matrix <= 0).sum() / label_matrix.count())
sub_dist_p = ((label_matrix.loc[idx] > 0).sum() / label_matrix.loc[idx].count())
sub_dist_n = ((label_matrix.loc[idx] <= 0).sum() / label_matrix.loc[idx].count())
return (np.all(((min_prob * all_dist_p.values) <= sub_dist_p.values)) and np.all((sub_dist_p.values <= (max_prob * all_dist_p.values))) and np.all(((min_prob * all_dist_n.values) <= sub_dist_n.values)) and np.all((sub_dist_n.values <= (max_prob * all_dist_n.values))))
|
def check_prob(label_matrix, idx, prob_ratio):
'Check that the probabilities in a sub-sample\n are within a tolerance of the full population.\n\n Parameters\n ----------\n label_matrix : pd.DataFrame\n Array of label assignments\n\n idx : iterable\n Indices of the target sub-sample\n\n prob_ratio:\n The target probability ratio\n\n Returns\n -------\n check_passed : bool\n True if the sub-sampled distribution is within tolerance\n False otherwise\n '
(min_prob, max_prob) = sorted([prob_ratio, (1.0 / prob_ratio)])
all_dist_p = ((label_matrix > 0).sum() / label_matrix.count())
all_dist_n = ((label_matrix <= 0).sum() / label_matrix.count())
sub_dist_p = ((label_matrix.loc[idx] > 0).sum() / label_matrix.loc[idx].count())
sub_dist_n = ((label_matrix.loc[idx] <= 0).sum() / label_matrix.loc[idx].count())
return (np.all(((min_prob * all_dist_p.values) <= sub_dist_p.values)) and np.all((sub_dist_p.values <= (max_prob * all_dist_p.values))) and np.all(((min_prob * all_dist_n.values) <= sub_dist_n.values)) and np.all((sub_dist_n.values <= (max_prob * all_dist_n.values))))<|docstring|>Check that the probabilities in a sub-sample
are within a tolerance of the full population.
Parameters
----------
label_matrix : pd.DataFrame
Array of label assignments
idx : iterable
Indices of the target sub-sample
prob_ratio:
The target probability ratio
Returns
-------
check_passed : bool
True if the sub-sampled distribution is within tolerance
False otherwise<|endoftext|>
|
7c062dc0a0719591dc4966b44c259ea75a4e59286bce8145c06b66e5b2985a76
|
def make_partitions(metadata, labels, seed, num_splits, ratio, prob_ratio, dupe_file=None):
"Partition the open-mic data into train-test splits.\n\n The partitioning logic is as follows:\n\n 1. Match each track with its most positive label association\n 1a. if no positive associations are found, label it as '_negative'\n 2. Use sklearn StratifiedShuffleSplit to make balanced train-test partitions\n 3. Save each partition as two index csv files\n\n Parameters\n ----------\n metadata : str\n Path to metadata CSV file\n\n labels : str\n Path to sparse labels CSV file\n\n seed : None, np.random.RandomState, or int\n Random seed\n\n num_splits : int > 0\n Number of splits to generate\n\n ratio : float in [0, 1]\n Fraction of data to separate for training\n\n prob_ratio : float in [0, 1]\n Minimum probability ratio for P(Y | train) (or P(Y | test)) to P(Y)\n "
(sample_keys, artist_labels, label_matrix) = load_label_matrix(metadata, labels, dupe_file)
splitter = StratifiedShuffleSplit(n_splits=(num_splits * 1000), random_state=seed, test_size=(1 - ratio))
labels = artist_labels.idxmax(axis=1)
fold = 0
for (artist_train_idx, artist_test_idx) in tqdm(splitter.split(labels, labels)):
train_artists = artist_labels.index[artist_train_idx]
test_artists = artist_labels.index[artist_test_idx]
train_idx = sample_keys[sample_keys['artist_id'].isin(train_artists)]['sample_key'].sort_values()
test_idx = sample_keys[sample_keys['artist_id'].isin(test_artists)]['sample_key'].sort_values()
if (set(train_idx) & set(test_idx)):
raise RuntimeError('Train and test indices overlap!')
if (check_prob(label_matrix, train_idx, prob_ratio) and check_prob(label_matrix, test_idx, prob_ratio)):
fold += 1
train_idx.to_csv('split{:02d}_train.csv'.format(fold), index=False)
test_idx.to_csv('split{:02d}_test.csv'.format(fold), index=False)
if (fold >= num_splits):
break
if (fold < num_splits):
raise ValueError('Unable to find sufficient splits. Try lowering the probability ratio tolerance.')
|
Partition the open-mic data into train-test splits.
The partitioning logic is as follows:
1. Match each track with its most positive label association
1a. if no positive associations are found, label it as '_negative'
2. Use sklearn StratifiedShuffleSplit to make balanced train-test partitions
3. Save each partition as two index csv files
Parameters
----------
metadata : str
Path to metadata CSV file
labels : str
Path to sparse labels CSV file
seed : None, np.random.RandomState, or int
Random seed
num_splits : int > 0
Number of splits to generate
ratio : float in [0, 1]
Fraction of data to separate for training
prob_ratio : float in [0, 1]
Minimum probability ratio for P(Y | train) (or P(Y | test)) to P(Y)
|
scripts/openmic_split.py
|
make_partitions
|
cagnolone/openmic-2018
| 56
|
python
|
def make_partitions(metadata, labels, seed, num_splits, ratio, prob_ratio, dupe_file=None):
"Partition the open-mic data into train-test splits.\n\n The partitioning logic is as follows:\n\n 1. Match each track with its most positive label association\n 1a. if no positive associations are found, label it as '_negative'\n 2. Use sklearn StratifiedShuffleSplit to make balanced train-test partitions\n 3. Save each partition as two index csv files\n\n Parameters\n ----------\n metadata : str\n Path to metadata CSV file\n\n labels : str\n Path to sparse labels CSV file\n\n seed : None, np.random.RandomState, or int\n Random seed\n\n num_splits : int > 0\n Number of splits to generate\n\n ratio : float in [0, 1]\n Fraction of data to separate for training\n\n prob_ratio : float in [0, 1]\n Minimum probability ratio for P(Y | train) (or P(Y | test)) to P(Y)\n "
(sample_keys, artist_labels, label_matrix) = load_label_matrix(metadata, labels, dupe_file)
splitter = StratifiedShuffleSplit(n_splits=(num_splits * 1000), random_state=seed, test_size=(1 - ratio))
labels = artist_labels.idxmax(axis=1)
fold = 0
for (artist_train_idx, artist_test_idx) in tqdm(splitter.split(labels, labels)):
train_artists = artist_labels.index[artist_train_idx]
test_artists = artist_labels.index[artist_test_idx]
train_idx = sample_keys[sample_keys['artist_id'].isin(train_artists)]['sample_key'].sort_values()
test_idx = sample_keys[sample_keys['artist_id'].isin(test_artists)]['sample_key'].sort_values()
if (set(train_idx) & set(test_idx)):
raise RuntimeError('Train and test indices overlap!')
if (check_prob(label_matrix, train_idx, prob_ratio) and check_prob(label_matrix, test_idx, prob_ratio)):
fold += 1
train_idx.to_csv('split{:02d}_train.csv'.format(fold), index=False)
test_idx.to_csv('split{:02d}_test.csv'.format(fold), index=False)
if (fold >= num_splits):
break
if (fold < num_splits):
raise ValueError('Unable to find sufficient splits. Try lowering the probability ratio tolerance.')
|
def make_partitions(metadata, labels, seed, num_splits, ratio, prob_ratio, dupe_file=None):
"Partition the open-mic data into train-test splits.\n\n The partitioning logic is as follows:\n\n 1. Match each track with its most positive label association\n 1a. if no positive associations are found, label it as '_negative'\n 2. Use sklearn StratifiedShuffleSplit to make balanced train-test partitions\n 3. Save each partition as two index csv files\n\n Parameters\n ----------\n metadata : str\n Path to metadata CSV file\n\n labels : str\n Path to sparse labels CSV file\n\n seed : None, np.random.RandomState, or int\n Random seed\n\n num_splits : int > 0\n Number of splits to generate\n\n ratio : float in [0, 1]\n Fraction of data to separate for training\n\n prob_ratio : float in [0, 1]\n Minimum probability ratio for P(Y | train) (or P(Y | test)) to P(Y)\n "
(sample_keys, artist_labels, label_matrix) = load_label_matrix(metadata, labels, dupe_file)
splitter = StratifiedShuffleSplit(n_splits=(num_splits * 1000), random_state=seed, test_size=(1 - ratio))
labels = artist_labels.idxmax(axis=1)
fold = 0
for (artist_train_idx, artist_test_idx) in tqdm(splitter.split(labels, labels)):
train_artists = artist_labels.index[artist_train_idx]
test_artists = artist_labels.index[artist_test_idx]
train_idx = sample_keys[sample_keys['artist_id'].isin(train_artists)]['sample_key'].sort_values()
test_idx = sample_keys[sample_keys['artist_id'].isin(test_artists)]['sample_key'].sort_values()
if (set(train_idx) & set(test_idx)):
raise RuntimeError('Train and test indices overlap!')
if (check_prob(label_matrix, train_idx, prob_ratio) and check_prob(label_matrix, test_idx, prob_ratio)):
fold += 1
train_idx.to_csv('split{:02d}_train.csv'.format(fold), index=False)
test_idx.to_csv('split{:02d}_test.csv'.format(fold), index=False)
if (fold >= num_splits):
break
if (fold < num_splits):
raise ValueError('Unable to find sufficient splits. Try lowering the probability ratio tolerance.')<|docstring|>Partition the open-mic data into train-test splits.
The partitioning logic is as follows:
1. Match each track with its most positive label association
1a. if no positive associations are found, label it as '_negative'
2. Use sklearn StratifiedShuffleSplit to make balanced train-test partitions
3. Save each partition as two index csv files
Parameters
----------
metadata : str
Path to metadata CSV file
labels : str
Path to sparse labels CSV file
seed : None, np.random.RandomState, or int
Random seed
num_splits : int > 0
Number of splits to generate
ratio : float in [0, 1]
Fraction of data to separate for training
prob_ratio : float in [0, 1]
Minimum probability ratio for P(Y | train) (or P(Y | test)) to P(Y)<|endoftext|>
|
a11bc9d6cf565fccd250cd609b69dc808bcc2a2bce6251745df9cb3dcbb6d4a0
|
def velocity_confidence(data, vkey='velocity', copy=False):
"Computes confidences of velocities.\n\n .. code:: python\n\n scv.tl.velocity_confidence(adata)\n scv.pl.scatter(adata, color='velocity_confidence', perc=[2,98])\n\n .. image:: https://user-images.githubusercontent.com/31883718/69626334-b6df5200-1048-11ea-9171-495845c5bc7a.png\n :width: 600px\n\n\n Arguments\n ---------\n data: :class:`~anndata.AnnData`\n Annotated data matrix.\n vkey: `str` (default: `'velocity'`)\n Name of velocity estimates to be used.\n copy: `bool` (default: `False`)\n Return a copy instead of writing to adata.\n\n Returns\n -------\n Returns or updates `adata` with the attributes\n velocity_length: `.obs`\n Length of the velocity vectors for each individual cell\n velocity_confidence: `.obs`\n Confidence for each cell\n "
adata = (data.copy() if copy else data)
if (vkey not in adata.layers.keys()):
raise ValueError('You need to run `tl.velocity` first.')
V = np.array(adata.layers[vkey])
if ((vkey + '_genes') in adata.var.keys()):
V = V[(:, np.array(adata.var[(vkey + '_genes')], dtype=bool))]
nans = np.isnan(np.sum(V, axis=0))
if np.any(nans):
V = V[(:, (~ nans))]
indices = get_indices(dist=adata.uns['neighbors']['distances'])[0]
V -= V.mean(1)[(:, None)]
V_norm = norm(V)
R = np.zeros(adata.n_obs)
for i in range(adata.n_obs):
Vi_neighs = V[indices[i]]
Vi_neighs -= Vi_neighs.mean(1)[(:, None)]
R[i] = np.mean((np.einsum('ij, j', Vi_neighs, V[i]) / (norm(Vi_neighs) * V_norm[i])[(None, :)]))
adata.obs[(vkey + '_length')] = V_norm.round(2)
adata.obs[(vkey + '_confidence')] = R
logg.hint((("added '" + vkey) + "_confidence' (adata.obs)"))
if ((vkey + '_confidence_transition') not in adata.obs.keys()):
velocity_confidence_transition(adata, vkey)
return (adata if copy else None)
|
Computes confidences of velocities.
.. code:: python
scv.tl.velocity_confidence(adata)
scv.pl.scatter(adata, color='velocity_confidence', perc=[2,98])
.. image:: https://user-images.githubusercontent.com/31883718/69626334-b6df5200-1048-11ea-9171-495845c5bc7a.png
:width: 600px
Arguments
---------
data: :class:`~anndata.AnnData`
Annotated data matrix.
vkey: `str` (default: `'velocity'`)
Name of velocity estimates to be used.
copy: `bool` (default: `False`)
Return a copy instead of writing to adata.
Returns
-------
Returns or updates `adata` with the attributes
velocity_length: `.obs`
Length of the velocity vectors for each individual cell
velocity_confidence: `.obs`
Confidence for each cell
|
scvelo/tools/velocity_confidence.py
|
velocity_confidence
|
stefanpeidli/scvelo
| 1
|
python
|
def velocity_confidence(data, vkey='velocity', copy=False):
"Computes confidences of velocities.\n\n .. code:: python\n\n scv.tl.velocity_confidence(adata)\n scv.pl.scatter(adata, color='velocity_confidence', perc=[2,98])\n\n .. image:: https://user-images.githubusercontent.com/31883718/69626334-b6df5200-1048-11ea-9171-495845c5bc7a.png\n :width: 600px\n\n\n Arguments\n ---------\n data: :class:`~anndata.AnnData`\n Annotated data matrix.\n vkey: `str` (default: `'velocity'`)\n Name of velocity estimates to be used.\n copy: `bool` (default: `False`)\n Return a copy instead of writing to adata.\n\n Returns\n -------\n Returns or updates `adata` with the attributes\n velocity_length: `.obs`\n Length of the velocity vectors for each individual cell\n velocity_confidence: `.obs`\n Confidence for each cell\n "
adata = (data.copy() if copy else data)
if (vkey not in adata.layers.keys()):
raise ValueError('You need to run `tl.velocity` first.')
V = np.array(adata.layers[vkey])
if ((vkey + '_genes') in adata.var.keys()):
V = V[(:, np.array(adata.var[(vkey + '_genes')], dtype=bool))]
nans = np.isnan(np.sum(V, axis=0))
if np.any(nans):
V = V[(:, (~ nans))]
indices = get_indices(dist=adata.uns['neighbors']['distances'])[0]
V -= V.mean(1)[(:, None)]
V_norm = norm(V)
R = np.zeros(adata.n_obs)
for i in range(adata.n_obs):
Vi_neighs = V[indices[i]]
Vi_neighs -= Vi_neighs.mean(1)[(:, None)]
R[i] = np.mean((np.einsum('ij, j', Vi_neighs, V[i]) / (norm(Vi_neighs) * V_norm[i])[(None, :)]))
adata.obs[(vkey + '_length')] = V_norm.round(2)
adata.obs[(vkey + '_confidence')] = R
logg.hint((("added '" + vkey) + "_confidence' (adata.obs)"))
if ((vkey + '_confidence_transition') not in adata.obs.keys()):
velocity_confidence_transition(adata, vkey)
return (adata if copy else None)
|
def velocity_confidence(data, vkey='velocity', copy=False):
"Computes confidences of velocities.\n\n .. code:: python\n\n scv.tl.velocity_confidence(adata)\n scv.pl.scatter(adata, color='velocity_confidence', perc=[2,98])\n\n .. image:: https://user-images.githubusercontent.com/31883718/69626334-b6df5200-1048-11ea-9171-495845c5bc7a.png\n :width: 600px\n\n\n Arguments\n ---------\n data: :class:`~anndata.AnnData`\n Annotated data matrix.\n vkey: `str` (default: `'velocity'`)\n Name of velocity estimates to be used.\n copy: `bool` (default: `False`)\n Return a copy instead of writing to adata.\n\n Returns\n -------\n Returns or updates `adata` with the attributes\n velocity_length: `.obs`\n Length of the velocity vectors for each individual cell\n velocity_confidence: `.obs`\n Confidence for each cell\n "
adata = (data.copy() if copy else data)
if (vkey not in adata.layers.keys()):
raise ValueError('You need to run `tl.velocity` first.')
V = np.array(adata.layers[vkey])
if ((vkey + '_genes') in adata.var.keys()):
V = V[(:, np.array(adata.var[(vkey + '_genes')], dtype=bool))]
nans = np.isnan(np.sum(V, axis=0))
if np.any(nans):
V = V[(:, (~ nans))]
indices = get_indices(dist=adata.uns['neighbors']['distances'])[0]
V -= V.mean(1)[(:, None)]
V_norm = norm(V)
R = np.zeros(adata.n_obs)
for i in range(adata.n_obs):
Vi_neighs = V[indices[i]]
Vi_neighs -= Vi_neighs.mean(1)[(:, None)]
R[i] = np.mean((np.einsum('ij, j', Vi_neighs, V[i]) / (norm(Vi_neighs) * V_norm[i])[(None, :)]))
adata.obs[(vkey + '_length')] = V_norm.round(2)
adata.obs[(vkey + '_confidence')] = R
logg.hint((("added '" + vkey) + "_confidence' (adata.obs)"))
if ((vkey + '_confidence_transition') not in adata.obs.keys()):
velocity_confidence_transition(adata, vkey)
return (adata if copy else None)<|docstring|>Computes confidences of velocities.
.. code:: python
scv.tl.velocity_confidence(adata)
scv.pl.scatter(adata, color='velocity_confidence', perc=[2,98])
.. image:: https://user-images.githubusercontent.com/31883718/69626334-b6df5200-1048-11ea-9171-495845c5bc7a.png
:width: 600px
Arguments
---------
data: :class:`~anndata.AnnData`
Annotated data matrix.
vkey: `str` (default: `'velocity'`)
Name of velocity estimates to be used.
copy: `bool` (default: `False`)
Return a copy instead of writing to adata.
Returns
-------
Returns or updates `adata` with the attributes
velocity_length: `.obs`
Length of the velocity vectors for each individual cell
velocity_confidence: `.obs`
Confidence for each cell<|endoftext|>
|
94d7a14df2fe579810ceb93f407f39c91f01f6c26e8ae5db92197fc3e10bc639
|
def velocity_confidence_transition(data, vkey='velocity', scale=10, copy=False):
"Computes confidences of velocity transitions.\n\n Arguments\n ---------\n data: :class:`~anndata.AnnData`\n Annotated data matrix.\n vkey: `str` (default: `'velocity'`)\n Name of velocity estimates to be used.\n scale: `float` (default: 10)\n Scale parameter of gaussian kernel.\n copy: `bool` (default: `False`)\n Return a copy instead of writing to adata.\n\n Returns\n -------\n Returns or updates `adata` with the attributes\n velocity_confidence_transition: `.obs`\n Confidence of transition for each cell\n "
adata = (data.copy() if copy else data)
if (vkey not in adata.layers.keys()):
raise ValueError('You need to run `tl.velocity` first.')
if ((vkey + '_genes') in adata.var.keys()):
idx = np.array(adata.var[(vkey + '_genes')], dtype=bool)
(X, V) = (adata.layers['Ms'][(:, idx)].copy(), adata.layers[vkey][(:, idx)].copy())
else:
(X, V) = (adata.layers['Ms'].copy(), adata.layers[vkey].copy())
nans = np.isnan(np.sum(V, axis=0))
if np.any(nans):
X = X[(:, (~ nans))]
V = V[(:, (~ nans))]
T = transition_matrix(adata, vkey=vkey, scale=scale)
dX = (T.dot(X) - X)
dX -= dX.mean(1)[(:, None)]
V -= V.mean(1)[(:, None)]
norms = (norm(dX) * norm(V))
norms += (norms == 0)
adata.obs[(vkey + '_confidence_transition')] = (prod_sum_var(dX, V) / norms)
logg.hint((("added '" + vkey) + "_confidence_transition' (adata.obs)"))
return (adata if copy else None)
|
Computes confidences of velocity transitions.
Arguments
---------
data: :class:`~anndata.AnnData`
Annotated data matrix.
vkey: `str` (default: `'velocity'`)
Name of velocity estimates to be used.
scale: `float` (default: 10)
Scale parameter of gaussian kernel.
copy: `bool` (default: `False`)
Return a copy instead of writing to adata.
Returns
-------
Returns or updates `adata` with the attributes
velocity_confidence_transition: `.obs`
Confidence of transition for each cell
|
scvelo/tools/velocity_confidence.py
|
velocity_confidence_transition
|
stefanpeidli/scvelo
| 1
|
python
|
def velocity_confidence_transition(data, vkey='velocity', scale=10, copy=False):
"Computes confidences of velocity transitions.\n\n Arguments\n ---------\n data: :class:`~anndata.AnnData`\n Annotated data matrix.\n vkey: `str` (default: `'velocity'`)\n Name of velocity estimates to be used.\n scale: `float` (default: 10)\n Scale parameter of gaussian kernel.\n copy: `bool` (default: `False`)\n Return a copy instead of writing to adata.\n\n Returns\n -------\n Returns or updates `adata` with the attributes\n velocity_confidence_transition: `.obs`\n Confidence of transition for each cell\n "
adata = (data.copy() if copy else data)
if (vkey not in adata.layers.keys()):
raise ValueError('You need to run `tl.velocity` first.')
if ((vkey + '_genes') in adata.var.keys()):
idx = np.array(adata.var[(vkey + '_genes')], dtype=bool)
(X, V) = (adata.layers['Ms'][(:, idx)].copy(), adata.layers[vkey][(:, idx)].copy())
else:
(X, V) = (adata.layers['Ms'].copy(), adata.layers[vkey].copy())
nans = np.isnan(np.sum(V, axis=0))
if np.any(nans):
X = X[(:, (~ nans))]
V = V[(:, (~ nans))]
T = transition_matrix(adata, vkey=vkey, scale=scale)
dX = (T.dot(X) - X)
dX -= dX.mean(1)[(:, None)]
V -= V.mean(1)[(:, None)]
norms = (norm(dX) * norm(V))
norms += (norms == 0)
adata.obs[(vkey + '_confidence_transition')] = (prod_sum_var(dX, V) / norms)
logg.hint((("added '" + vkey) + "_confidence_transition' (adata.obs)"))
return (adata if copy else None)
|
def velocity_confidence_transition(data, vkey='velocity', scale=10, copy=False):
"Computes confidences of velocity transitions.\n\n Arguments\n ---------\n data: :class:`~anndata.AnnData`\n Annotated data matrix.\n vkey: `str` (default: `'velocity'`)\n Name of velocity estimates to be used.\n scale: `float` (default: 10)\n Scale parameter of gaussian kernel.\n copy: `bool` (default: `False`)\n Return a copy instead of writing to adata.\n\n Returns\n -------\n Returns or updates `adata` with the attributes\n velocity_confidence_transition: `.obs`\n Confidence of transition for each cell\n "
adata = (data.copy() if copy else data)
if (vkey not in adata.layers.keys()):
raise ValueError('You need to run `tl.velocity` first.')
if ((vkey + '_genes') in adata.var.keys()):
idx = np.array(adata.var[(vkey + '_genes')], dtype=bool)
(X, V) = (adata.layers['Ms'][(:, idx)].copy(), adata.layers[vkey][(:, idx)].copy())
else:
(X, V) = (adata.layers['Ms'].copy(), adata.layers[vkey].copy())
nans = np.isnan(np.sum(V, axis=0))
if np.any(nans):
X = X[(:, (~ nans))]
V = V[(:, (~ nans))]
T = transition_matrix(adata, vkey=vkey, scale=scale)
dX = (T.dot(X) - X)
dX -= dX.mean(1)[(:, None)]
V -= V.mean(1)[(:, None)]
norms = (norm(dX) * norm(V))
norms += (norms == 0)
adata.obs[(vkey + '_confidence_transition')] = (prod_sum_var(dX, V) / norms)
logg.hint((("added '" + vkey) + "_confidence_transition' (adata.obs)"))
return (adata if copy else None)<|docstring|>Computes confidences of velocity transitions.
Arguments
---------
data: :class:`~anndata.AnnData`
Annotated data matrix.
vkey: `str` (default: `'velocity'`)
Name of velocity estimates to be used.
scale: `float` (default: 10)
Scale parameter of gaussian kernel.
copy: `bool` (default: `False`)
Return a copy instead of writing to adata.
Returns
-------
Returns or updates `adata` with the attributes
velocity_confidence_transition: `.obs`
Confidence of transition for each cell<|endoftext|>
|
e2af20665f3948b147e0fe2e653cb30550a97a90b0020b3c293502bd36328249
|
def get_edges(self, arr):
' Input is an array of intended connections. Going in order, if any\n of the connections do not exist, return False. If they all exist,\n return the sum total of all the connections.\n '
total = 0
curr = arr[0]
if (not self.has_vert(curr)):
return (False, 0)
for city in arr:
if (city == curr):
continue
if (city not in self.graph[curr].keys()):
return (False, 0)
total += self.graph[curr][city]
curr = city
return (True, total)
|
Input is an array of intended connections. Going in order, if any
of the connections do not exist, return False. If they all exist,
return the sum total of all the connections.
|
challenges/get_edges/get_edges.py
|
get_edges
|
ChrisSeattle/data-structures-and-algorithms
| 0
|
python
|
def get_edges(self, arr):
' Input is an array of intended connections. Going in order, if any\n of the connections do not exist, return False. If they all exist,\n return the sum total of all the connections.\n '
total = 0
curr = arr[0]
if (not self.has_vert(curr)):
return (False, 0)
for city in arr:
if (city == curr):
continue
if (city not in self.graph[curr].keys()):
return (False, 0)
total += self.graph[curr][city]
curr = city
return (True, total)
|
def get_edges(self, arr):
' Input is an array of intended connections. Going in order, if any\n of the connections do not exist, return False. If they all exist,\n return the sum total of all the connections.\n '
total = 0
curr = arr[0]
if (not self.has_vert(curr)):
return (False, 0)
for city in arr:
if (city == curr):
continue
if (city not in self.graph[curr].keys()):
return (False, 0)
total += self.graph[curr][city]
curr = city
return (True, total)<|docstring|>Input is an array of intended connections. Going in order, if any
of the connections do not exist, return False. If they all exist,
return the sum total of all the connections.<|endoftext|>
|
fa05f7cc679dc90a7fa8a67f19fd9959a0f4935e438fb7c19d7863640da2d793
|
def breadth_first(self, val):
' Accepts a starting node as input, then traverses all nodes/vertices\n of the graph in a breadth first approach. Prints these out in order\n they were visited.\n This could be simplified (and not use Queue) if we always work\n with the key-value pairs of node name to dictionary of connections.\n '
if (not self.has_vert(val)):
raise ValueError('Starting vertice is not in the Graph')
visited = dict()
q = Queue()
startNode = Node(val)
q.enqueue(startNode)
visited[val] = True
result = []
while q:
for n in self.graph[q.front.val].keys():
if (n not in visited):
visited[n] = True
newNode = Node(n)
q.enqueue(newNode)
result.append(q.dequeue().val)
return result
|
Accepts a starting node as input, then traverses all nodes/vertices
of the graph in a breadth first approach. Prints these out in order
they were visited.
This could be simplified (and not use Queue) if we always work
with the key-value pairs of node name to dictionary of connections.
|
challenges/get_edges/get_edges.py
|
breadth_first
|
ChrisSeattle/data-structures-and-algorithms
| 0
|
python
|
def breadth_first(self, val):
' Accepts a starting node as input, then traverses all nodes/vertices\n of the graph in a breadth first approach. Prints these out in order\n they were visited.\n This could be simplified (and not use Queue) if we always work\n with the key-value pairs of node name to dictionary of connections.\n '
if (not self.has_vert(val)):
raise ValueError('Starting vertice is not in the Graph')
visited = dict()
q = Queue()
startNode = Node(val)
q.enqueue(startNode)
visited[val] = True
result = []
while q:
for n in self.graph[q.front.val].keys():
if (n not in visited):
visited[n] = True
newNode = Node(n)
q.enqueue(newNode)
result.append(q.dequeue().val)
return result
|
def breadth_first(self, val):
' Accepts a starting node as input, then traverses all nodes/vertices\n of the graph in a breadth first approach. Prints these out in order\n they were visited.\n This could be simplified (and not use Queue) if we always work\n with the key-value pairs of node name to dictionary of connections.\n '
if (not self.has_vert(val)):
raise ValueError('Starting vertice is not in the Graph')
visited = dict()
q = Queue()
startNode = Node(val)
q.enqueue(startNode)
visited[val] = True
result = []
while q:
for n in self.graph[q.front.val].keys():
if (n not in visited):
visited[n] = True
newNode = Node(n)
q.enqueue(newNode)
result.append(q.dequeue().val)
return result<|docstring|>Accepts a starting node as input, then traverses all nodes/vertices
of the graph in a breadth first approach. Prints these out in order
they were visited.
This could be simplified (and not use Queue) if we always work
with the key-value pairs of node name to dictionary of connections.<|endoftext|>
|
3cc6605f8506228870e3e02eb02df440b9034a3debcec64ca26934fe79bfd946
|
def add_vert(self, val):
' Adding Vertice to graph if it does not already exist\n For now we use dictionary key as vertice name with values holding\n dictionary of connected vertice name : connection weight\n '
rel = []
err = ''
if isinstance(val, dict):
(val, rel) = (list(val.keys()), list(val.values()))
else:
if (not isinstance(val, list)):
val = list(val)
for ea in val:
rel.append({})
for i in range(len(val)):
if self.has_vert(val[i]):
err += f'{val[i]} '
else:
self.graph[val[i]] = rel[i]
if (len(err) > 0):
raise ValueError(f'Vertice(s) {err} already present')
return True
|
Adding Vertice to graph if it does not already exist
For now we use dictionary key as vertice name with values holding
dictionary of connected vertice name : connection weight
|
challenges/get_edges/get_edges.py
|
add_vert
|
ChrisSeattle/data-structures-and-algorithms
| 0
|
python
|
def add_vert(self, val):
' Adding Vertice to graph if it does not already exist\n For now we use dictionary key as vertice name with values holding\n dictionary of connected vertice name : connection weight\n '
rel = []
err =
if isinstance(val, dict):
(val, rel) = (list(val.keys()), list(val.values()))
else:
if (not isinstance(val, list)):
val = list(val)
for ea in val:
rel.append({})
for i in range(len(val)):
if self.has_vert(val[i]):
err += f'{val[i]} '
else:
self.graph[val[i]] = rel[i]
if (len(err) > 0):
raise ValueError(f'Vertice(s) {err} already present')
return True
|
def add_vert(self, val):
' Adding Vertice to graph if it does not already exist\n For now we use dictionary key as vertice name with values holding\n dictionary of connected vertice name : connection weight\n '
rel = []
err =
if isinstance(val, dict):
(val, rel) = (list(val.keys()), list(val.values()))
else:
if (not isinstance(val, list)):
val = list(val)
for ea in val:
rel.append({})
for i in range(len(val)):
if self.has_vert(val[i]):
err += f'{val[i]} '
else:
self.graph[val[i]] = rel[i]
if (len(err) > 0):
raise ValueError(f'Vertice(s) {err} already present')
return True<|docstring|>Adding Vertice to graph if it does not already exist
For now we use dictionary key as vertice name with values holding
dictionary of connected vertice name : connection weight<|endoftext|>
|
a529c90308879b2376b31294e1b66e7c57c777275b0801efc87867d3818ed005
|
def has_vert(self, val):
' Check to see if this vertice is already in the graph.\n For now, check if the name is a key in self.graph\n '
return (val in self.graph.keys())
|
Check to see if this vertice is already in the graph.
For now, check if the name is a key in self.graph
|
challenges/get_edges/get_edges.py
|
has_vert
|
ChrisSeattle/data-structures-and-algorithms
| 0
|
python
|
def has_vert(self, val):
' Check to see if this vertice is already in the graph.\n For now, check if the name is a key in self.graph\n '
return (val in self.graph.keys())
|
def has_vert(self, val):
' Check to see if this vertice is already in the graph.\n For now, check if the name is a key in self.graph\n '
return (val in self.graph.keys())<|docstring|>Check to see if this vertice is already in the graph.
For now, check if the name is a key in self.graph<|endoftext|>
|
56feba04931659fc48ae9bdf8e76c1846e4f40c44a8c9ada5a690c6101cb6fae
|
def add_edge(self, v1, v2, weight):
' This is adding a directional weighted connection from v1 to v2\n v1 and v2 must be already existing vertice names in the graph\n '
if (not self.has_vert(v1)):
raise ValueError('First given Vertice is not present')
if (not self.has_vert(v2)):
raise ValueError('Second given Vertice is not present')
self.graph[v1][v2] = weight
|
This is adding a directional weighted connection from v1 to v2
v1 and v2 must be already existing vertice names in the graph
|
challenges/get_edges/get_edges.py
|
add_edge
|
ChrisSeattle/data-structures-and-algorithms
| 0
|
python
|
def add_edge(self, v1, v2, weight):
' This is adding a directional weighted connection from v1 to v2\n v1 and v2 must be already existing vertice names in the graph\n '
if (not self.has_vert(v1)):
raise ValueError('First given Vertice is not present')
if (not self.has_vert(v2)):
raise ValueError('Second given Vertice is not present')
self.graph[v1][v2] = weight
|
def add_edge(self, v1, v2, weight):
' This is adding a directional weighted connection from v1 to v2\n v1 and v2 must be already existing vertice names in the graph\n '
if (not self.has_vert(v1)):
raise ValueError('First given Vertice is not present')
if (not self.has_vert(v2)):
raise ValueError('Second given Vertice is not present')
self.graph[v1][v2] = weight<|docstring|>This is adding a directional weighted connection from v1 to v2
v1 and v2 must be already existing vertice names in the graph<|endoftext|>
|
b91ed5a22cac72b0dc14194ae84c48ce2847e4399c2e3649731fa0141a0bd823
|
def get_neighbors(self, val):
' Return all verticies that the given val vertice connects out to\n '
if (val not in self.graph.keys()):
raise ValueError('That vertice is not present')
return list(self.graph[val].keys())
|
Return all verticies that the given val vertice connects out to
|
challenges/get_edges/get_edges.py
|
get_neighbors
|
ChrisSeattle/data-structures-and-algorithms
| 0
|
python
|
def get_neighbors(self, val):
' \n '
if (val not in self.graph.keys()):
raise ValueError('That vertice is not present')
return list(self.graph[val].keys())
|
def get_neighbors(self, val):
' \n '
if (val not in self.graph.keys()):
raise ValueError('That vertice is not present')
return list(self.graph[val].keys())<|docstring|>Return all verticies that the given val vertice connects out to<|endoftext|>
|
3901700a2bbf8fd37630d89cc47cc448d3dd9cf9aa969ed786261bb65f49bc80
|
def pert_lab(image, label, grad_fun, num_iter, eps, weight=None):
'image is in Lab space\n grad_fun is a function which generates a gradient for a Lab-space image\n eps is either a sequence of length num_iter or a constant\n num_iter is the number of iterations to be performed\n weight is a vector which determines the constaint in each pixel as eps*weight[i].\n should be of length image.flatten()/3, one for each pixel. I imagine this is for the sobel filter.\n It can also be a scalar, but this is dumb and you should just put the weight into epsilon\n outputs delta. A vector of shape image such that image + delta is the perturbed image.'
if (weight is None):
weight = 1
else:
weight = weight.to(image.device)
delta = torch.zeros(image.shape, device=image.device)
for i in range(num_iter):
grad = grad_fun((image + delta))
grad_norm = torch.norm(grad, p=2, dim=1)
delta = torch.where((grad_norm == 0).repeat(1, 3, 1, 1), delta, (delta + (((weight * eps) * grad) / grad_norm)))
return delta
|
image is in Lab space
grad_fun is a function which generates a gradient for a Lab-space image
eps is either a sequence of length num_iter or a constant
num_iter is the number of iterations to be performed
weight is a vector which determines the constaint in each pixel as eps*weight[i].
should be of length image.flatten()/3, one for each pixel. I imagine this is for the sobel filter.
It can also be a scalar, but this is dumb and you should just put the weight into epsilon
outputs delta. A vector of shape image such that image + delta is the perturbed image.
|
ColorEdgeAwarePerturbs.py
|
pert_lab
|
rbassett3/Color-and-Edge-Aware-Perturbations
| 3
|
python
|
def pert_lab(image, label, grad_fun, num_iter, eps, weight=None):
'image is in Lab space\n grad_fun is a function which generates a gradient for a Lab-space image\n eps is either a sequence of length num_iter or a constant\n num_iter is the number of iterations to be performed\n weight is a vector which determines the constaint in each pixel as eps*weight[i].\n should be of length image.flatten()/3, one for each pixel. I imagine this is for the sobel filter.\n It can also be a scalar, but this is dumb and you should just put the weight into epsilon\n outputs delta. A vector of shape image such that image + delta is the perturbed image.'
if (weight is None):
weight = 1
else:
weight = weight.to(image.device)
delta = torch.zeros(image.shape, device=image.device)
for i in range(num_iter):
grad = grad_fun((image + delta))
grad_norm = torch.norm(grad, p=2, dim=1)
delta = torch.where((grad_norm == 0).repeat(1, 3, 1, 1), delta, (delta + (((weight * eps) * grad) / grad_norm)))
return delta
|
def pert_lab(image, label, grad_fun, num_iter, eps, weight=None):
'image is in Lab space\n grad_fun is a function which generates a gradient for a Lab-space image\n eps is either a sequence of length num_iter or a constant\n num_iter is the number of iterations to be performed\n weight is a vector which determines the constaint in each pixel as eps*weight[i].\n should be of length image.flatten()/3, one for each pixel. I imagine this is for the sobel filter.\n It can also be a scalar, but this is dumb and you should just put the weight into epsilon\n outputs delta. A vector of shape image such that image + delta is the perturbed image.'
if (weight is None):
weight = 1
else:
weight = weight.to(image.device)
delta = torch.zeros(image.shape, device=image.device)
for i in range(num_iter):
grad = grad_fun((image + delta))
grad_norm = torch.norm(grad, p=2, dim=1)
delta = torch.where((grad_norm == 0).repeat(1, 3, 1, 1), delta, (delta + (((weight * eps) * grad) / grad_norm)))
return delta<|docstring|>image is in Lab space
grad_fun is a function which generates a gradient for a Lab-space image
eps is either a sequence of length num_iter or a constant
num_iter is the number of iterations to be performed
weight is a vector which determines the constaint in each pixel as eps*weight[i].
should be of length image.flatten()/3, one for each pixel. I imagine this is for the sobel filter.
It can also be a scalar, but this is dumb and you should just put the weight into epsilon
outputs delta. A vector of shape image such that image + delta is the perturbed image.<|endoftext|>
|
79b5104692d49b5624597b366b110bca0963f8907157495e72392d3d857b2035
|
def pert_rgb(image, label, model, num_iter, eps, targeted=False, weight=None, do_imagenet_scale=True, binary=False):
'\n image is in RGB space and in [0,1]\n model maps rgb image to the logits\n eps is either a sequence of length num_iter or a constant\n num_iter is the number of iterations to be performed\n weight is a vector which determines the constaint in each pixel as eps*weight[i].\n should be of length image.flatten()/3, one for each pixel. This can be use for an edge filter.\n It can also be a scalar, but this is dumb and you should just put the weight into epsilon\n outputs delta. A vector of shape image such that image + delta is the perturbed image.\n do_imagenet_scale is a boolean indicating whether the typical scaling on imagenet images should be used\n binary uses BCELossWithLogits instead of CrossEntropyLoss'
if (len(image.shape) == 3):
image = image.unsqueeze(0)
img_lab = rgb2lab(image)
if (targeted == False):
grad_fun = (lambda img: grad_lab2lab(model, img, label, do_imagenet_scale=do_imagenet_scale, binary=binary))
else:
grad_fun = (lambda img: (- grad_lab2lab(model, img, label, do_imagenet_scale=do_imagenet_scale, binary=binary)))
delta_lab = pert_lab(img_lab, label, grad_fun, num_iter, eps, weight=weight)
with torch.no_grad():
pert_img = torch.clamp(lab2rgb((img_lab + delta_lab)), 0, 1)
return pert_img
|
image is in RGB space and in [0,1]
model maps rgb image to the logits
eps is either a sequence of length num_iter or a constant
num_iter is the number of iterations to be performed
weight is a vector which determines the constaint in each pixel as eps*weight[i].
should be of length image.flatten()/3, one for each pixel. This can be use for an edge filter.
It can also be a scalar, but this is dumb and you should just put the weight into epsilon
outputs delta. A vector of shape image such that image + delta is the perturbed image.
do_imagenet_scale is a boolean indicating whether the typical scaling on imagenet images should be used
binary uses BCELossWithLogits instead of CrossEntropyLoss
|
ColorEdgeAwarePerturbs.py
|
pert_rgb
|
rbassett3/Color-and-Edge-Aware-Perturbations
| 3
|
python
|
def pert_rgb(image, label, model, num_iter, eps, targeted=False, weight=None, do_imagenet_scale=True, binary=False):
'\n image is in RGB space and in [0,1]\n model maps rgb image to the logits\n eps is either a sequence of length num_iter or a constant\n num_iter is the number of iterations to be performed\n weight is a vector which determines the constaint in each pixel as eps*weight[i].\n should be of length image.flatten()/3, one for each pixel. This can be use for an edge filter.\n It can also be a scalar, but this is dumb and you should just put the weight into epsilon\n outputs delta. A vector of shape image such that image + delta is the perturbed image.\n do_imagenet_scale is a boolean indicating whether the typical scaling on imagenet images should be used\n binary uses BCELossWithLogits instead of CrossEntropyLoss'
if (len(image.shape) == 3):
image = image.unsqueeze(0)
img_lab = rgb2lab(image)
if (targeted == False):
grad_fun = (lambda img: grad_lab2lab(model, img, label, do_imagenet_scale=do_imagenet_scale, binary=binary))
else:
grad_fun = (lambda img: (- grad_lab2lab(model, img, label, do_imagenet_scale=do_imagenet_scale, binary=binary)))
delta_lab = pert_lab(img_lab, label, grad_fun, num_iter, eps, weight=weight)
with torch.no_grad():
pert_img = torch.clamp(lab2rgb((img_lab + delta_lab)), 0, 1)
return pert_img
|
def pert_rgb(image, label, model, num_iter, eps, targeted=False, weight=None, do_imagenet_scale=True, binary=False):
'\n image is in RGB space and in [0,1]\n model maps rgb image to the logits\n eps is either a sequence of length num_iter or a constant\n num_iter is the number of iterations to be performed\n weight is a vector which determines the constaint in each pixel as eps*weight[i].\n should be of length image.flatten()/3, one for each pixel. This can be use for an edge filter.\n It can also be a scalar, but this is dumb and you should just put the weight into epsilon\n outputs delta. A vector of shape image such that image + delta is the perturbed image.\n do_imagenet_scale is a boolean indicating whether the typical scaling on imagenet images should be used\n binary uses BCELossWithLogits instead of CrossEntropyLoss'
if (len(image.shape) == 3):
image = image.unsqueeze(0)
img_lab = rgb2lab(image)
if (targeted == False):
grad_fun = (lambda img: grad_lab2lab(model, img, label, do_imagenet_scale=do_imagenet_scale, binary=binary))
else:
grad_fun = (lambda img: (- grad_lab2lab(model, img, label, do_imagenet_scale=do_imagenet_scale, binary=binary)))
delta_lab = pert_lab(img_lab, label, grad_fun, num_iter, eps, weight=weight)
with torch.no_grad():
pert_img = torch.clamp(lab2rgb((img_lab + delta_lab)), 0, 1)
return pert_img<|docstring|>image is in RGB space and in [0,1]
model maps rgb image to the logits
eps is either a sequence of length num_iter or a constant
num_iter is the number of iterations to be performed
weight is a vector which determines the constaint in each pixel as eps*weight[i].
should be of length image.flatten()/3, one for each pixel. This can be use for an edge filter.
It can also be a scalar, but this is dumb and you should just put the weight into epsilon
outputs delta. A vector of shape image such that image + delta is the perturbed image.
do_imagenet_scale is a boolean indicating whether the typical scaling on imagenet images should be used
binary uses BCELossWithLogits instead of CrossEntropyLoss<|endoftext|>
|
75c52e06e57745d535fa06bfb612f726af0c04cefb9f4cae44830b4cb2a7bdcd
|
def grad_lab2lab(model, input_img, label, do_imagenet_scale=True, binary=True):
'img assumed to be in [0,1].\n If the model uses the typical scaling of imagenet used in pytorch set do_imagenet_scale=True.\n See https://pytorch.org/docs/stable/torchvision/models.html'
from torch.nn import CrossEntropyLoss, BCEWithLogitsLoss
if binary:
loss = BCEWithLogitsLoss()
else:
loss = CrossEntropyLoss()
model.eval()
if (torch.is_tensor(label) is False):
label = torch.tensor(label)
if (len(label.shape) == 0):
label = label.unsqueeze(0)
label = label.to(input_img.device)
img = input_img
if (len(img.shape) == 3):
img = img.unsqueeze(0)
img.requires_grad = True
rgb_img = lab2rgb(img)
if do_imagenet_scale:
scaled_img = imagenet_transform(rgb_img)
else:
scaled_img = rgb_img
out = loss(model(scaled_img), label)
out.backward()
return img.grad
|
img assumed to be in [0,1].
If the model uses the typical scaling of imagenet used in pytorch set do_imagenet_scale=True.
See https://pytorch.org/docs/stable/torchvision/models.html
|
ColorEdgeAwarePerturbs.py
|
grad_lab2lab
|
rbassett3/Color-and-Edge-Aware-Perturbations
| 3
|
python
|
def grad_lab2lab(model, input_img, label, do_imagenet_scale=True, binary=True):
'img assumed to be in [0,1].\n If the model uses the typical scaling of imagenet used in pytorch set do_imagenet_scale=True.\n See https://pytorch.org/docs/stable/torchvision/models.html'
from torch.nn import CrossEntropyLoss, BCEWithLogitsLoss
if binary:
loss = BCEWithLogitsLoss()
else:
loss = CrossEntropyLoss()
model.eval()
if (torch.is_tensor(label) is False):
label = torch.tensor(label)
if (len(label.shape) == 0):
label = label.unsqueeze(0)
label = label.to(input_img.device)
img = input_img
if (len(img.shape) == 3):
img = img.unsqueeze(0)
img.requires_grad = True
rgb_img = lab2rgb(img)
if do_imagenet_scale:
scaled_img = imagenet_transform(rgb_img)
else:
scaled_img = rgb_img
out = loss(model(scaled_img), label)
out.backward()
return img.grad
|
def grad_lab2lab(model, input_img, label, do_imagenet_scale=True, binary=True):
'img assumed to be in [0,1].\n If the model uses the typical scaling of imagenet used in pytorch set do_imagenet_scale=True.\n See https://pytorch.org/docs/stable/torchvision/models.html'
from torch.nn import CrossEntropyLoss, BCEWithLogitsLoss
if binary:
loss = BCEWithLogitsLoss()
else:
loss = CrossEntropyLoss()
model.eval()
if (torch.is_tensor(label) is False):
label = torch.tensor(label)
if (len(label.shape) == 0):
label = label.unsqueeze(0)
label = label.to(input_img.device)
img = input_img
if (len(img.shape) == 3):
img = img.unsqueeze(0)
img.requires_grad = True
rgb_img = lab2rgb(img)
if do_imagenet_scale:
scaled_img = imagenet_transform(rgb_img)
else:
scaled_img = rgb_img
out = loss(model(scaled_img), label)
out.backward()
return img.grad<|docstring|>img assumed to be in [0,1].
If the model uses the typical scaling of imagenet used in pytorch set do_imagenet_scale=True.
See https://pytorch.org/docs/stable/torchvision/models.html<|endoftext|>
|
1d3bb0534f55f51f5ccea995b563849886a8a5e62da5f8831d94e12ba1c288a6
|
def get_probs(model, img):
'Given an image that has not had the imagenet_transorm done,\n obtain the vector probabilities for each label in the ImageNet Dataset'
return torch.softmax(model(imagenet_transform(img)), 1)
|
Given an image that has not had the imagenet_transorm done,
obtain the vector probabilities for each label in the ImageNet Dataset
|
ColorEdgeAwarePerturbs.py
|
get_probs
|
rbassett3/Color-and-Edge-Aware-Perturbations
| 3
|
python
|
def get_probs(model, img):
'Given an image that has not had the imagenet_transorm done,\n obtain the vector probabilities for each label in the ImageNet Dataset'
return torch.softmax(model(imagenet_transform(img)), 1)
|
def get_probs(model, img):
'Given an image that has not had the imagenet_transorm done,\n obtain the vector probabilities for each label in the ImageNet Dataset'
return torch.softmax(model(imagenet_transform(img)), 1)<|docstring|>Given an image that has not had the imagenet_transorm done,
obtain the vector probabilities for each label in the ImageNet Dataset<|endoftext|>
|
0b34229cc39b27ffbc7495b637ca94275e713f670348eda27581371e721fb55c
|
def __init__(self, model_name, device='CPU', _extensions=None, threshold=0.5):
'\n TODO: Use this to set your instance variables.\n '
self.core = None
self.net = None
self.model = None
self.model_structure = (model_name + '.xml')
self.model_weights = (model_name + '.bin')
self.device = device
self.threshold = threshold
self.output_blob = None
self.height = None
self.width = None
self.channels = None
self.input_blob = None
|
TODO: Use this to set your instance variables.
|
src/facial_landmarks_detection.py
|
__init__
|
AdrianVazquezMejia/Mouse_controller
| 0
|
python
|
def __init__(self, model_name, device='CPU', _extensions=None, threshold=0.5):
'\n \n '
self.core = None
self.net = None
self.model = None
self.model_structure = (model_name + '.xml')
self.model_weights = (model_name + '.bin')
self.device = device
self.threshold = threshold
self.output_blob = None
self.height = None
self.width = None
self.channels = None
self.input_blob = None
|
def __init__(self, model_name, device='CPU', _extensions=None, threshold=0.5):
'\n \n '
self.core = None
self.net = None
self.model = None
self.model_structure = (model_name + '.xml')
self.model_weights = (model_name + '.bin')
self.device = device
self.threshold = threshold
self.output_blob = None
self.height = None
self.width = None
self.channels = None
self.input_blob = None<|docstring|>TODO: Use this to set your instance variables.<|endoftext|>
|
d9225f7f8dfdde2735cd83ec83261bba731bd038a36e56abc72ea046f7bf6229
|
def load_model(self):
'crop\n TODO: You will need to complete this method.\n This method is for loading the model to the device specified by the user.\n If your model requires any Plugins, this is where you can load them.\n '
self.core = IECore()
self.model = IENetwork(self.model_structure, self.model_weights)
self.net = self.core.load_network(network=self.model, device_name=self.device)
print('Landmarks model loaded')
|
crop
TODO: You will need to complete this method.
This method is for loading the model to the device specified by the user.
If your model requires any Plugins, this is where you can load them.
|
src/facial_landmarks_detection.py
|
load_model
|
AdrianVazquezMejia/Mouse_controller
| 0
|
python
|
def load_model(self):
'crop\n TODO: You will need to complete this method.\n This method is for loading the model to the device specified by the user.\n If your model requires any Plugins, this is where you can load them.\n '
self.core = IECore()
self.model = IENetwork(self.model_structure, self.model_weights)
self.net = self.core.load_network(network=self.model, device_name=self.device)
print('Landmarks model loaded')
|
def load_model(self):
'crop\n TODO: You will need to complete this method.\n This method is for loading the model to the device specified by the user.\n If your model requires any Plugins, this is where you can load them.\n '
self.core = IECore()
self.model = IENetwork(self.model_structure, self.model_weights)
self.net = self.core.load_network(network=self.model, device_name=self.device)
print('Landmarks model loaded')<|docstring|>crop
TODO: You will need to complete this method.
This method is for loading the model to the device specified by the user.
If your model requires any Plugins, this is where you can load them.<|endoftext|>
|
cf5e4aeb02e7783acb768f6c78b1970c6d48a01f65705e33ba17af3117be6592
|
def predict(self, image):
'\n TODO: You will need to complete this method.\n This method is meant for running predictions on the input image.\n '
(self.height, self.width, self.channels) = image.shape
input_image = self.preprocess_input(image)
self.net.infer({self.input_blob: input_image})
self.output_blob = next(iter(self.model.outputs))
output = self.net.requests[0].outputs[self.output_blob]
coords = self.preprocess_output(output)
out_frame = None
reye = None
leye = None
if (len(coords) > 0):
(reye, leye) = self.crop_eyes(coords, image)
out_frame = self.draw_outputs(coords, image)
return (out_frame, reye, leye)
|
TODO: You will need to complete this method.
This method is meant for running predictions on the input image.
|
src/facial_landmarks_detection.py
|
predict
|
AdrianVazquezMejia/Mouse_controller
| 0
|
python
|
def predict(self, image):
'\n TODO: You will need to complete this method.\n This method is meant for running predictions on the input image.\n '
(self.height, self.width, self.channels) = image.shape
input_image = self.preprocess_input(image)
self.net.infer({self.input_blob: input_image})
self.output_blob = next(iter(self.model.outputs))
output = self.net.requests[0].outputs[self.output_blob]
coords = self.preprocess_output(output)
out_frame = None
reye = None
leye = None
if (len(coords) > 0):
(reye, leye) = self.crop_eyes(coords, image)
out_frame = self.draw_outputs(coords, image)
return (out_frame, reye, leye)
|
def predict(self, image):
'\n TODO: You will need to complete this method.\n This method is meant for running predictions on the input image.\n '
(self.height, self.width, self.channels) = image.shape
input_image = self.preprocess_input(image)
self.net.infer({self.input_blob: input_image})
self.output_blob = next(iter(self.model.outputs))
output = self.net.requests[0].outputs[self.output_blob]
coords = self.preprocess_output(output)
out_frame = None
reye = None
leye = None
if (len(coords) > 0):
(reye, leye) = self.crop_eyes(coords, image)
out_frame = self.draw_outputs(coords, image)
return (out_frame, reye, leye)<|docstring|>TODO: You will need to complete this method.
This method is meant for running predictions on the input image.<|endoftext|>
|
4a60ef21654e4e463f1d116f7493372222f2decec50e883f46ce7361c30c0dcf
|
def preprocess_input(self, image):
'\n Before feeding the data into the model for inference,\n you might have to preprocess it. This function is where you can do that.\n '
self.input_blob = next(iter(self.model.inputs))
shape = self.model.inputs[self.input_blob].shape
frame = cv2.resize(image, (shape[3], shape[2]))
frame = frame.transpose((2, 0, 1))
frame = frame.reshape(1, *frame.shape)
return frame
|
Before feeding the data into the model for inference,
you might have to preprocess it. This function is where you can do that.
|
src/facial_landmarks_detection.py
|
preprocess_input
|
AdrianVazquezMejia/Mouse_controller
| 0
|
python
|
def preprocess_input(self, image):
'\n Before feeding the data into the model for inference,\n you might have to preprocess it. This function is where you can do that.\n '
self.input_blob = next(iter(self.model.inputs))
shape = self.model.inputs[self.input_blob].shape
frame = cv2.resize(image, (shape[3], shape[2]))
frame = frame.transpose((2, 0, 1))
frame = frame.reshape(1, *frame.shape)
return frame
|
def preprocess_input(self, image):
'\n Before feeding the data into the model for inference,\n you might have to preprocess it. This function is where you can do that.\n '
self.input_blob = next(iter(self.model.inputs))
shape = self.model.inputs[self.input_blob].shape
frame = cv2.resize(image, (shape[3], shape[2]))
frame = frame.transpose((2, 0, 1))
frame = frame.reshape(1, *frame.shape)
return frame<|docstring|>Before feeding the data into the model for inference,
you might have to preprocess it. This function is where you can do that.<|endoftext|>
|
2e1bd1c1489ca11c44b6a3345049a914c1bec1abf40d2f2c5f7f935cb96887f7
|
def preprocess_output(self, outputs):
'\n Before feeding the output of this model to the next model,\n you might have to preprocess the output. This function is where you can do that.\n '
arr = outputs.flatten()
matrix = [((arr[i] * self.height) if (i % 2) else (arr[i] * self.width)) for (i, _) in enumerate(arr)]
(*matrix,) = map(int, matrix)
return matrix
|
Before feeding the output of this model to the next model,
you might have to preprocess the output. This function is where you can do that.
|
src/facial_landmarks_detection.py
|
preprocess_output
|
AdrianVazquezMejia/Mouse_controller
| 0
|
python
|
def preprocess_output(self, outputs):
'\n Before feeding the output of this model to the next model,\n you might have to preprocess the output. This function is where you can do that.\n '
arr = outputs.flatten()
matrix = [((arr[i] * self.height) if (i % 2) else (arr[i] * self.width)) for (i, _) in enumerate(arr)]
(*matrix,) = map(int, matrix)
return matrix
|
def preprocess_output(self, outputs):
'\n Before feeding the output of this model to the next model,\n you might have to preprocess the output. This function is where you can do that.\n '
arr = outputs.flatten()
matrix = [((arr[i] * self.height) if (i % 2) else (arr[i] * self.width)) for (i, _) in enumerate(arr)]
(*matrix,) = map(int, matrix)
return matrix<|docstring|>Before feeding the output of this model to the next model,
you might have to preprocess the output. This function is where you can do that.<|endoftext|>
|
612b7d056db200a305dd32d99abd04d1fa29fbf6484b71ac39dcca74034ca46c
|
def parse_message(message):
'\n 사용자에게 메시지를 받아, 필요한 항목을 불러옴\n '
print('parse_message')
user_id = message['message']['chat']['id']
userName = (message['message']['chat']['first_name'] + message['message']['chat']['last_name'])
msg = message['message']['text']
return (user_id, userName, msg)
|
사용자에게 메시지를 받아, 필요한 항목을 불러옴
|
Festibot.py
|
parse_message
|
imeeke83/sba_FestiBot
| 0
|
python
|
def parse_message(message):
'\n \n '
print('parse_message')
user_id = message['message']['chat']['id']
userName = (message['message']['chat']['first_name'] + message['message']['chat']['last_name'])
msg = message['message']['text']
return (user_id, userName, msg)
|
def parse_message(message):
'\n \n '
print('parse_message')
user_id = message['message']['chat']['id']
userName = (message['message']['chat']['first_name'] + message['message']['chat']['last_name'])
msg = message['message']['text']
return (user_id, userName, msg)<|docstring|>사용자에게 메시지를 받아, 필요한 항목을 불러옴<|endoftext|>
|
696532996ab5df720cfab9dcc11f2a00d88c70a1e9bde4f28dd6350e01046d99
|
def send_message(user_id, text):
'\n 사용자에게 메세지를 보냄\n '
print('send_message')
url = 'https://api.telegram.org/bot{token}/sendMessage'.format(token=API_KEY)
params = {'chat_id': user_id, 'text': text}
response = requests.post(url, json=params)
return response
|
사용자에게 메세지를 보냄
|
Festibot.py
|
send_message
|
imeeke83/sba_FestiBot
| 0
|
python
|
def send_message(user_id, text):
'\n \n '
print('send_message')
url = 'https://api.telegram.org/bot{token}/sendMessage'.format(token=API_KEY)
params = {'chat_id': user_id, 'text': text}
response = requests.post(url, json=params)
return response
|
def send_message(user_id, text):
'\n \n '
print('send_message')
url = 'https://api.telegram.org/bot{token}/sendMessage'.format(token=API_KEY)
params = {'chat_id': user_id, 'text': text}
response = requests.post(url, json=params)
return response<|docstring|>사용자에게 메세지를 보냄<|endoftext|>
|
971adf88d8fa1d6a9eb65fc8184e16b33454e22424ec20f3ac710f509d533275
|
def find_userInfo(user_id, userName):
'\n UserDB.xlsx 파일에 User 정보가 존재하는지 확인\n 존재하지 않는 경우, User 정보를 추가하고 초기화\n '
print('find_userInfo')
for row in userInfoDB.rows:
if (row[0].value == user_id):
return True
userInfoDB[(userInfoDB.max_row + 1)][0].value = user_id
userInfoDB[userInfoDB.max_row][1].value = userName
for i in range(3, 18):
userInfoDB.cell(row=userInfoDB.max_row, column=i).value = 0
db.save(EXCEL_FILE_NAME)
return False
|
UserDB.xlsx 파일에 User 정보가 존재하는지 확인
존재하지 않는 경우, User 정보를 추가하고 초기화
|
Festibot.py
|
find_userInfo
|
imeeke83/sba_FestiBot
| 0
|
python
|
def find_userInfo(user_id, userName):
'\n UserDB.xlsx 파일에 User 정보가 존재하는지 확인\n 존재하지 않는 경우, User 정보를 추가하고 초기화\n '
print('find_userInfo')
for row in userInfoDB.rows:
if (row[0].value == user_id):
return True
userInfoDB[(userInfoDB.max_row + 1)][0].value = user_id
userInfoDB[userInfoDB.max_row][1].value = userName
for i in range(3, 18):
userInfoDB.cell(row=userInfoDB.max_row, column=i).value = 0
db.save(EXCEL_FILE_NAME)
return False
|
def find_userInfo(user_id, userName):
'\n UserDB.xlsx 파일에 User 정보가 존재하는지 확인\n 존재하지 않는 경우, User 정보를 추가하고 초기화\n '
print('find_userInfo')
for row in userInfoDB.rows:
if (row[0].value == user_id):
return True
userInfoDB[(userInfoDB.max_row + 1)][0].value = user_id
userInfoDB[userInfoDB.max_row][1].value = userName
for i in range(3, 18):
userInfoDB.cell(row=userInfoDB.max_row, column=i).value = 0
db.save(EXCEL_FILE_NAME)
return False<|docstring|>UserDB.xlsx 파일에 User 정보가 존재하는지 확인
존재하지 않는 경우, User 정보를 추가하고 초기화<|endoftext|>
|
a8a0b5d37298d13eed6d18fdc1af19313d080e2b5e1f025c5ba4a6a2faa6d9a3
|
def find_whatUserLike(user_id):
'\n 저장된 User의 선호 축제를 검색하여 선호 축제의 코드값 검색\n '
print('find_whatUserLike')
for row in userInfoDB.rows:
if (row[0].value == user_id):
userRow = row[0].row
userLikecontent = []
for i in range(3, 18):
userLikecontent.append(userInfoDB.cell(row=userRow, column=i).value)
max = 0
for value in userLikecontent:
if (value > max):
max = value
if (max > 0):
return contentListCode.index(userLikecontent.index(max))
else:
return 0
|
저장된 User의 선호 축제를 검색하여 선호 축제의 코드값 검색
|
Festibot.py
|
find_whatUserLike
|
imeeke83/sba_FestiBot
| 0
|
python
|
def find_whatUserLike(user_id):
'\n \n '
print('find_whatUserLike')
for row in userInfoDB.rows:
if (row[0].value == user_id):
userRow = row[0].row
userLikecontent = []
for i in range(3, 18):
userLikecontent.append(userInfoDB.cell(row=userRow, column=i).value)
max = 0
for value in userLikecontent:
if (value > max):
max = value
if (max > 0):
return contentListCode.index(userLikecontent.index(max))
else:
return 0
|
def find_whatUserLike(user_id):
'\n \n '
print('find_whatUserLike')
for row in userInfoDB.rows:
if (row[0].value == user_id):
userRow = row[0].row
userLikecontent = []
for i in range(3, 18):
userLikecontent.append(userInfoDB.cell(row=userRow, column=i).value)
max = 0
for value in userLikecontent:
if (value > max):
max = value
if (max > 0):
return contentListCode.index(userLikecontent.index(max))
else:
return 0<|docstring|>저장된 User의 선호 축제를 검색하여 선호 축제의 코드값 검색<|endoftext|>
|
80752b9da89d12426daf69f0c4620a356d1d2b2ebcd34d2dfc3ea076408d73dd
|
def send_welcome_msg(user_id, userName):
'\n 처음 방문한 사용자에게 환영 메시지 출력.\n '
print('send_welcome_msg')
url = 'https://api.telegram.org/bot{token}/sendMessage'.format(token=API_KEY)
welcomeMsg = f'{userName}님 안녕하세요. 저는 페스티봇이에요. 축제를 알려드립니다 !'
params = {'chat_id': user_id, 'text': welcomeMsg}
requests.post(url, json=params)
|
처음 방문한 사용자에게 환영 메시지 출력.
|
Festibot.py
|
send_welcome_msg
|
imeeke83/sba_FestiBot
| 0
|
python
|
def send_welcome_msg(user_id, userName):
'\n \n '
print('send_welcome_msg')
url = 'https://api.telegram.org/bot{token}/sendMessage'.format(token=API_KEY)
welcomeMsg = f'{userName}님 안녕하세요. 저는 페스티봇이에요. 축제를 알려드립니다 !'
params = {'chat_id': user_id, 'text': welcomeMsg}
requests.post(url, json=params)
|
def send_welcome_msg(user_id, userName):
'\n \n '
print('send_welcome_msg')
url = 'https://api.telegram.org/bot{token}/sendMessage'.format(token=API_KEY)
welcomeMsg = f'{userName}님 안녕하세요. 저는 페스티봇이에요. 축제를 알려드립니다 !'
params = {'chat_id': user_id, 'text': welcomeMsg}
requests.post(url, json=params)<|docstring|>처음 방문한 사용자에게 환영 메시지 출력.<|endoftext|>
|
7816b6126796035327220ceab14b9c21de648e2e673bde53b757b61a37bc04b5
|
def thisUserIsFirst(user_id, userName):
'\n 유저 정보를 확인하여, 기존 유저 또는 첫 방문자인지 검사\n '
print('thisUserIsFirst')
if find_userInfo(user_id, userName):
userLike = find_whatUserLike(user_id)
if (userLike == 0):
pass
else:
stateDB.loc[(user_id, 'contentCode')] = userLike
else:
send_welcome_msg(user_id, userName)
if (user_id in stateDB.index):
pass
else:
stateDB.loc[user_id] = np.nan
|
유저 정보를 확인하여, 기존 유저 또는 첫 방문자인지 검사
|
Festibot.py
|
thisUserIsFirst
|
imeeke83/sba_FestiBot
| 0
|
python
|
def thisUserIsFirst(user_id, userName):
'\n \n '
print('thisUserIsFirst')
if find_userInfo(user_id, userName):
userLike = find_whatUserLike(user_id)
if (userLike == 0):
pass
else:
stateDB.loc[(user_id, 'contentCode')] = userLike
else:
send_welcome_msg(user_id, userName)
if (user_id in stateDB.index):
pass
else:
stateDB.loc[user_id] = np.nan
|
def thisUserIsFirst(user_id, userName):
'\n \n '
print('thisUserIsFirst')
if find_userInfo(user_id, userName):
userLike = find_whatUserLike(user_id)
if (userLike == 0):
pass
else:
stateDB.loc[(user_id, 'contentCode')] = userLike
else:
send_welcome_msg(user_id, userName)
if (user_id in stateDB.index):
pass
else:
stateDB.loc[user_id] = np.nan<|docstring|>유저 정보를 확인하여, 기존 유저 또는 첫 방문자인지 검사<|endoftext|>
|
bad19bf3bde6f22de76e54fd89d926c6172a8fae3b7045d493b40b1d8458f19c
|
def click_buttonFirst(user_id, msg):
'\n 사용자에게 최초 버튼 선택 화면을 보여줌\n '
print('click_buttonFirst')
url = 'https://api.telegram.org/bot{token}/sendMessage'.format(token=API_KEY)
keyboard = {'keyboard': [[{'text': '축제 기간'}, {'text': '축제 종류'}]], 'one_time_keyboard': True}
params = {'chat_id': user_id, 'text': msg, 'reply_markup': keyboard}
requests.post(url, json=params)
|
사용자에게 최초 버튼 선택 화면을 보여줌
|
Festibot.py
|
click_buttonFirst
|
imeeke83/sba_FestiBot
| 0
|
python
|
def click_buttonFirst(user_id, msg):
'\n \n '
print('click_buttonFirst')
url = 'https://api.telegram.org/bot{token}/sendMessage'.format(token=API_KEY)
keyboard = {'keyboard': [[{'text': '축제 기간'}, {'text': '축제 종류'}]], 'one_time_keyboard': True}
params = {'chat_id': user_id, 'text': msg, 'reply_markup': keyboard}
requests.post(url, json=params)
|
def click_buttonFirst(user_id, msg):
'\n \n '
print('click_buttonFirst')
url = 'https://api.telegram.org/bot{token}/sendMessage'.format(token=API_KEY)
keyboard = {'keyboard': [[{'text': '축제 기간'}, {'text': '축제 종류'}]], 'one_time_keyboard': True}
params = {'chat_id': user_id, 'text': msg, 'reply_markup': keyboard}
requests.post(url, json=params)<|docstring|>사용자에게 최초 버튼 선택 화면을 보여줌<|endoftext|>
|
4bd29581682d3e5c8fb21c47f9bfb2c5013c8cc408e862799d1d3005cd76e7eb
|
def choice_calendarDate(user_id, msg):
'\n 사용자에게 세부 일정 검색 선택 화면을 보여줌\n '
print('choice_calendarDate')
url = 'https://api.telegram.org/bot{token}/sendMessage'.format(token=API_KEY)
keyboard = {'keyboard': [[{'text': '오늘 축제'}, {'text': '내일 축제'}], [{'text': '이번주 축제'}, {'text': '이번달 축제'}]], 'one_time_keyboard': True}
params = {'chat_id': user_id, 'text': msg, 'reply_markup': keyboard}
requests.post(url, json=params)
|
사용자에게 세부 일정 검색 선택 화면을 보여줌
|
Festibot.py
|
choice_calendarDate
|
imeeke83/sba_FestiBot
| 0
|
python
|
def choice_calendarDate(user_id, msg):
'\n \n '
print('choice_calendarDate')
url = 'https://api.telegram.org/bot{token}/sendMessage'.format(token=API_KEY)
keyboard = {'keyboard': [[{'text': '오늘 축제'}, {'text': '내일 축제'}], [{'text': '이번주 축제'}, {'text': '이번달 축제'}]], 'one_time_keyboard': True}
params = {'chat_id': user_id, 'text': msg, 'reply_markup': keyboard}
requests.post(url, json=params)
|
def choice_calendarDate(user_id, msg):
'\n \n '
print('choice_calendarDate')
url = 'https://api.telegram.org/bot{token}/sendMessage'.format(token=API_KEY)
keyboard = {'keyboard': [[{'text': '오늘 축제'}, {'text': '내일 축제'}], [{'text': '이번주 축제'}, {'text': '이번달 축제'}]], 'one_time_keyboard': True}
params = {'chat_id': user_id, 'text': msg, 'reply_markup': keyboard}
requests.post(url, json=params)<|docstring|>사용자에게 세부 일정 검색 선택 화면을 보여줌<|endoftext|>
|
0826128f4f71018e3a9bd341bb8950bfda32f9a3ae98dfae5ec690e8512d1e4c
|
def choice_fixCalendarDate(user_id, msg):
'\n 사용자가 선택한 세부 일정 별 시작일 및 종료일을 state로 저장\n '
print('choice_fixCalendarDate')
if dateWrite.match(msg):
(stateDB.loc[(user_id, 'eventStartDate')], stateDB.loc[(user_id, 'eventEndDate')]) = msg.split('-')
stateDB.loc[(user_id, 'stateCode')] = np.nan
elif (msg == '오늘 축제'):
stateDB.loc[(user_id, 'eventStartDate')] = datetime.today().strftime('%Y%m%d')
stateDB.loc[(user_id, 'eventEndDate')] = datetime.today().strftime('%Y%m%d')
stateDB.loc[(user_id, 'stateCode')] = np.nan
elif (msg == '내일 축제'):
tomorrow = (datetime.today() + timedelta(days=1))
stateDB.loc[(user_id, 'eventStartDate')] = tomorrow.strftime('%Y%m%d')
stateDB.loc[(user_id, 'eventEndDate')] = tomorrow.strftime('%Y%m%d')
stateDB.loc[(user_id, 'stateCode')] = np.nan
elif (msg == '이번주 축제'):
startDate = (datetime.today() - timedelta(days=datetime.today().weekday()))
endDate = (datetime.today() - timedelta(days=(datetime.today().weekday() - 7)))
stateDB.loc[(user_id, 'eventStartDate')] = startDate.strftime('%Y%m%d')
stateDB.loc[(user_id, 'eventEndDate')] = endDate.strftime('%Y%m%d')
stateDB.loc[(user_id, 'stateCode')] = np.nan
elif (msg == '이번달 축제'):
startDate = datetime.today().replace(day=1)
endDate = datetime.today().replace(day=calendar.monthrange(datetime.today().year, datetime.today().month)[1])
stateDB.loc[(user_id, 'eventStartDate')] = startDate.strftime('%Y%m%d')
stateDB.loc[(user_id, 'eventEndDate')] = endDate.strftime('%Y%m%d')
stateDB.loc[(user_id, 'stateCode')] = np.nan
print(stateDB)
|
사용자가 선택한 세부 일정 별 시작일 및 종료일을 state로 저장
|
Festibot.py
|
choice_fixCalendarDate
|
imeeke83/sba_FestiBot
| 0
|
python
|
def choice_fixCalendarDate(user_id, msg):
'\n \n '
print('choice_fixCalendarDate')
if dateWrite.match(msg):
(stateDB.loc[(user_id, 'eventStartDate')], stateDB.loc[(user_id, 'eventEndDate')]) = msg.split('-')
stateDB.loc[(user_id, 'stateCode')] = np.nan
elif (msg == '오늘 축제'):
stateDB.loc[(user_id, 'eventStartDate')] = datetime.today().strftime('%Y%m%d')
stateDB.loc[(user_id, 'eventEndDate')] = datetime.today().strftime('%Y%m%d')
stateDB.loc[(user_id, 'stateCode')] = np.nan
elif (msg == '내일 축제'):
tomorrow = (datetime.today() + timedelta(days=1))
stateDB.loc[(user_id, 'eventStartDate')] = tomorrow.strftime('%Y%m%d')
stateDB.loc[(user_id, 'eventEndDate')] = tomorrow.strftime('%Y%m%d')
stateDB.loc[(user_id, 'stateCode')] = np.nan
elif (msg == '이번주 축제'):
startDate = (datetime.today() - timedelta(days=datetime.today().weekday()))
endDate = (datetime.today() - timedelta(days=(datetime.today().weekday() - 7)))
stateDB.loc[(user_id, 'eventStartDate')] = startDate.strftime('%Y%m%d')
stateDB.loc[(user_id, 'eventEndDate')] = endDate.strftime('%Y%m%d')
stateDB.loc[(user_id, 'stateCode')] = np.nan
elif (msg == '이번달 축제'):
startDate = datetime.today().replace(day=1)
endDate = datetime.today().replace(day=calendar.monthrange(datetime.today().year, datetime.today().month)[1])
stateDB.loc[(user_id, 'eventStartDate')] = startDate.strftime('%Y%m%d')
stateDB.loc[(user_id, 'eventEndDate')] = endDate.strftime('%Y%m%d')
stateDB.loc[(user_id, 'stateCode')] = np.nan
print(stateDB)
|
def choice_fixCalendarDate(user_id, msg):
'\n \n '
print('choice_fixCalendarDate')
if dateWrite.match(msg):
(stateDB.loc[(user_id, 'eventStartDate')], stateDB.loc[(user_id, 'eventEndDate')]) = msg.split('-')
stateDB.loc[(user_id, 'stateCode')] = np.nan
elif (msg == '오늘 축제'):
stateDB.loc[(user_id, 'eventStartDate')] = datetime.today().strftime('%Y%m%d')
stateDB.loc[(user_id, 'eventEndDate')] = datetime.today().strftime('%Y%m%d')
stateDB.loc[(user_id, 'stateCode')] = np.nan
elif (msg == '내일 축제'):
tomorrow = (datetime.today() + timedelta(days=1))
stateDB.loc[(user_id, 'eventStartDate')] = tomorrow.strftime('%Y%m%d')
stateDB.loc[(user_id, 'eventEndDate')] = tomorrow.strftime('%Y%m%d')
stateDB.loc[(user_id, 'stateCode')] = np.nan
elif (msg == '이번주 축제'):
startDate = (datetime.today() - timedelta(days=datetime.today().weekday()))
endDate = (datetime.today() - timedelta(days=(datetime.today().weekday() - 7)))
stateDB.loc[(user_id, 'eventStartDate')] = startDate.strftime('%Y%m%d')
stateDB.loc[(user_id, 'eventEndDate')] = endDate.strftime('%Y%m%d')
stateDB.loc[(user_id, 'stateCode')] = np.nan
elif (msg == '이번달 축제'):
startDate = datetime.today().replace(day=1)
endDate = datetime.today().replace(day=calendar.monthrange(datetime.today().year, datetime.today().month)[1])
stateDB.loc[(user_id, 'eventStartDate')] = startDate.strftime('%Y%m%d')
stateDB.loc[(user_id, 'eventEndDate')] = endDate.strftime('%Y%m%d')
stateDB.loc[(user_id, 'stateCode')] = np.nan
print(stateDB)<|docstring|>사용자가 선택한 세부 일정 별 시작일 및 종료일을 state로 저장<|endoftext|>
|
4555a8295a82fef41711165ed38df1f996ff727b736cfd6f2998ef7806bcb6ad
|
def choice_contentCode(user_id, msg):
'\n 사용자가 선택한 종류를 state로 저장\n '
print('choice_contentCode')
if (msg in contentListName):
index = contentListName.index(msg)
stateDB.loc[(user_id, 'contentCode')] = contentListCode[index]
stateDB.loc[(user_id, 'stateCode')] = np.nan
elif (msg.isdigit() and (int(msg) > 0) and (int(msg) < 16)):
index = (int(msg) - 1)
stateDB.loc[(user_id, 'contentCode')] = contentListCode[index]
stateDB.loc[(user_id, 'stateCode')] = np.nan
print(stateDB)
|
사용자가 선택한 종류를 state로 저장
|
Festibot.py
|
choice_contentCode
|
imeeke83/sba_FestiBot
| 0
|
python
|
def choice_contentCode(user_id, msg):
'\n \n '
print('choice_contentCode')
if (msg in contentListName):
index = contentListName.index(msg)
stateDB.loc[(user_id, 'contentCode')] = contentListCode[index]
stateDB.loc[(user_id, 'stateCode')] = np.nan
elif (msg.isdigit() and (int(msg) > 0) and (int(msg) < 16)):
index = (int(msg) - 1)
stateDB.loc[(user_id, 'contentCode')] = contentListCode[index]
stateDB.loc[(user_id, 'stateCode')] = np.nan
print(stateDB)
|
def choice_contentCode(user_id, msg):
'\n \n '
print('choice_contentCode')
if (msg in contentListName):
index = contentListName.index(msg)
stateDB.loc[(user_id, 'contentCode')] = contentListCode[index]
stateDB.loc[(user_id, 'stateCode')] = np.nan
elif (msg.isdigit() and (int(msg) > 0) and (int(msg) < 16)):
index = (int(msg) - 1)
stateDB.loc[(user_id, 'contentCode')] = contentListCode[index]
stateDB.loc[(user_id, 'stateCode')] = np.nan
print(stateDB)<|docstring|>사용자가 선택한 종류를 state로 저장<|endoftext|>
|
98abc71aa13f92d3fc554b41c02b011f58bd6d464594342c54bbdfa1bb38a8aa
|
def searchContentFestival(user_id, startDate, endDate, content, pageNo):
'\n 전체 축제 중 사용자가 선택한 종류의 축제만을 선별\n '
print('searchContentFestival')
url = f'http://api.visitkorea.or.kr/openapi/service/rest/KorService/searchFestival?numOfRows={numOfRows}&MobileOS=ETC&MobileApp=Festibot&serviceKey={serviceKey}&listYN={listYN}&arrange={arrange}&areaCode=1&eventStartDate={startDate}&eventEndDate={endDate}&pageNo={pageNo}&_type=json'
resp = requests.get(url)
data = resp.json()
festivalInfo = data['response']['body']['items']['item']
festivalList = []
for infoDec in festivalInfo:
if (infoDec['cat3'] == content):
fixList = {'cat3': infoDec['cat3'], 'firstimage': infoDec['firstimage'], 'title': infoDec['title'], 'eventenddate': infoDec['eventenddate'], 'eventstartdate': infoDec['eventstartdate'], 'addr1': infoDec['addr1']}
festivalList.append(copy.deepcopy(fixList))
print('searchContentFestival 1 : ', festivalList)
return festivalList
|
전체 축제 중 사용자가 선택한 종류의 축제만을 선별
|
Festibot.py
|
searchContentFestival
|
imeeke83/sba_FestiBot
| 0
|
python
|
def searchContentFestival(user_id, startDate, endDate, content, pageNo):
'\n \n '
print('searchContentFestival')
url = f'http://api.visitkorea.or.kr/openapi/service/rest/KorService/searchFestival?numOfRows={numOfRows}&MobileOS=ETC&MobileApp=Festibot&serviceKey={serviceKey}&listYN={listYN}&arrange={arrange}&areaCode=1&eventStartDate={startDate}&eventEndDate={endDate}&pageNo={pageNo}&_type=json'
resp = requests.get(url)
data = resp.json()
festivalInfo = data['response']['body']['items']['item']
festivalList = []
for infoDec in festivalInfo:
if (infoDec['cat3'] == content):
fixList = {'cat3': infoDec['cat3'], 'firstimage': infoDec['firstimage'], 'title': infoDec['title'], 'eventenddate': infoDec['eventenddate'], 'eventstartdate': infoDec['eventstartdate'], 'addr1': infoDec['addr1']}
festivalList.append(copy.deepcopy(fixList))
print('searchContentFestival 1 : ', festivalList)
return festivalList
|
def searchContentFestival(user_id, startDate, endDate, content, pageNo):
'\n \n '
print('searchContentFestival')
url = f'http://api.visitkorea.or.kr/openapi/service/rest/KorService/searchFestival?numOfRows={numOfRows}&MobileOS=ETC&MobileApp=Festibot&serviceKey={serviceKey}&listYN={listYN}&arrange={arrange}&areaCode=1&eventStartDate={startDate}&eventEndDate={endDate}&pageNo={pageNo}&_type=json'
resp = requests.get(url)
data = resp.json()
festivalInfo = data['response']['body']['items']['item']
festivalList = []
for infoDec in festivalInfo:
if (infoDec['cat3'] == content):
fixList = {'cat3': infoDec['cat3'], 'firstimage': infoDec['firstimage'], 'title': infoDec['title'], 'eventenddate': infoDec['eventenddate'], 'eventstartdate': infoDec['eventstartdate'], 'addr1': infoDec['addr1']}
festivalList.append(copy.deepcopy(fixList))
print('searchContentFestival 1 : ', festivalList)
return festivalList<|docstring|>전체 축제 중 사용자가 선택한 종류의 축제만을 선별<|endoftext|>
|
91ce83a86510e87deb98c765a0ad7379d24132bbe1a8262c9836c01d81d1c4ab
|
def searchAllFestival(user_id, startDate, endDate, content):
'\n 조건에 맞는 모든 축제 검색\n '
print('searchAllFestival')
url = f'http://api.visitkorea.or.kr/openapi/service/rest/KorService/searchFestival?numOfRows={numOfRows}&MobileOS=ETC&MobileApp=Festibot&serviceKey={serviceKey}&listYN={listYN}&arrange={arrange}&areaCode=1&eventStartDate={startDate}&eventEndDate={endDate}&pageNo=1&_type=json'
resp = requests.get(url)
data = resp.json()
festivalInfo = data['response']['body']['items']['item']
festivalList = []
if (content == False):
count = int(data['response']['body']['totalCount'])
if (count > numOfRows):
print('searchAllFestival 1 : ', festivalInfo)
return festivalInfo
else:
for infoDec in festivalInfo:
fixList = {'cat3': infoDec['cat3'], 'firstimage': infoDec['firstimage'], 'title': infoDec['title'], 'eventenddate': infoDec['eventenddate'], 'eventstartdate': infoDec['eventstartdate'], 'addr1': infoDec['addr1']}
festivalList.append(copy.deepcopy(fixList))
print('searchAllFestival 2 : ', festivalList)
return festivalList
else:
rootCount = (int(data['response']['body']['totalCount']) // 20)
for i in range(1, (rootCount + 1)):
festivalList.extend(copy.deepcopy(searchContentFestival(user_id, startDate, endDate, content, i)))
print('searchAllFestival 3 : ', festivalList)
return festivalList
|
조건에 맞는 모든 축제 검색
|
Festibot.py
|
searchAllFestival
|
imeeke83/sba_FestiBot
| 0
|
python
|
def searchAllFestival(user_id, startDate, endDate, content):
'\n \n '
print('searchAllFestival')
url = f'http://api.visitkorea.or.kr/openapi/service/rest/KorService/searchFestival?numOfRows={numOfRows}&MobileOS=ETC&MobileApp=Festibot&serviceKey={serviceKey}&listYN={listYN}&arrange={arrange}&areaCode=1&eventStartDate={startDate}&eventEndDate={endDate}&pageNo=1&_type=json'
resp = requests.get(url)
data = resp.json()
festivalInfo = data['response']['body']['items']['item']
festivalList = []
if (content == False):
count = int(data['response']['body']['totalCount'])
if (count > numOfRows):
print('searchAllFestival 1 : ', festivalInfo)
return festivalInfo
else:
for infoDec in festivalInfo:
fixList = {'cat3': infoDec['cat3'], 'firstimage': infoDec['firstimage'], 'title': infoDec['title'], 'eventenddate': infoDec['eventenddate'], 'eventstartdate': infoDec['eventstartdate'], 'addr1': infoDec['addr1']}
festivalList.append(copy.deepcopy(fixList))
print('searchAllFestival 2 : ', festivalList)
return festivalList
else:
rootCount = (int(data['response']['body']['totalCount']) // 20)
for i in range(1, (rootCount + 1)):
festivalList.extend(copy.deepcopy(searchContentFestival(user_id, startDate, endDate, content, i)))
print('searchAllFestival 3 : ', festivalList)
return festivalList
|
def searchAllFestival(user_id, startDate, endDate, content):
'\n \n '
print('searchAllFestival')
url = f'http://api.visitkorea.or.kr/openapi/service/rest/KorService/searchFestival?numOfRows={numOfRows}&MobileOS=ETC&MobileApp=Festibot&serviceKey={serviceKey}&listYN={listYN}&arrange={arrange}&areaCode=1&eventStartDate={startDate}&eventEndDate={endDate}&pageNo=1&_type=json'
resp = requests.get(url)
data = resp.json()
festivalInfo = data['response']['body']['items']['item']
festivalList = []
if (content == False):
count = int(data['response']['body']['totalCount'])
if (count > numOfRows):
print('searchAllFestival 1 : ', festivalInfo)
return festivalInfo
else:
for infoDec in festivalInfo:
fixList = {'cat3': infoDec['cat3'], 'firstimage': infoDec['firstimage'], 'title': infoDec['title'], 'eventenddate': infoDec['eventenddate'], 'eventstartdate': infoDec['eventstartdate'], 'addr1': infoDec['addr1']}
festivalList.append(copy.deepcopy(fixList))
print('searchAllFestival 2 : ', festivalList)
return festivalList
else:
rootCount = (int(data['response']['body']['totalCount']) // 20)
for i in range(1, (rootCount + 1)):
festivalList.extend(copy.deepcopy(searchContentFestival(user_id, startDate, endDate, content, i)))
print('searchAllFestival 3 : ', festivalList)
return festivalList<|docstring|>조건에 맞는 모든 축제 검색<|endoftext|>
|
66f38a4f5fddcac52bd26a71d4cc38b713e39b5ef0fc631cedf297da6ba12e1c
|
def festival_list_date(user_id, **kwargs):
'\n 축제 갯수를 이용하여 조건 추가 여부 판단\n '
print('festival_list_date')
startDate = (datetime.today() - timedelta(days=datetime.today().weekday()))
endDate = (datetime.today() - timedelta(days=(datetime.today().weekday() - 7)))
startDate = startDate.strftime('%Y%m%d')
endDate = endDate.strftime('%Y%m%d')
if ((not stateDB.isnull().loc[(user_id, 'eventStartDate')]) and (not stateDB.isnull().loc[(user_id, 'contentCode')])):
festivalList = searchAllFestival(user_id, stateDB.loc[(user_id, 'eventStartDate')], stateDB.loc[(user_id, 'eventEndDate')], stateDB.loc[(user_id, 'contentCode')])
if (len(festivalList) > numOfRows):
send_message(user_id, '일정과 축제 종류까지 선택했는데도 축제가 너무 많네요. 하지만 괜찮아요 가장 인기있는 축제 20개를 알려드릴께요 ! 이중에는 재미있는 축제가 너무너무 많답니다.')
festivalList = showFestivalList(user_id, festivalList)
print('festival_list_date 1 : ', festivalList)
return festivalList
else:
festivalList = showFestivalList(user_id, festivalList)
print('festival_list_date 2 : ', festivalList)
return festivalList
elif stateDB.isnull().loc[(user_id, 'contentCode')]:
festivalList = searchAllFestival(user_id, stateDB.loc[(user_id, 'eventStartDate')], stateDB.loc[(user_id, 'eventEndDate')], False)
if (len(festivalList) > numOfRows):
send_message(user_id, '앗! 검색 결과가 너무 많아요. 다른 조건도 입력해 주세요. 축제 하면 전통행사 아니겠어요? 조건에 전통행사를 넣어보는 것도 추천드려요.')
print('festival_list_date 0 : ', festivalList)
return '0'
else:
festivalList = showFestivalList(user_id, festivalList)
print('festival_list_date 3 : ', festivalList)
return festivalList
elif stateDB.isnull().loc[(user_id, 'eventStartDate')]:
festivalList = searchAllFestival(user_id, startDate, endDate, stateDB.loc[(user_id, 'contentCode')])
if (len(festivalList) > numOfRows):
send_message(user_id, '앗! 검색 결과가 너무 많아요. 다른 조건도 입력해 주세요. 조건에 한 달 이내를 넣는 건 어떠세요? 이번 달에 재미있는 축제가 많아요!')
print('festival_list_date 0 : ', festivalList)
return '0'
else:
festivalList = showFestivalList(user_id, festivalList)
print('festival_list_date 4 : ', festivalList)
return festivalList
|
축제 갯수를 이용하여 조건 추가 여부 판단
|
Festibot.py
|
festival_list_date
|
imeeke83/sba_FestiBot
| 0
|
python
|
def festival_list_date(user_id, **kwargs):
'\n \n '
print('festival_list_date')
startDate = (datetime.today() - timedelta(days=datetime.today().weekday()))
endDate = (datetime.today() - timedelta(days=(datetime.today().weekday() - 7)))
startDate = startDate.strftime('%Y%m%d')
endDate = endDate.strftime('%Y%m%d')
if ((not stateDB.isnull().loc[(user_id, 'eventStartDate')]) and (not stateDB.isnull().loc[(user_id, 'contentCode')])):
festivalList = searchAllFestival(user_id, stateDB.loc[(user_id, 'eventStartDate')], stateDB.loc[(user_id, 'eventEndDate')], stateDB.loc[(user_id, 'contentCode')])
if (len(festivalList) > numOfRows):
send_message(user_id, '일정과 축제 종류까지 선택했는데도 축제가 너무 많네요. 하지만 괜찮아요 가장 인기있는 축제 20개를 알려드릴께요 ! 이중에는 재미있는 축제가 너무너무 많답니다.')
festivalList = showFestivalList(user_id, festivalList)
print('festival_list_date 1 : ', festivalList)
return festivalList
else:
festivalList = showFestivalList(user_id, festivalList)
print('festival_list_date 2 : ', festivalList)
return festivalList
elif stateDB.isnull().loc[(user_id, 'contentCode')]:
festivalList = searchAllFestival(user_id, stateDB.loc[(user_id, 'eventStartDate')], stateDB.loc[(user_id, 'eventEndDate')], False)
if (len(festivalList) > numOfRows):
send_message(user_id, '앗! 검색 결과가 너무 많아요. 다른 조건도 입력해 주세요. 축제 하면 전통행사 아니겠어요? 조건에 전통행사를 넣어보는 것도 추천드려요.')
print('festival_list_date 0 : ', festivalList)
return '0'
else:
festivalList = showFestivalList(user_id, festivalList)
print('festival_list_date 3 : ', festivalList)
return festivalList
elif stateDB.isnull().loc[(user_id, 'eventStartDate')]:
festivalList = searchAllFestival(user_id, startDate, endDate, stateDB.loc[(user_id, 'contentCode')])
if (len(festivalList) > numOfRows):
send_message(user_id, '앗! 검색 결과가 너무 많아요. 다른 조건도 입력해 주세요. 조건에 한 달 이내를 넣는 건 어떠세요? 이번 달에 재미있는 축제가 많아요!')
print('festival_list_date 0 : ', festivalList)
return '0'
else:
festivalList = showFestivalList(user_id, festivalList)
print('festival_list_date 4 : ', festivalList)
return festivalList
|
def festival_list_date(user_id, **kwargs):
'\n \n '
print('festival_list_date')
startDate = (datetime.today() - timedelta(days=datetime.today().weekday()))
endDate = (datetime.today() - timedelta(days=(datetime.today().weekday() - 7)))
startDate = startDate.strftime('%Y%m%d')
endDate = endDate.strftime('%Y%m%d')
if ((not stateDB.isnull().loc[(user_id, 'eventStartDate')]) and (not stateDB.isnull().loc[(user_id, 'contentCode')])):
festivalList = searchAllFestival(user_id, stateDB.loc[(user_id, 'eventStartDate')], stateDB.loc[(user_id, 'eventEndDate')], stateDB.loc[(user_id, 'contentCode')])
if (len(festivalList) > numOfRows):
send_message(user_id, '일정과 축제 종류까지 선택했는데도 축제가 너무 많네요. 하지만 괜찮아요 가장 인기있는 축제 20개를 알려드릴께요 ! 이중에는 재미있는 축제가 너무너무 많답니다.')
festivalList = showFestivalList(user_id, festivalList)
print('festival_list_date 1 : ', festivalList)
return festivalList
else:
festivalList = showFestivalList(user_id, festivalList)
print('festival_list_date 2 : ', festivalList)
return festivalList
elif stateDB.isnull().loc[(user_id, 'contentCode')]:
festivalList = searchAllFestival(user_id, stateDB.loc[(user_id, 'eventStartDate')], stateDB.loc[(user_id, 'eventEndDate')], False)
if (len(festivalList) > numOfRows):
send_message(user_id, '앗! 검색 결과가 너무 많아요. 다른 조건도 입력해 주세요. 축제 하면 전통행사 아니겠어요? 조건에 전통행사를 넣어보는 것도 추천드려요.')
print('festival_list_date 0 : ', festivalList)
return '0'
else:
festivalList = showFestivalList(user_id, festivalList)
print('festival_list_date 3 : ', festivalList)
return festivalList
elif stateDB.isnull().loc[(user_id, 'eventStartDate')]:
festivalList = searchAllFestival(user_id, startDate, endDate, stateDB.loc[(user_id, 'contentCode')])
if (len(festivalList) > numOfRows):
send_message(user_id, '앗! 검색 결과가 너무 많아요. 다른 조건도 입력해 주세요. 조건에 한 달 이내를 넣는 건 어떠세요? 이번 달에 재미있는 축제가 많아요!')
print('festival_list_date 0 : ', festivalList)
return '0'
else:
festivalList = showFestivalList(user_id, festivalList)
print('festival_list_date 4 : ', festivalList)
return festivalList<|docstring|>축제 갯수를 이용하여 조건 추가 여부 판단<|endoftext|>
|
36d39b6e342fb429bdfe8c4bdc7f87a2b32e6976856f50cde8e89405ee0a7c9c
|
def choice_detailFestival(user_id, festivalList, msg):
'\n 사용자가 선택한 축제의 상세 정보 출력\n '
print('choice_detailFestival')
print('choice_detailFestival : ', festivalList)
if (len(festivalList) > int(msg)):
detailFestival = festivalList[int(msg)]
url = 'https://api.telegram.org/bot{token}/sendPhoto'.format(token=API_KEY)
params = {'chat_id': user_id, 'photo': detailFestival['firstimage']}
requests.post(url, json=params)
url = 'https://api.telegram.org/bot{token}/sendMessage'.format(token=API_KEY)
msg = f'''축제 종류 : {detailFestival['cat3']}
축제 이름 : {detailFestival['title']}
축제 기간 : {detailFestival['eventstartdate']} ~ {detailFestival['eventenddate']}
주소 : {detailFestival['addr1']}'''
params = {'chat_id': user_id, 'text': msg}
requests.post(url, json=params)
send_message(user_id, '소개해드린 축제에 가고싶으신가요??')
stateDB.loc[user_id] = np.nan
else:
send_message(user_id, '올바른 축제 번호를 입력해주세요 !')
|
사용자가 선택한 축제의 상세 정보 출력
|
Festibot.py
|
choice_detailFestival
|
imeeke83/sba_FestiBot
| 0
|
python
|
def choice_detailFestival(user_id, festivalList, msg):
'\n \n '
print('choice_detailFestival')
print('choice_detailFestival : ', festivalList)
if (len(festivalList) > int(msg)):
detailFestival = festivalList[int(msg)]
url = 'https://api.telegram.org/bot{token}/sendPhoto'.format(token=API_KEY)
params = {'chat_id': user_id, 'photo': detailFestival['firstimage']}
requests.post(url, json=params)
url = 'https://api.telegram.org/bot{token}/sendMessage'.format(token=API_KEY)
msg = f'축제 종류 : {detailFestival['cat3']}
축제 이름 : {detailFestival['title']}
축제 기간 : {detailFestival['eventstartdate']} ~ {detailFestival['eventenddate']}
주소 : {detailFestival['addr1']}'
params = {'chat_id': user_id, 'text': msg}
requests.post(url, json=params)
send_message(user_id, '소개해드린 축제에 가고싶으신가요??')
stateDB.loc[user_id] = np.nan
else:
send_message(user_id, '올바른 축제 번호를 입력해주세요 !')
|
def choice_detailFestival(user_id, festivalList, msg):
'\n \n '
print('choice_detailFestival')
print('choice_detailFestival : ', festivalList)
if (len(festivalList) > int(msg)):
detailFestival = festivalList[int(msg)]
url = 'https://api.telegram.org/bot{token}/sendPhoto'.format(token=API_KEY)
params = {'chat_id': user_id, 'photo': detailFestival['firstimage']}
requests.post(url, json=params)
url = 'https://api.telegram.org/bot{token}/sendMessage'.format(token=API_KEY)
msg = f'축제 종류 : {detailFestival['cat3']}
축제 이름 : {detailFestival['title']}
축제 기간 : {detailFestival['eventstartdate']} ~ {detailFestival['eventenddate']}
주소 : {detailFestival['addr1']}'
params = {'chat_id': user_id, 'text': msg}
requests.post(url, json=params)
send_message(user_id, '소개해드린 축제에 가고싶으신가요??')
stateDB.loc[user_id] = np.nan
else:
send_message(user_id, '올바른 축제 번호를 입력해주세요 !')<|docstring|>사용자가 선택한 축제의 상세 정보 출력<|endoftext|>
|
64513ae002419b12017a24058924fee068e6be865cca097272f52c5a25cdc886
|
def choice_likeFestival(user_id):
'\n 가고 싶은 축제 선택\n '
print('choice_likeFestival')
for row in userInfoDB.rows:
if (row[0].value == user_id):
userRow = row[0].row()
index = contentListCode.index(stateDB.loc[(user_id, 'contentCode')])
userInfoDB.cell(row=userRow, column=(index + 3)).value += 1
db.save(EXCEL_FILE_NAME)
return False
|
가고 싶은 축제 선택
|
Festibot.py
|
choice_likeFestival
|
imeeke83/sba_FestiBot
| 0
|
python
|
def choice_likeFestival(user_id):
'\n \n '
print('choice_likeFestival')
for row in userInfoDB.rows:
if (row[0].value == user_id):
userRow = row[0].row()
index = contentListCode.index(stateDB.loc[(user_id, 'contentCode')])
userInfoDB.cell(row=userRow, column=(index + 3)).value += 1
db.save(EXCEL_FILE_NAME)
return False
|
def choice_likeFestival(user_id):
'\n \n '
print('choice_likeFestival')
for row in userInfoDB.rows:
if (row[0].value == user_id):
userRow = row[0].row()
index = contentListCode.index(stateDB.loc[(user_id, 'contentCode')])
userInfoDB.cell(row=userRow, column=(index + 3)).value += 1
db.save(EXCEL_FILE_NAME)
return False<|docstring|>가고 싶은 축제 선택<|endoftext|>
|
716b5138d987e3dd5924c990c77ea9e0f3a3907bb85e4dc8d472ceb25256099b
|
def set_stateCode_button(user_id, msg, stateCode):
'\n 사용자의 버튼 클릭을 바탕으로 상태코드를 부여.\n D : 기간 입력 필요/ C : 종류 입력 필요\n user_id = 사용자 아이디 코드, button_call : 버튼 입력 내역\n '
print('set_stateCode_button')
if (stateCode == 'D'):
send_message(user_id, '어떤 날짜에 놀러가고 싶어요? 아래 버튼으로 정할 수 있구, 특정 기간을 정하고 싶으면 YYYYMMDD-YYMMDD로 입력해줘요 !')
choice_calendarDate(user_id, msg)
elif (stateCode == 'C'):
send_message(user_id, '어떤 축제에 놀러가고 싶어요?\n제가 축제 종류를 알려드릴께요 !')
send_message(user_id, '1. 문화관광\n2. 일반\n3. 전통공연\n4. 연극\n5. 뮤지컬\n6. 오페라\n7. 전시회\n8. 박람회\n9. 컨벤션\n10. 무용\n11. 클래식음악회\n12. 대중콘서트\n13. 영화\n14. 스포츠경기\n15. 기타행사')
send_message(user_id, '번호 또는 축제 종류를 적어주세요 !')
choice_contentCode(user_id, msg)
|
사용자의 버튼 클릭을 바탕으로 상태코드를 부여.
D : 기간 입력 필요/ C : 종류 입력 필요
user_id = 사용자 아이디 코드, button_call : 버튼 입력 내역
|
Festibot.py
|
set_stateCode_button
|
imeeke83/sba_FestiBot
| 0
|
python
|
def set_stateCode_button(user_id, msg, stateCode):
'\n 사용자의 버튼 클릭을 바탕으로 상태코드를 부여.\n D : 기간 입력 필요/ C : 종류 입력 필요\n user_id = 사용자 아이디 코드, button_call : 버튼 입력 내역\n '
print('set_stateCode_button')
if (stateCode == 'D'):
send_message(user_id, '어떤 날짜에 놀러가고 싶어요? 아래 버튼으로 정할 수 있구, 특정 기간을 정하고 싶으면 YYYYMMDD-YYMMDD로 입력해줘요 !')
choice_calendarDate(user_id, msg)
elif (stateCode == 'C'):
send_message(user_id, '어떤 축제에 놀러가고 싶어요?\n제가 축제 종류를 알려드릴께요 !')
send_message(user_id, '1. 문화관광\n2. 일반\n3. 전통공연\n4. 연극\n5. 뮤지컬\n6. 오페라\n7. 전시회\n8. 박람회\n9. 컨벤션\n10. 무용\n11. 클래식음악회\n12. 대중콘서트\n13. 영화\n14. 스포츠경기\n15. 기타행사')
send_message(user_id, '번호 또는 축제 종류를 적어주세요 !')
choice_contentCode(user_id, msg)
|
def set_stateCode_button(user_id, msg, stateCode):
'\n 사용자의 버튼 클릭을 바탕으로 상태코드를 부여.\n D : 기간 입력 필요/ C : 종류 입력 필요\n user_id = 사용자 아이디 코드, button_call : 버튼 입력 내역\n '
print('set_stateCode_button')
if (stateCode == 'D'):
send_message(user_id, '어떤 날짜에 놀러가고 싶어요? 아래 버튼으로 정할 수 있구, 특정 기간을 정하고 싶으면 YYYYMMDD-YYMMDD로 입력해줘요 !')
choice_calendarDate(user_id, msg)
elif (stateCode == 'C'):
send_message(user_id, '어떤 축제에 놀러가고 싶어요?\n제가 축제 종류를 알려드릴께요 !')
send_message(user_id, '1. 문화관광\n2. 일반\n3. 전통공연\n4. 연극\n5. 뮤지컬\n6. 오페라\n7. 전시회\n8. 박람회\n9. 컨벤션\n10. 무용\n11. 클래식음악회\n12. 대중콘서트\n13. 영화\n14. 스포츠경기\n15. 기타행사')
send_message(user_id, '번호 또는 축제 종류를 적어주세요 !')
choice_contentCode(user_id, msg)<|docstring|>사용자의 버튼 클릭을 바탕으로 상태코드를 부여.
D : 기간 입력 필요/ C : 종류 입력 필요
user_id = 사용자 아이디 코드, button_call : 버튼 입력 내역<|endoftext|>
|
2926d04a019556974d2eec80df85a1fe5064b9205b8c2aae5ee1cad4215ee9d3
|
def GetWebContentLink(self):
'Finds the first link with rel set to WEB_CONTENT_REL\n\n Returns:\n A gdata.calendar.WebContentLink or none if none of the links had rel \n equal to WEB_CONTENT_REL\n '
for a_link in self.link:
if (a_link.rel == WEB_CONTENT_LINK_REL):
return a_link
return None
|
Finds the first link with rel set to WEB_CONTENT_REL
Returns:
A gdata.calendar.WebContentLink or none if none of the links had rel
equal to WEB_CONTENT_REL
|
python/gdata/src/gdata/calendar/__init__.py
|
GetWebContentLink
|
nokibsarkar/sl4a
| 2,293
|
python
|
def GetWebContentLink(self):
'Finds the first link with rel set to WEB_CONTENT_REL\n\n Returns:\n A gdata.calendar.WebContentLink or none if none of the links had rel \n equal to WEB_CONTENT_REL\n '
for a_link in self.link:
if (a_link.rel == WEB_CONTENT_LINK_REL):
return a_link
return None
|
def GetWebContentLink(self):
'Finds the first link with rel set to WEB_CONTENT_REL\n\n Returns:\n A gdata.calendar.WebContentLink or none if none of the links had rel \n equal to WEB_CONTENT_REL\n '
for a_link in self.link:
if (a_link.rel == WEB_CONTENT_LINK_REL):
return a_link
return None<|docstring|>Finds the first link with rel set to WEB_CONTENT_REL
Returns:
A gdata.calendar.WebContentLink or none if none of the links had rel
equal to WEB_CONTENT_REL<|endoftext|>
|
73bd00702ed74ed30b8b7c8bca7a5c80d57c70351fa9c5dfb44df49275374126
|
def query(sql, conn):
'查询 sql'
conn.execute(sql)
rows = conn.fetchall()
return rows
|
查询 sql
|
python/showMeTheCode/0002/index.py
|
query
|
andyzhenghn/StudingNotes
| 0
|
python
|
def query(sql, conn):
conn.execute(sql)
rows = conn.fetchall()
return rows
|
def query(sql, conn):
conn.execute(sql)
rows = conn.fetchall()
return rows<|docstring|>查询 sql<|endoftext|>
|
e402706393af77e3a47a2684f09ed7ba9e49c370ec60f6aa234e8e4311c9c9af
|
def montecarlo_policy_evaluation(episodes, states, reward, discount=0.95):
'\n Performs Monte Carlo Policy Evaluation. Takes in a number of trajectories and\n develops state value estimates for all states over time by computing the average\n reward-to-go obtained at each state over n visits\n :param episodes: A container or generator of trajectories, each trajectory being a List of states\n :param states: The full container of possible states in the MDP\n :param reward: a function accepting a state as an argument and returning a numeric reward\n :param discount: a discount value, between 0 and 1\n :return: values, visits: Dict mapping states to value estimates based on passed-in episodes, Dict mapping states\n to number of visits over the course of the algorithms run\n '
values = {}
visits = {}
sums = {}
for s in states:
values[s] = 0
visits[s] = 0
sums[s] = 0
reward_to_go = _rtg_factory(reward)
for episode in episodes:
i = 0
for (s, reward) in episode:
sums[s] += reward_to_go(episode[i:], discount)
visits[s] += 1
values[s] = (sums[s] / visits[s])
i += 1
return (values, visits)
|
Performs Monte Carlo Policy Evaluation. Takes in a number of trajectories and
develops state value estimates for all states over time by computing the average
reward-to-go obtained at each state over n visits
:param episodes: A container or generator of trajectories, each trajectory being a List of states
:param states: The full container of possible states in the MDP
:param reward: a function accepting a state as an argument and returning a numeric reward
:param discount: a discount value, between 0 and 1
:return: values, visits: Dict mapping states to value estimates based on passed-in episodes, Dict mapping states
to number of visits over the course of the algorithms run
|
algorithms/rl.py
|
montecarlo_policy_evaluation
|
alexander-paskal/PySOP
| 0
|
python
|
def montecarlo_policy_evaluation(episodes, states, reward, discount=0.95):
'\n Performs Monte Carlo Policy Evaluation. Takes in a number of trajectories and\n develops state value estimates for all states over time by computing the average\n reward-to-go obtained at each state over n visits\n :param episodes: A container or generator of trajectories, each trajectory being a List of states\n :param states: The full container of possible states in the MDP\n :param reward: a function accepting a state as an argument and returning a numeric reward\n :param discount: a discount value, between 0 and 1\n :return: values, visits: Dict mapping states to value estimates based on passed-in episodes, Dict mapping states\n to number of visits over the course of the algorithms run\n '
values = {}
visits = {}
sums = {}
for s in states:
values[s] = 0
visits[s] = 0
sums[s] = 0
reward_to_go = _rtg_factory(reward)
for episode in episodes:
i = 0
for (s, reward) in episode:
sums[s] += reward_to_go(episode[i:], discount)
visits[s] += 1
values[s] = (sums[s] / visits[s])
i += 1
return (values, visits)
|
def montecarlo_policy_evaluation(episodes, states, reward, discount=0.95):
'\n Performs Monte Carlo Policy Evaluation. Takes in a number of trajectories and\n develops state value estimates for all states over time by computing the average\n reward-to-go obtained at each state over n visits\n :param episodes: A container or generator of trajectories, each trajectory being a List of states\n :param states: The full container of possible states in the MDP\n :param reward: a function accepting a state as an argument and returning a numeric reward\n :param discount: a discount value, between 0 and 1\n :return: values, visits: Dict mapping states to value estimates based on passed-in episodes, Dict mapping states\n to number of visits over the course of the algorithms run\n '
values = {}
visits = {}
sums = {}
for s in states:
values[s] = 0
visits[s] = 0
sums[s] = 0
reward_to_go = _rtg_factory(reward)
for episode in episodes:
i = 0
for (s, reward) in episode:
sums[s] += reward_to_go(episode[i:], discount)
visits[s] += 1
values[s] = (sums[s] / visits[s])
i += 1
return (values, visits)<|docstring|>Performs Monte Carlo Policy Evaluation. Takes in a number of trajectories and
develops state value estimates for all states over time by computing the average
reward-to-go obtained at each state over n visits
:param episodes: A container or generator of trajectories, each trajectory being a List of states
:param states: The full container of possible states in the MDP
:param reward: a function accepting a state as an argument and returning a numeric reward
:param discount: a discount value, between 0 and 1
:return: values, visits: Dict mapping states to value estimates based on passed-in episodes, Dict mapping states
to number of visits over the course of the algorithms run<|endoftext|>
|
15afc09598c589a825f58db8b70610c8832a29b285a43486d30aadae1ab50ae3
|
def temporal_difference_policy_evaluation(episodes, states, reward, alpha, discount=0.95):
"\n Performs a temporal difference update on state value estimations by evaluating\n trajectory values and updating by weighted difference between current sample and\n previous estimation.\n\n V(s) <- V(s) + alpha ( R(s) + discount*V(s') - V(s))\n\n :param episodes:\n :param states:\n :param reward:\n :param alpha:\n :param discount:\n :return: values, visits: Dict mapping states to value estimates based on passed-in episodes, Dict mapping states\n to number of visits over the course of the algorithms run\n "
values = {}
visits = {}
for s in states:
values[s] = 0
visits[s] = 0
for episode in episodes:
episode.append(None)
i = 0
s = episode[i]
r = reward(s)
while (s is not None):
next_s = episode[(i + 1)]
if (next_s is None):
next_td = 0
next_r = None
else:
next_r = reward(next_s)
next_td = values[next_s]
alp = alpha((visits[s] + 1))
td = values[s]
result = (td + (alp * ((r + (discount * next_td)) - td)))
values[s] = result
visits[s] += 1
i += 1
(s, r) = (next_s, next_r)
return (values, visits)
|
Performs a temporal difference update on state value estimations by evaluating
trajectory values and updating by weighted difference between current sample and
previous estimation.
V(s) <- V(s) + alpha ( R(s) + discount*V(s') - V(s))
:param episodes:
:param states:
:param reward:
:param alpha:
:param discount:
:return: values, visits: Dict mapping states to value estimates based on passed-in episodes, Dict mapping states
to number of visits over the course of the algorithms run
|
algorithms/rl.py
|
temporal_difference_policy_evaluation
|
alexander-paskal/PySOP
| 0
|
python
|
def temporal_difference_policy_evaluation(episodes, states, reward, alpha, discount=0.95):
"\n Performs a temporal difference update on state value estimations by evaluating\n trajectory values and updating by weighted difference between current sample and\n previous estimation.\n\n V(s) <- V(s) + alpha ( R(s) + discount*V(s') - V(s))\n\n :param episodes:\n :param states:\n :param reward:\n :param alpha:\n :param discount:\n :return: values, visits: Dict mapping states to value estimates based on passed-in episodes, Dict mapping states\n to number of visits over the course of the algorithms run\n "
values = {}
visits = {}
for s in states:
values[s] = 0
visits[s] = 0
for episode in episodes:
episode.append(None)
i = 0
s = episode[i]
r = reward(s)
while (s is not None):
next_s = episode[(i + 1)]
if (next_s is None):
next_td = 0
next_r = None
else:
next_r = reward(next_s)
next_td = values[next_s]
alp = alpha((visits[s] + 1))
td = values[s]
result = (td + (alp * ((r + (discount * next_td)) - td)))
values[s] = result
visits[s] += 1
i += 1
(s, r) = (next_s, next_r)
return (values, visits)
|
def temporal_difference_policy_evaluation(episodes, states, reward, alpha, discount=0.95):
"\n Performs a temporal difference update on state value estimations by evaluating\n trajectory values and updating by weighted difference between current sample and\n previous estimation.\n\n V(s) <- V(s) + alpha ( R(s) + discount*V(s') - V(s))\n\n :param episodes:\n :param states:\n :param reward:\n :param alpha:\n :param discount:\n :return: values, visits: Dict mapping states to value estimates based on passed-in episodes, Dict mapping states\n to number of visits over the course of the algorithms run\n "
values = {}
visits = {}
for s in states:
values[s] = 0
visits[s] = 0
for episode in episodes:
episode.append(None)
i = 0
s = episode[i]
r = reward(s)
while (s is not None):
next_s = episode[(i + 1)]
if (next_s is None):
next_td = 0
next_r = None
else:
next_r = reward(next_s)
next_td = values[next_s]
alp = alpha((visits[s] + 1))
td = values[s]
result = (td + (alp * ((r + (discount * next_td)) - td)))
values[s] = result
visits[s] += 1
i += 1
(s, r) = (next_s, next_r)
return (values, visits)<|docstring|>Performs a temporal difference update on state value estimations by evaluating
trajectory values and updating by weighted difference between current sample and
previous estimation.
V(s) <- V(s) + alpha ( R(s) + discount*V(s') - V(s))
:param episodes:
:param states:
:param reward:
:param alpha:
:param discount:
:return: values, visits: Dict mapping states to value estimates based on passed-in episodes, Dict mapping states
to number of visits over the course of the algorithms run<|endoftext|>
|
392893f6a4df1ff343fbae8523f6fdca48b72fccd018b91bf97c633744cecc6f
|
def tabular_q_learning(episodes, states, actions, reward, alpha, discount=0.95, epsilon=0.4, seed=0):
'\n Performs epsilon-greedy q-learning. Accepts a number of episodes over which to perform learning,\n updates Q-values for every state-action pair based on results of training.\n :param episodes:\n :param states:\n :param actions:\n :param reward:\n :param alpha:\n :param discount:\n :param epsilon:\n :param seed:\n :return:\n '
random.seed(seed)
values = {}
visits = {}
for s in states:
values[s] = {a: 0 for a in actions[s]}
visits[s] = 0
for episode in episodes:
episode.append(None)
i = 0
s = episode[i]
r = reward(s)
while (s is not None):
action = pick_action(s, values, epsilon)
next_s = episode[(i + 1)]
if (next_s is None):
next_q = 0
next_r = None
else:
next_r = reward(next_s)
next_q = values[next_s]
alp = alpha((visits[s] + 1))
q = values[s][action]
result = (q + (alp * ((r + (discount * next_q)) - q)))
values[s] = result
visits[s] += 1
i += 1
(s, r) = (next_s, next_r)
return (values, visits)
|
Performs epsilon-greedy q-learning. Accepts a number of episodes over which to perform learning,
updates Q-values for every state-action pair based on results of training.
:param episodes:
:param states:
:param actions:
:param reward:
:param alpha:
:param discount:
:param epsilon:
:param seed:
:return:
|
algorithms/rl.py
|
tabular_q_learning
|
alexander-paskal/PySOP
| 0
|
python
|
def tabular_q_learning(episodes, states, actions, reward, alpha, discount=0.95, epsilon=0.4, seed=0):
'\n Performs epsilon-greedy q-learning. Accepts a number of episodes over which to perform learning,\n updates Q-values for every state-action pair based on results of training.\n :param episodes:\n :param states:\n :param actions:\n :param reward:\n :param alpha:\n :param discount:\n :param epsilon:\n :param seed:\n :return:\n '
random.seed(seed)
values = {}
visits = {}
for s in states:
values[s] = {a: 0 for a in actions[s]}
visits[s] = 0
for episode in episodes:
episode.append(None)
i = 0
s = episode[i]
r = reward(s)
while (s is not None):
action = pick_action(s, values, epsilon)
next_s = episode[(i + 1)]
if (next_s is None):
next_q = 0
next_r = None
else:
next_r = reward(next_s)
next_q = values[next_s]
alp = alpha((visits[s] + 1))
q = values[s][action]
result = (q + (alp * ((r + (discount * next_q)) - q)))
values[s] = result
visits[s] += 1
i += 1
(s, r) = (next_s, next_r)
return (values, visits)
|
def tabular_q_learning(episodes, states, actions, reward, alpha, discount=0.95, epsilon=0.4, seed=0):
'\n Performs epsilon-greedy q-learning. Accepts a number of episodes over which to perform learning,\n updates Q-values for every state-action pair based on results of training.\n :param episodes:\n :param states:\n :param actions:\n :param reward:\n :param alpha:\n :param discount:\n :param epsilon:\n :param seed:\n :return:\n '
random.seed(seed)
values = {}
visits = {}
for s in states:
values[s] = {a: 0 for a in actions[s]}
visits[s] = 0
for episode in episodes:
episode.append(None)
i = 0
s = episode[i]
r = reward(s)
while (s is not None):
action = pick_action(s, values, epsilon)
next_s = episode[(i + 1)]
if (next_s is None):
next_q = 0
next_r = None
else:
next_r = reward(next_s)
next_q = values[next_s]
alp = alpha((visits[s] + 1))
q = values[s][action]
result = (q + (alp * ((r + (discount * next_q)) - q)))
values[s] = result
visits[s] += 1
i += 1
(s, r) = (next_s, next_r)
return (values, visits)<|docstring|>Performs epsilon-greedy q-learning. Accepts a number of episodes over which to perform learning,
updates Q-values for every state-action pair based on results of training.
:param episodes:
:param states:
:param actions:
:param reward:
:param alpha:
:param discount:
:param epsilon:
:param seed:
:return:<|endoftext|>
|
17ca00eb92274d99d46ead4caf2a08dc56d932353a16dd9be39501bc2bbf77bd
|
def pick_action(s, values, epsilon):
'\n Chooses an action for s based on an epsilon greedy strategy\n :param s: the state being evaluated\n :param values: The Q-values for all s-a pairs, nest Dict\n :param epsilon: the threshold for random choice, governing exploration vs. exploitation\n :return:\n '
if (random.random() < epsilon):
return random.choice(values[s].keys())
max_q_val = (- math.inf)
max_action = None
for (action, value) in values[s].items():
if (max_q_val < value):
max_q_val = value
max_action = action
return max_action
|
Chooses an action for s based on an epsilon greedy strategy
:param s: the state being evaluated
:param values: The Q-values for all s-a pairs, nest Dict
:param epsilon: the threshold for random choice, governing exploration vs. exploitation
:return:
|
algorithms/rl.py
|
pick_action
|
alexander-paskal/PySOP
| 0
|
python
|
def pick_action(s, values, epsilon):
'\n Chooses an action for s based on an epsilon greedy strategy\n :param s: the state being evaluated\n :param values: The Q-values for all s-a pairs, nest Dict\n :param epsilon: the threshold for random choice, governing exploration vs. exploitation\n :return:\n '
if (random.random() < epsilon):
return random.choice(values[s].keys())
max_q_val = (- math.inf)
max_action = None
for (action, value) in values[s].items():
if (max_q_val < value):
max_q_val = value
max_action = action
return max_action
|
def pick_action(s, values, epsilon):
'\n Chooses an action for s based on an epsilon greedy strategy\n :param s: the state being evaluated\n :param values: The Q-values for all s-a pairs, nest Dict\n :param epsilon: the threshold for random choice, governing exploration vs. exploitation\n :return:\n '
if (random.random() < epsilon):
return random.choice(values[s].keys())
max_q_val = (- math.inf)
max_action = None
for (action, value) in values[s].items():
if (max_q_val < value):
max_q_val = value
max_action = action
return max_action<|docstring|>Chooses an action for s based on an epsilon greedy strategy
:param s: the state being evaluated
:param values: The Q-values for all s-a pairs, nest Dict
:param epsilon: the threshold for random choice, governing exploration vs. exploitation
:return:<|endoftext|>
|
5740ad327d1c3eae174852417deee798d609736db5995fb6a8306ce872fab31b
|
def reward_to_go(trajectory, discount):
'\n computes the reward-to-go for a given trajectory\n :param trajectory: List of states\n :param reward: function accepting state as argument and returning numeric reward\n :param gamma: discount factor, between 0 and 1\n :return:\n '
rtg = 0
for (i, state) in enumerate(trajectory):
r = reward(state)
rtg += ((discount ** i) * r)
return rtg
|
computes the reward-to-go for a given trajectory
:param trajectory: List of states
:param reward: function accepting state as argument and returning numeric reward
:param gamma: discount factor, between 0 and 1
:return:
|
algorithms/rl.py
|
reward_to_go
|
alexander-paskal/PySOP
| 0
|
python
|
def reward_to_go(trajectory, discount):
'\n computes the reward-to-go for a given trajectory\n :param trajectory: List of states\n :param reward: function accepting state as argument and returning numeric reward\n :param gamma: discount factor, between 0 and 1\n :return:\n '
rtg = 0
for (i, state) in enumerate(trajectory):
r = reward(state)
rtg += ((discount ** i) * r)
return rtg
|
def reward_to_go(trajectory, discount):
'\n computes the reward-to-go for a given trajectory\n :param trajectory: List of states\n :param reward: function accepting state as argument and returning numeric reward\n :param gamma: discount factor, between 0 and 1\n :return:\n '
rtg = 0
for (i, state) in enumerate(trajectory):
r = reward(state)
rtg += ((discount ** i) * r)
return rtg<|docstring|>computes the reward-to-go for a given trajectory
:param trajectory: List of states
:param reward: function accepting state as argument and returning numeric reward
:param gamma: discount factor, between 0 and 1
:return:<|endoftext|>
|
b61d1b949118d8d8ca5e21782d62a5853945dafafada6330969f19e930e7d154
|
def _nvp_validate_ext_gw(self, router_id, l3_gw_uuid, vlan_id):
'Verify data on fake NVP API client in order to validate\n plugin did set them properly\n '
ports = [port for port in self.fc._fake_lrouter_lport_dict.values() if ((port['lr_uuid'] == router_id) and (port['att_type'] == 'L3GatewayAttachment'))]
self.assertEqual(len(ports), 1)
self.assertEqual(ports[0]['attachment_gwsvc_uuid'], l3_gw_uuid)
self.assertEqual(ports[0].get('vlan_id'), vlan_id)
|
Verify data on fake NVP API client in order to validate
plugin did set them properly
|
neutron/tests/unit/nicira/test_nicira_plugin.py
|
_nvp_validate_ext_gw
|
osrg/quantum
| 1
|
python
|
def _nvp_validate_ext_gw(self, router_id, l3_gw_uuid, vlan_id):
'Verify data on fake NVP API client in order to validate\n plugin did set them properly\n '
ports = [port for port in self.fc._fake_lrouter_lport_dict.values() if ((port['lr_uuid'] == router_id) and (port['att_type'] == 'L3GatewayAttachment'))]
self.assertEqual(len(ports), 1)
self.assertEqual(ports[0]['attachment_gwsvc_uuid'], l3_gw_uuid)
self.assertEqual(ports[0].get('vlan_id'), vlan_id)
|
def _nvp_validate_ext_gw(self, router_id, l3_gw_uuid, vlan_id):
'Verify data on fake NVP API client in order to validate\n plugin did set them properly\n '
ports = [port for port in self.fc._fake_lrouter_lport_dict.values() if ((port['lr_uuid'] == router_id) and (port['att_type'] == 'L3GatewayAttachment'))]
self.assertEqual(len(ports), 1)
self.assertEqual(ports[0]['attachment_gwsvc_uuid'], l3_gw_uuid)
self.assertEqual(ports[0].get('vlan_id'), vlan_id)<|docstring|>Verify data on fake NVP API client in order to validate
plugin did set them properly<|endoftext|>
|
9f1af6f94382d2091b94b151b9ada79a1bb55397006e943d0526397e6cc26494
|
def __init__(self, experiment, config, auto_config=False):
'Initializes Sacred Experiments\n Sacred related settings\n :param experiment: sacared Experiment object\n :param config: config dic\n :param auto_config: if true, all settings from Sacred are\n configured on init\n '
super().__init__()
self.ex = experiment
self.ex.add_config(config)
self.config = config['logging']['sacred_logs']
if (auto_config is True):
self.add_mongo_observer(self.config)
self.add_settings(self.config['settings'])
|
Initializes Sacred Experiments
Sacred related settings
:param experiment: sacared Experiment object
:param config: config dic
:param auto_config: if true, all settings from Sacred are
configured on init
|
mtorch/core/experiment/sacred.py
|
__init__
|
NullConvergence/torch_temp
| 3
|
python
|
def __init__(self, experiment, config, auto_config=False):
'Initializes Sacred Experiments\n Sacred related settings\n :param experiment: sacared Experiment object\n :param config: config dic\n :param auto_config: if true, all settings from Sacred are\n configured on init\n '
super().__init__()
self.ex = experiment
self.ex.add_config(config)
self.config = config['logging']['sacred_logs']
if (auto_config is True):
self.add_mongo_observer(self.config)
self.add_settings(self.config['settings'])
|
def __init__(self, experiment, config, auto_config=False):
'Initializes Sacred Experiments\n Sacred related settings\n :param experiment: sacared Experiment object\n :param config: config dic\n :param auto_config: if true, all settings from Sacred are\n configured on init\n '
super().__init__()
self.ex = experiment
self.ex.add_config(config)
self.config = config['logging']['sacred_logs']
if (auto_config is True):
self.add_mongo_observer(self.config)
self.add_settings(self.config['settings'])<|docstring|>Initializes Sacred Experiments
Sacred related settings
:param experiment: sacared Experiment object
:param config: config dic
:param auto_config: if true, all settings from Sacred are
configured on init<|endoftext|>
|
7291df27ba9fd162375d52b505e02afecf5997f9489d04147cdd2537dc59d2d8
|
def get_query_results(query):
'Connect to the database with taking the SELECT statement as a parameter\n If working well, returns the SQL result, otherwise raise error.\n\n Args:\n query: A sequense of strings representing SQL SELECT statement.\n\n Returns:\n A list of SQL result that is fetched to the correcponding data.\n\n Raises:\n IOError: An Error occured if raised database error.\n '
try:
db = psycopg2.connect(database=DBNAME)
c = db.cursor()
c.execute(query)
result = c.fetchall()
db.close()
return result
except Exception as e:
print(type(e))
print(('Database error: ' + str(e)))
exit(1)
|
Connect to the database with taking the SELECT statement as a parameter
If working well, returns the SQL result, otherwise raise error.
Args:
query: A sequense of strings representing SQL SELECT statement.
Returns:
A list of SQL result that is fetched to the correcponding data.
Raises:
IOError: An Error occured if raised database error.
|
report.py
|
get_query_results
|
Poko56/udacity-log-analysis
| 0
|
python
|
def get_query_results(query):
'Connect to the database with taking the SELECT statement as a parameter\n If working well, returns the SQL result, otherwise raise error.\n\n Args:\n query: A sequense of strings representing SQL SELECT statement.\n\n Returns:\n A list of SQL result that is fetched to the correcponding data.\n\n Raises:\n IOError: An Error occured if raised database error.\n '
try:
db = psycopg2.connect(database=DBNAME)
c = db.cursor()
c.execute(query)
result = c.fetchall()
db.close()
return result
except Exception as e:
print(type(e))
print(('Database error: ' + str(e)))
exit(1)
|
def get_query_results(query):
'Connect to the database with taking the SELECT statement as a parameter\n If working well, returns the SQL result, otherwise raise error.\n\n Args:\n query: A sequense of strings representing SQL SELECT statement.\n\n Returns:\n A list of SQL result that is fetched to the correcponding data.\n\n Raises:\n IOError: An Error occured if raised database error.\n '
try:
db = psycopg2.connect(database=DBNAME)
c = db.cursor()
c.execute(query)
result = c.fetchall()
db.close()
return result
except Exception as e:
print(type(e))
print(('Database error: ' + str(e)))
exit(1)<|docstring|>Connect to the database with taking the SELECT statement as a parameter
If working well, returns the SQL result, otherwise raise error.
Args:
query: A sequense of strings representing SQL SELECT statement.
Returns:
A list of SQL result that is fetched to the correcponding data.
Raises:
IOError: An Error occured if raised database error.<|endoftext|>
|
5152f5b131167062ee18008df79e6fd5dd8ffcbd683fe5671e22b918d5c32195
|
def init_weights(self):
'Initiate the parameters from scratch.'
normal_init(self.fc_cls, std=self.init_std)
|
Initiate the parameters from scratch.
|
mmaction/models/heads/i3d_head.py
|
init_weights
|
wangqixun/VideoTemporalDetectionZeroShot
| 0
|
python
|
def init_weights(self):
normal_init(self.fc_cls, std=self.init_std)
|
def init_weights(self):
normal_init(self.fc_cls, std=self.init_std)<|docstring|>Initiate the parameters from scratch.<|endoftext|>
|
95b09a40d7ce4323a4820b0e0c1653d2f702f93fe6fa1e2962fb52e5511c95ce
|
def forward(self, x):
'Defines the computation performed at every call.\n\n Args:\n x (torch.Tensor): The input data.\n\n Returns:\n torch.Tensor: The classification scores for input samples.\n '
if (self.avg_pool is not None):
x = self.avg_pool(x)
if (self.dropout is not None):
x = self.dropout(x)
x = x.view(x.shape[0], (- 1))
cls_score = self.fc_cls(x)
return cls_score
|
Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The classification scores for input samples.
|
mmaction/models/heads/i3d_head.py
|
forward
|
wangqixun/VideoTemporalDetectionZeroShot
| 0
|
python
|
def forward(self, x):
'Defines the computation performed at every call.\n\n Args:\n x (torch.Tensor): The input data.\n\n Returns:\n torch.Tensor: The classification scores for input samples.\n '
if (self.avg_pool is not None):
x = self.avg_pool(x)
if (self.dropout is not None):
x = self.dropout(x)
x = x.view(x.shape[0], (- 1))
cls_score = self.fc_cls(x)
return cls_score
|
def forward(self, x):
'Defines the computation performed at every call.\n\n Args:\n x (torch.Tensor): The input data.\n\n Returns:\n torch.Tensor: The classification scores for input samples.\n '
if (self.avg_pool is not None):
x = self.avg_pool(x)
if (self.dropout is not None):
x = self.dropout(x)
x = x.view(x.shape[0], (- 1))
cls_score = self.fc_cls(x)
return cls_score<|docstring|>Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The classification scores for input samples.<|endoftext|>
|
40f4a7f74ffb96959dcaffe36b7725a8f5a9326c285c165e9a3df8910e25cd65
|
def init_weights(self):
'Initiate the parameters from scratch.'
pass
|
Initiate the parameters from scratch.
|
mmaction/models/heads/i3d_head.py
|
init_weights
|
wangqixun/VideoTemporalDetectionZeroShot
| 0
|
python
|
def init_weights(self):
pass
|
def init_weights(self):
pass<|docstring|>Initiate the parameters from scratch.<|endoftext|>
|
c8c84d2be6568bec9619bf658c6f7c037a18bb015a18de8783860c1bf2635a9b
|
def multilabel_categorical_crossentropy(self, y_true, y_pred):
'多标签分类的交叉熵\n 说明:y_true和y_pred的shape一致,y_true的元素非0即1,\n 1表示对应的类为目标类,0表示对应的类为非目标类。\n 警告:请保证y_pred的值域是全体实数,换言之一般情况下y_pred\n 不用加激活函数,尤其是不能加sigmoid或者softmax!预测\n 阶段则输出y_pred大于0的类。如有疑问,请仔细阅读并理解\n 本文。\n '
y_pred = ((1 - (2 * y_true)) * y_pred)
y_pred_neg = (y_pred - (y_true * 1000000000000.0))
y_pred_pos = (y_pred - ((1 - y_true) * 1000000000000.0))
zeros = torch.zeros_like(y_pred[(..., :1)])
y_pred_neg = torch.cat([y_pred_neg, zeros], dim=(- 1))
y_pred_pos = torch.cat([y_pred_pos, zeros], dim=(- 1))
neg_loss = torch.logsumexp(y_pred_neg, dim=(- 1))
pos_loss = torch.logsumexp(y_pred_pos, dim=(- 1))
loss = (neg_loss + pos_loss)
return loss
|
多标签分类的交叉熵
说明:y_true和y_pred的shape一致,y_true的元素非0即1,
1表示对应的类为目标类,0表示对应的类为非目标类。
警告:请保证y_pred的值域是全体实数,换言之一般情况下y_pred
不用加激活函数,尤其是不能加sigmoid或者softmax!预测
阶段则输出y_pred大于0的类。如有疑问,请仔细阅读并理解
本文。
|
mmaction/models/heads/i3d_head.py
|
multilabel_categorical_crossentropy
|
wangqixun/VideoTemporalDetectionZeroShot
| 0
|
python
|
def multilabel_categorical_crossentropy(self, y_true, y_pred):
'多标签分类的交叉熵\n 说明:y_true和y_pred的shape一致,y_true的元素非0即1,\n 1表示对应的类为目标类,0表示对应的类为非目标类。\n 警告:请保证y_pred的值域是全体实数,换言之一般情况下y_pred\n 不用加激活函数,尤其是不能加sigmoid或者softmax!预测\n 阶段则输出y_pred大于0的类。如有疑问,请仔细阅读并理解\n 本文。\n '
y_pred = ((1 - (2 * y_true)) * y_pred)
y_pred_neg = (y_pred - (y_true * 1000000000000.0))
y_pred_pos = (y_pred - ((1 - y_true) * 1000000000000.0))
zeros = torch.zeros_like(y_pred[(..., :1)])
y_pred_neg = torch.cat([y_pred_neg, zeros], dim=(- 1))
y_pred_pos = torch.cat([y_pred_pos, zeros], dim=(- 1))
neg_loss = torch.logsumexp(y_pred_neg, dim=(- 1))
pos_loss = torch.logsumexp(y_pred_pos, dim=(- 1))
loss = (neg_loss + pos_loss)
return loss
|
def multilabel_categorical_crossentropy(self, y_true, y_pred):
'多标签分类的交叉熵\n 说明:y_true和y_pred的shape一致,y_true的元素非0即1,\n 1表示对应的类为目标类,0表示对应的类为非目标类。\n 警告:请保证y_pred的值域是全体实数,换言之一般情况下y_pred\n 不用加激活函数,尤其是不能加sigmoid或者softmax!预测\n 阶段则输出y_pred大于0的类。如有疑问,请仔细阅读并理解\n 本文。\n '
y_pred = ((1 - (2 * y_true)) * y_pred)
y_pred_neg = (y_pred - (y_true * 1000000000000.0))
y_pred_pos = (y_pred - ((1 - y_true) * 1000000000000.0))
zeros = torch.zeros_like(y_pred[(..., :1)])
y_pred_neg = torch.cat([y_pred_neg, zeros], dim=(- 1))
y_pred_pos = torch.cat([y_pred_pos, zeros], dim=(- 1))
neg_loss = torch.logsumexp(y_pred_neg, dim=(- 1))
pos_loss = torch.logsumexp(y_pred_pos, dim=(- 1))
loss = (neg_loss + pos_loss)
return loss<|docstring|>多标签分类的交叉熵
说明:y_true和y_pred的shape一致,y_true的元素非0即1,
1表示对应的类为目标类,0表示对应的类为非目标类。
警告:请保证y_pred的值域是全体实数,换言之一般情况下y_pred
不用加激活函数,尤其是不能加sigmoid或者softmax!预测
阶段则输出y_pred大于0的类。如有疑问,请仔细阅读并理解
本文。<|endoftext|>
|
b0b0c2ae610c7c2feb89a136cca58a055881c0e0cf4eba0fad4b8788f63f73bf
|
def global_pointer_crossentropy(self, y_true, y_pred):
'给GlobalPointer设计的交叉熵\n '
y_pred = y_pred['global_pointer_cls']
gt_iou_map = y_true
y_true = (gt_iou_map > 0.9).float()
(bs, N, N) = y_pred.shape
y_true = y_true.reshape([bs, (- 1)])
y_pred = y_pred.reshape([bs, (- 1)])
return torch.mean(self.multilabel_categorical_crossentropy(y_true, y_pred))
|
给GlobalPointer设计的交叉熵
|
mmaction/models/heads/i3d_head.py
|
global_pointer_crossentropy
|
wangqixun/VideoTemporalDetectionZeroShot
| 0
|
python
|
def global_pointer_crossentropy(self, y_true, y_pred):
'\n '
y_pred = y_pred['global_pointer_cls']
gt_iou_map = y_true
y_true = (gt_iou_map > 0.9).float()
(bs, N, N) = y_pred.shape
y_true = y_true.reshape([bs, (- 1)])
y_pred = y_pred.reshape([bs, (- 1)])
return torch.mean(self.multilabel_categorical_crossentropy(y_true, y_pred))
|
def global_pointer_crossentropy(self, y_true, y_pred):
'\n '
y_pred = y_pred['global_pointer_cls']
gt_iou_map = y_true
y_true = (gt_iou_map > 0.9).float()
(bs, N, N) = y_pred.shape
y_true = y_true.reshape([bs, (- 1)])
y_pred = y_pred.reshape([bs, (- 1)])
return torch.mean(self.multilabel_categorical_crossentropy(y_true, y_pred))<|docstring|>给GlobalPointer设计的交叉熵<|endoftext|>
|
552462b5fa9f308b8d5b6ed1a6ca572ec73431fdc413cee0db7a1a8f127a6d92
|
def set_script(self, callbacks):
'Set a scripted sequence of callbacks.\n\n :param callbacks: The callbacks. They should be a list of 2-tuples.\n `("name_of_message", {"callback_name": arguments})`. E.g.,\n ```\n [\n ("run", {"on_success": ({},), "on_summary": None}),\n ("pull", {\n "on_success": None,\n "on_summary": None,\n "on_records":\n })\n ]\n ```\n Note that arguments can be `None`. In this case, ScriptedConnection\n will make a guess on best-suited default arguments.\n '
self._script = callbacks
self._script_pos = 0
|
Set a scripted sequence of callbacks.
:param callbacks: The callbacks. They should be a list of 2-tuples.
`("name_of_message", {"callback_name": arguments})`. E.g.,
```
[
("run", {"on_success": ({},), "on_summary": None}),
("pull", {
"on_success": None,
"on_summary": None,
"on_records":
})
]
```
Note that arguments can be `None`. In this case, ScriptedConnection
will make a guess on best-suited default arguments.
|
tests/unit/async_/work/_fake_connection.py
|
set_script
|
polyrize/neo4j-python-driver
| 0
|
python
|
def set_script(self, callbacks):
'Set a scripted sequence of callbacks.\n\n :param callbacks: The callbacks. They should be a list of 2-tuples.\n `("name_of_message", {"callback_name": arguments})`. E.g.,\n ```\n [\n ("run", {"on_success": ({},), "on_summary": None}),\n ("pull", {\n "on_success": None,\n "on_summary": None,\n "on_records":\n })\n ]\n ```\n Note that arguments can be `None`. In this case, ScriptedConnection\n will make a guess on best-suited default arguments.\n '
self._script = callbacks
self._script_pos = 0
|
def set_script(self, callbacks):
'Set a scripted sequence of callbacks.\n\n :param callbacks: The callbacks. They should be a list of 2-tuples.\n `("name_of_message", {"callback_name": arguments})`. E.g.,\n ```\n [\n ("run", {"on_success": ({},), "on_summary": None}),\n ("pull", {\n "on_success": None,\n "on_summary": None,\n "on_records":\n })\n ]\n ```\n Note that arguments can be `None`. In this case, ScriptedConnection\n will make a guess on best-suited default arguments.\n '
self._script = callbacks
self._script_pos = 0<|docstring|>Set a scripted sequence of callbacks.
:param callbacks: The callbacks. They should be a list of 2-tuples.
`("name_of_message", {"callback_name": arguments})`. E.g.,
```
[
("run", {"on_success": ({},), "on_summary": None}),
("pull", {
"on_success": None,
"on_summary": None,
"on_records":
})
]
```
Note that arguments can be `None`. In this case, ScriptedConnection
will make a guess on best-suited default arguments.<|endoftext|>
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.