hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7953ce9836a274d9c32140c193b43e238251adbf | 31,921 | py | Python | Incident-Response/Tools/dfirtrack/dfirtrack_artifacts/tests/artifact/test_artifact_exporter_spreadsheet_xls_views.py | sn0b4ll/Incident-Playbook | cf519f58fcd4255674662b3620ea97c1091c1efb | [
"MIT"
] | 1 | 2021-07-24T17:22:50.000Z | 2021-07-24T17:22:50.000Z | Incident-Response/Tools/dfirtrack/dfirtrack_artifacts/tests/artifact/test_artifact_exporter_spreadsheet_xls_views.py | sn0b4ll/Incident-Playbook | cf519f58fcd4255674662b3620ea97c1091c1efb | [
"MIT"
] | 2 | 2022-02-28T03:40:31.000Z | 2022-02-28T03:40:52.000Z | Incident-Response/Tools/dfirtrack/dfirtrack_artifacts/tests/artifact/test_artifact_exporter_spreadsheet_xls_views.py | sn0b4ll/Incident-Playbook | cf519f58fcd4255674662b3620ea97c1091c1efb | [
"MIT"
] | 2 | 2022-02-25T08:34:51.000Z | 2022-03-16T17:29:44.000Z | from datetime import datetime
from django.contrib.auth.models import User
from django.test import TestCase
from django.utils import timezone
from dfirtrack_artifacts.exporter.spreadsheet.xls import artifact_cron
from dfirtrack_artifacts.models import Artifact, Artifactstatus, Artifacttype
from dfirtrack_config.models import ArtifactExporterSpreadsheetXlsConfigModel, MainConfigModel
from dfirtrack_main.models import System, Systemstatus
from mock import patch
import urllib.parse
import xlrd
class ArtifactExporterSpreadsheetXlsViewTestCase(TestCase):
    """ artifact exporter spreadsheet XLS view tests """

    @classmethod
    def setUpTestData(cls):
        """ create objects shared by all test methods of this class """

        # create user
        test_user = User.objects.create_user(username='testuser_artifact_exporter_spreadsheet_xls', password='LTzoNHIdxiJydsaJKf1G')

        # create object
        artifactstatus_3 = Artifactstatus.objects.create(artifactstatus_name = 'artifactstatus_3')

        # create object
        artifactstatus_1 = Artifactstatus.objects.create(
            artifactstatus_name='artifactstatus_1',
            artifactstatus_note='lorem ipsum',
        )

        # create objects
        artifacttype_1 = Artifacttype.objects.create(artifacttype_name='artifacttype_1')
        artifacttype_2 = Artifacttype.objects.create(
            artifacttype_name='artifacttype_2',
            artifacttype_note='lorem ipsum',
        )

        # create object
        systemstatus_1 = Systemstatus.objects.create(systemstatus_name='systemstatus_1')

        # create object
        system_1 = System.objects.create(
            system_name='artifact_exporter_spreadsheet_xls_system_1',
            systemstatus = systemstatus_1,
            system_modify_time = timezone.now(),
            system_created_by_user_id = test_user,
            system_modified_by_user_id = test_user,
        )

        """ create artifacts """

        # mock timezone.now()
        t_1 = datetime(2012, 11, 10, 12, 34, tzinfo=timezone.utc)
        with patch.object(timezone, 'now', return_value=t_1):

            # create object with maximum attributes
            Artifact.objects.create(
                artifact_name = 'artifact_exporter_spreadsheet_xls_artifact_1_all_attributes',
                artifactstatus = artifactstatus_3,
                artifacttype = artifacttype_1,
                system = system_1,
                # raw string: '\T' and '\m' are invalid escape sequences in a
                # regular string literal (DeprecationWarning, future SyntaxError);
                # the raw string has the identical value
                artifact_source_path = r'C:\Temp\malicious.exe',
                artifact_note_internal = 'artifact note for internal usage',
                artifact_note_external = 'artifact note for external usage',
                artifact_note_analysisresult = 'artifact note for analysis result',
                artifact_md5 = 'd41d8cd98f00b204e9800998ecf8427e',
                artifact_sha1 = 'da39a3ee5e6b4b0d3255bfef95601890afd80709',
                artifact_sha256 = 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855',
                artifact_created_by_user_id = test_user,
                artifact_modified_by_user_id = test_user,
            )

        # mock timezone.now()
        t_2 = datetime(2009, 8, 7, 23, 45, tzinfo=timezone.utc)
        with patch.object(timezone, 'now', return_value=t_2):

            # create object with minimum attributes
            Artifact.objects.create(
                artifact_name = 'artifact_exporter_spreadsheet_xls_artifact_2_no_attributes',
                artifactstatus = artifactstatus_3,
                artifacttype = artifacttype_1,
                system = system_1,
                artifact_created_by_user_id = test_user,
                artifact_modified_by_user_id = test_user,
            )

        # create object that will not be exported (artifactstatus_1 is not in the export choice)
        Artifact.objects.create(
            artifact_name = 'artifact_exporter_spreadsheet_xls_artifact_3_not_exported',
            artifactstatus = artifactstatus_1,
            artifacttype = artifacttype_2,
            system = system_1,
            artifact_created_by_user_id = test_user,
            artifact_modified_by_user_id = test_user,
        )

    def test_artifact_exporter_spreadsheet_xls_not_logged_in(self):
        """ test exporter view - anonymous users are redirected to login """

        # create url
        destination = '/login/?next=' + urllib.parse.quote('/artifacts/artifact/exporter/spreadsheet/xls/artifact/', safe='')
        # get response
        response = self.client.get('/artifacts/artifact/exporter/spreadsheet/xls/artifact/', follow=True)
        # compare
        self.assertRedirects(response, destination, status_code=302, target_status_code=200)

    def test_artifact_exporter_spreadsheet_xls_logged_in(self):
        """ test exporter view - authenticated users get the spreadsheet """

        # login testuser
        self.client.login(username='testuser_artifact_exporter_spreadsheet_xls', password='LTzoNHIdxiJydsaJKf1G')
        # get response
        response = self.client.get('/artifacts/artifact/exporter/spreadsheet/xls/artifact/')
        # compare
        self.assertEqual(response.status_code, 200)

    def test_artifact_exporter_spreadsheet_xls_redirect(self):
        """ test exporter view - URL without trailing slash is redirected """

        # login testuser
        self.client.login(username='testuser_artifact_exporter_spreadsheet_xls', password='LTzoNHIdxiJydsaJKf1G')
        # create url
        destination = urllib.parse.quote('/artifacts/artifact/exporter/spreadsheet/xls/artifact/', safe='/')
        # get response
        response = self.client.get('/artifacts/artifact/exporter/spreadsheet/xls/artifact', follow=True)
        # compare
        self.assertRedirects(response, destination, status_code=301, target_status_code=200)

    def test_artifact_exporter_spreadsheet_xls_minimal_spreadsheet(self):
        """ test exporter view - only mandatory columns, no extra worksheets """

        """ modify config section """

        # get and modify config to show only mandatory columns
        artifact_exporter_spreadsheet_xls_config_model = ArtifactExporterSpreadsheetXlsConfigModel.objects.get(artifact_exporter_spreadsheet_xls_config_name='ArtifactExporterSpreadsheetXlsConfig')
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_artifact_id = False
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_system_id = False
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_system_name = False
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_artifactstatus = False
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_artifactpriority = False
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_artifacttype = False
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_artifact_source_path = False
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_artifact_storage_path = False
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_artifact_note_internal = False
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_artifact_note_external = False
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_artifact_note_analysisresult = False
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_artifact_md5 = False
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_artifact_sha1 = False
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_artifact_sha256 = False
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_artifact_create_time = False
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_artifact_modify_time = False
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_worksheet_artifactstatus = False
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_worksheet_artifacttype = False
        artifact_exporter_spreadsheet_xls_config_model.save()

        # get object
        artifactstatus_3 = Artifactstatus.objects.get(artifactstatus_name = 'artifactstatus_3')
        # add artifactstatus to choice for export (m2m add saves implicitly)
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_choice_artifactstatus.add(artifactstatus_3)

        """ call view section """

        # login testuser
        self.client.login(username='testuser_artifact_exporter_spreadsheet_xls', password='LTzoNHIdxiJydsaJKf1G')
        # mock timezone.now()
        t1_now = timezone.now()
        with patch.object(timezone, 'now', return_value=t1_now):
            # get response
            response = self.client.get('/artifacts/artifact/exporter/spreadsheet/xls/artifact/')

        """ get file section """

        # get artifactlist from response content
        workbook = response.content
        # open artifactlist directly from byte stream
        artifactlist = xlrd.open_workbook(file_contents=workbook)

        """ prepare objects section """

        # get objects
        artifact_1 = Artifact.objects.get(artifact_name = 'artifact_exporter_spreadsheet_xls_artifact_1_all_attributes')
        artifact_2 = Artifact.objects.get(artifact_name = 'artifact_exporter_spreadsheet_xls_artifact_2_no_attributes')

        # get sheets
        sheet_artifacts = artifactlist.sheet_by_name('artifacts')

        """ compare values section """

        # compare non-available sheets
        self.assertRaises(xlrd.biffh.XLRDError, artifactlist.sheet_by_name, sheet_name='artifactstatus')
        self.assertRaises(xlrd.biffh.XLRDError, artifactlist.sheet_by_name, sheet_name='artifacttype')
        # compare number of rows and columns
        self.assertEqual(sheet_artifacts.nrows, 6)
        self.assertEqual(sheet_artifacts.ncols, 2)
        # compare headlines
        self.assertEqual(sheet_artifacts.row_values(0), ['Artifact', ''])
        # compare content - artifact 1
        self.assertEqual(sheet_artifacts.cell(1,0).value, artifact_1.artifact_name)
        # compare content - artifact 2
        self.assertEqual(sheet_artifacts.cell(2,0).value, artifact_2.artifact_name)
        # compare content - metadata
        self.assertEqual(sheet_artifacts.cell(4,0).value, 'Created:')
        self.assertEqual(sheet_artifacts.cell(4,1).value, t1_now.strftime('%Y-%m-%d %H:%M'))
        self.assertEqual(sheet_artifacts.cell(5,0).value, 'Created by:')
        self.assertEqual(sheet_artifacts.cell(5,1).value, 'testuser_artifact_exporter_spreadsheet_xls')

    def test_artifact_exporter_spreadsheet_xls_complete_spreadsheet(self):
        """ test exporter view - all columns and extra worksheets enabled """

        """ modify config section """

        # get and modify config to show all columns and sheets
        artifact_exporter_spreadsheet_xls_config_model = ArtifactExporterSpreadsheetXlsConfigModel.objects.get(artifact_exporter_spreadsheet_xls_config_name='ArtifactExporterSpreadsheetXlsConfig')
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_artifact_id = True
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_system_id = True
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_system_name = True
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_artifactstatus = True
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_artifactpriority = True
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_artifacttype = True
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_artifact_source_path = True
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_artifact_storage_path = True
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_artifact_note_internal = True
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_artifact_note_external = True
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_artifact_note_analysisresult = True
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_artifact_md5 = True
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_artifact_sha1 = True
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_artifact_sha256 = True
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_artifact_create_time = True
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_artifact_modify_time = True
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_worksheet_artifactstatus = True
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_worksheet_artifacttype = True
        artifact_exporter_spreadsheet_xls_config_model.save()

        # get object
        artifactstatus_3 = Artifactstatus.objects.get(artifactstatus_name = 'artifactstatus_3')
        # add artifactstatus to choice for export (m2m add saves implicitly)
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_choice_artifactstatus.add(artifactstatus_3)

        """ call view section """

        # login testuser
        self.client.login(username='testuser_artifact_exporter_spreadsheet_xls', password='LTzoNHIdxiJydsaJKf1G')
        # mock timezone.now()
        t2_now = timezone.now()
        with patch.object(timezone, 'now', return_value=t2_now):
            # get response
            response = self.client.get('/artifacts/artifact/exporter/spreadsheet/xls/artifact/')

        """ get file section """

        # get artifactlist from response content
        workbook = response.content
        # open artifactlist directly from byte stream
        artifactlist = xlrd.open_workbook(file_contents=workbook)

        """ prepare objects section """

        # get objects
        artifact_1 = Artifact.objects.get(artifact_name = 'artifact_exporter_spreadsheet_xls_artifact_1_all_attributes')
        artifact_2 = Artifact.objects.get(artifact_name = 'artifact_exporter_spreadsheet_xls_artifact_2_no_attributes')

        # create lists for easier comparison with whole columns - artifactstatus
        artifactstatus_id_list = ['ID']
        artifactstatus_name_list = ['Artifactstatus']
        artifactstatus_note_list = ['Note']
        all_artifactstatus = Artifactstatus.objects.all().order_by('artifactstatus_name')
        for artifactstatus_object in all_artifactstatus:
            # the conversion to float was carried out, because otherwise the return values from the spreadsheet would have had to be converted to int, which would have been more time-consuming
            artifactstatus_id_list.append(float(artifactstatus_object.artifactstatus_id))
            artifactstatus_name_list.append(artifactstatus_object.artifactstatus_name)
            if artifactstatus_object.artifactstatus_note:
                artifactstatus_note_list.append(artifactstatus_object.artifactstatus_note)
            else:
                artifactstatus_note_list.append('---')

        # create lists for easier comparison with whole columns - artifacttype
        artifacttype_id_list = ['ID']
        artifacttype_name_list = ['Artifacttype']
        artifacttype_note_list = ['Note']
        all_artifacttype = Artifacttype.objects.all().order_by('artifacttype_name')
        for artifacttype_object in all_artifacttype:
            # the conversion to float was carried out, because otherwise the return values from the spreadsheet would have had to be converted to int, which would have been more time-consuming
            artifacttype_id_list.append(float(artifacttype_object.artifacttype_id))
            artifacttype_name_list.append(artifacttype_object.artifacttype_name)
            if artifacttype_object.artifacttype_note:
                artifacttype_note_list.append(artifacttype_object.artifacttype_note)
            else:
                artifacttype_note_list.append('---')

        # get sheets
        sheet_artifacts = artifactlist.sheet_by_name('artifacts')
        sheet_artifactstatus = artifactlist.sheet_by_name('artifactstatus')
        sheet_artifacttype = artifactlist.sheet_by_name('artifacttype')

        """ compare values section """

        # compare number of rows and columns
        self.assertEqual(sheet_artifacts.nrows, 6)
        self.assertEqual(sheet_artifacts.ncols, 17)
        self.assertEqual(sheet_artifactstatus.nrows, 14)
        self.assertEqual(sheet_artifactstatus.ncols, 3)
        self.assertEqual(sheet_artifacttype.nrows, 7)
        self.assertEqual(sheet_artifacttype.ncols, 3)
        # compare headlines
        self.assertEqual(sheet_artifacts.row_values(0), ['Artifact ID', 'Artifact', 'System ID', 'System', 'Artifactstatus', 'Artifactpriority', 'Artifacttype', 'Source path', 'Storage path', 'Internal note','External note', 'Analysis result', 'MD5', 'SHA1', 'SHA256', 'Created', 'Modified'])
        self.assertEqual(sheet_artifactstatus.row_values(0), ['ID', 'Artifactstatus', 'Note'])
        self.assertEqual(sheet_artifacttype.row_values(0), ['ID', 'Artifacttype', 'Note'])
        # compare content - artifact 1
        self.assertEqual(int(sheet_artifacts.cell(1,0).value), artifact_1.artifact_id)
        self.assertEqual(sheet_artifacts.cell(1,1).value, artifact_1.artifact_name)
        self.assertEqual(int(sheet_artifacts.cell(1,2).value), artifact_1.system.system_id)
        self.assertEqual(sheet_artifacts.cell(1,3).value, artifact_1.system.system_name)
        self.assertEqual(sheet_artifacts.cell(1,4).value, artifact_1.artifactstatus.artifactstatus_name)
        self.assertEqual(sheet_artifacts.cell(1,5).value, artifact_1.artifactpriority.artifactpriority_name)
        self.assertEqual(sheet_artifacts.cell(1,6).value, artifact_1.artifacttype.artifacttype_name)
        self.assertEqual(sheet_artifacts.cell(1,7).value, artifact_1.artifact_source_path)
        self.assertEqual(sheet_artifacts.cell(1,8).value, artifact_1.artifact_storage_path)
        self.assertEqual(sheet_artifacts.cell(1,9).value, 'artifact note for internal usage')       # artifact_note_internal
        self.assertEqual(sheet_artifacts.cell(1,10).value, 'artifact note for external usage')      # artifact_note_external
        self.assertEqual(sheet_artifacts.cell(1,11).value, 'artifact note for analysis result')     # artifact_note_analysisresult
        self.assertEqual(sheet_artifacts.cell(1,12).value, artifact_1.artifact_md5)
        self.assertEqual(sheet_artifacts.cell(1,13).value, artifact_1.artifact_sha1)
        self.assertEqual(sheet_artifacts.cell(1,14).value, artifact_1.artifact_sha256)
        self.assertEqual(sheet_artifacts.cell(1,15).value, '2012-11-10 12:34')
        self.assertEqual(sheet_artifacts.cell(1,16).value, '2012-11-10 12:34')
        # compare content - artifact 2
        self.assertEqual(int(sheet_artifacts.cell(2,0).value), artifact_2.artifact_id)
        self.assertEqual(sheet_artifacts.cell(2,1).value, artifact_2.artifact_name)
        self.assertEqual(int(sheet_artifacts.cell(2,2).value), artifact_2.system.system_id)
        self.assertEqual(sheet_artifacts.cell(2,3).value, artifact_2.system.system_name)
        self.assertEqual(sheet_artifacts.cell(2,4).value, artifact_2.artifactstatus.artifactstatus_name)
        self.assertEqual(sheet_artifacts.cell(2,5).value, artifact_2.artifactpriority.artifactpriority_name)
        self.assertEqual(sheet_artifacts.cell(2,6).value, artifact_2.artifacttype.artifacttype_name)
        self.assertEqual(sheet_artifacts.cell(2,7).value, '')
        self.assertEqual(sheet_artifacts.cell(2,8).value, artifact_2.artifact_storage_path)
        self.assertEqual(sheet_artifacts.cell(2,9).value, '')
        self.assertEqual(sheet_artifacts.cell(2,10).value, '')
        self.assertEqual(sheet_artifacts.cell(2,11).value, '')
        self.assertEqual(sheet_artifacts.cell(2,12).value, '')
        self.assertEqual(sheet_artifacts.cell(2,13).value, '')
        self.assertEqual(sheet_artifacts.cell(2,14).value, '')
        self.assertEqual(sheet_artifacts.cell(2,15).value, '2009-08-07 23:45')
        self.assertEqual(sheet_artifacts.cell(2,16).value, '2009-08-07 23:45')
        # compare content - artifactstatus worksheet (whole columns)
        self.assertEqual(sheet_artifactstatus.col_values(0), artifactstatus_id_list)
        self.assertEqual(sheet_artifactstatus.col_values(1), artifactstatus_name_list)
        self.assertEqual(sheet_artifactstatus.col_values(2), artifactstatus_note_list)
        # compare content - artifacttype worksheet (whole columns)
        self.assertEqual(sheet_artifacttype.col_values(0), artifacttype_id_list)
        self.assertEqual(sheet_artifacttype.col_values(1), artifacttype_name_list)
        self.assertEqual(sheet_artifacttype.col_values(2), artifacttype_note_list)
        # compare content - metadata
        self.assertEqual(sheet_artifacts.cell(4,0).value, 'Created:')
        self.assertEqual(sheet_artifacts.cell(4,1).value, t2_now.strftime('%Y-%m-%d %H:%M'))
        self.assertEqual(sheet_artifacts.cell(5,0).value, 'Created by:')
        self.assertEqual(sheet_artifacts.cell(5,1).value, 'testuser_artifact_exporter_spreadsheet_xls')

    def test_artifact_exporter_spreadsheet_xls_cron_complete_spreadsheet(self):
        """ test exporter cron job - spreadsheet is written to disk instead of served """

        """ modify config section """

        # get and modify main config
        main_config_model = MainConfigModel.objects.get(main_config_name = 'MainConfig')
        main_config_model.cron_export_path = '/tmp'
        main_config_model.cron_username = 'cron'
        main_config_model.save()

        # get and modify config to show all columns and sheets
        artifact_exporter_spreadsheet_xls_config_model = ArtifactExporterSpreadsheetXlsConfigModel.objects.get(artifact_exporter_spreadsheet_xls_config_name='ArtifactExporterSpreadsheetXlsConfig')
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_artifact_id = True
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_system_id = True
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_system_name = True
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_artifactstatus = True
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_artifactpriority = True
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_artifacttype = True
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_artifact_source_path = True
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_artifact_storage_path = True
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_artifact_note_internal = True
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_artifact_note_external = True
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_artifact_note_analysisresult = True
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_artifact_md5 = True
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_artifact_sha1 = True
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_artifact_sha256 = True
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_artifact_create_time = True
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_artifact_modify_time = True
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_worksheet_artifactstatus = True
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_worksheet_artifacttype = True
        artifact_exporter_spreadsheet_xls_config_model.save()

        # get object
        artifactstatus_3 = Artifactstatus.objects.get(artifactstatus_name = 'artifactstatus_3')
        # add artifactstatus to choice for export (m2m add saves implicitly)
        artifact_exporter_spreadsheet_xls_config_model.artifactlist_xls_choice_artifactstatus.add(artifactstatus_3)

        """ call view section """

        # login testuser
        self.client.login(username='testuser_artifact_exporter_spreadsheet_xls', password='LTzoNHIdxiJydsaJKf1G')
        # mock timezone.now()
        t3_now = timezone.now()
        with patch.object(timezone, 'now', return_value=t3_now):
            # create spreadsheet without GET by directly calling the function
            artifact_cron()

        """ get file section """

        # refresh config
        main_config_model.refresh_from_db()
        # get time for output file
        filetime = t3_now.strftime('%Y%m%d_%H%M')
        # prepare output file path
        output_file_path = main_config_model.cron_export_path + '/' + filetime + '_artifacts.xls'
        # open file from temp folder
        xls_disk = xlrd.open_workbook(output_file_path)

        """ prepare objects section """

        # get objects
        artifact_1 = Artifact.objects.get(artifact_name = 'artifact_exporter_spreadsheet_xls_artifact_1_all_attributes')
        artifact_2 = Artifact.objects.get(artifact_name = 'artifact_exporter_spreadsheet_xls_artifact_2_no_attributes')

        # create lists for easier comparison with whole columns - artifactstatus
        artifactstatus_id_list = ['ID']
        artifactstatus_name_list = ['Artifactstatus']
        artifactstatus_note_list = ['Note']
        all_artifactstatus = Artifactstatus.objects.all().order_by('artifactstatus_name')
        for artifactstatus_object in all_artifactstatus:
            # the conversion to float was carried out, because otherwise the return values from the spreadsheet would have had to be converted to int, which would have been more time-consuming
            artifactstatus_id_list.append(float(artifactstatus_object.artifactstatus_id))
            artifactstatus_name_list.append(artifactstatus_object.artifactstatus_name)
            if artifactstatus_object.artifactstatus_note:
                artifactstatus_note_list.append(artifactstatus_object.artifactstatus_note)
            else:
                artifactstatus_note_list.append('---')

        # create lists for easier comparison with whole columns - artifacttype
        artifacttype_id_list = ['ID']
        artifacttype_name_list = ['Artifacttype']
        artifacttype_note_list = ['Note']
        all_artifacttype = Artifacttype.objects.all().order_by('artifacttype_name')
        for artifacttype_object in all_artifacttype:
            # the conversion to float was carried out, because otherwise the return values from the spreadsheet would have had to be converted to int, which would have been more time-consuming
            artifacttype_id_list.append(float(artifacttype_object.artifacttype_id))
            artifacttype_name_list.append(artifacttype_object.artifacttype_name)
            if artifacttype_object.artifacttype_note:
                artifacttype_note_list.append(artifacttype_object.artifacttype_note)
            else:
                artifacttype_note_list.append('---')

        # get sheets
        sheet_artifacts = xls_disk.sheet_by_name('artifacts')
        sheet_artifactstatus = xls_disk.sheet_by_name('artifactstatus')
        sheet_artifacttype = xls_disk.sheet_by_name('artifacttype')

        """ compare values section """

        # compare number of rows and columns
        self.assertEqual(sheet_artifacts.nrows, 6)
        self.assertEqual(sheet_artifacts.ncols, 17)
        self.assertEqual(sheet_artifactstatus.nrows, 14)
        self.assertEqual(sheet_artifactstatus.ncols, 3)
        self.assertEqual(sheet_artifacttype.nrows, 7)
        self.assertEqual(sheet_artifacttype.ncols, 3)
        # compare headlines
        self.assertEqual(sheet_artifacts.row_values(0), ['Artifact ID', 'Artifact', 'System ID', 'System', 'Artifactstatus', 'Artifactpriority', 'Artifacttype', 'Source path', 'Storage path', 'Internal note','External note', 'Analysis result', 'MD5', 'SHA1', 'SHA256', 'Created', 'Modified'])
        self.assertEqual(sheet_artifactstatus.row_values(0), ['ID', 'Artifactstatus', 'Note'])
        self.assertEqual(sheet_artifacttype.row_values(0), ['ID', 'Artifacttype', 'Note'])
        # compare content - artifact 1
        self.assertEqual(int(sheet_artifacts.cell(1,0).value), artifact_1.artifact_id)
        self.assertEqual(sheet_artifacts.cell(1,1).value, artifact_1.artifact_name)
        self.assertEqual(int(sheet_artifacts.cell(1,2).value), artifact_1.system.system_id)
        self.assertEqual(sheet_artifacts.cell(1,3).value, artifact_1.system.system_name)
        self.assertEqual(sheet_artifacts.cell(1,4).value, artifact_1.artifactstatus.artifactstatus_name)
        self.assertEqual(sheet_artifacts.cell(1,5).value, artifact_1.artifactpriority.artifactpriority_name)
        self.assertEqual(sheet_artifacts.cell(1,6).value, artifact_1.artifacttype.artifacttype_name)
        self.assertEqual(sheet_artifacts.cell(1,7).value, artifact_1.artifact_source_path)
        self.assertEqual(sheet_artifacts.cell(1,8).value, artifact_1.artifact_storage_path)
        self.assertEqual(sheet_artifacts.cell(1,9).value, 'artifact note for internal usage')       # artifact_note_internal
        self.assertEqual(sheet_artifacts.cell(1,10).value, 'artifact note for external usage')      # artifact_note_external
        self.assertEqual(sheet_artifacts.cell(1,11).value, 'artifact note for analysis result')     # artifact_note_analysisresult
        self.assertEqual(sheet_artifacts.cell(1,12).value, artifact_1.artifact_md5)
        self.assertEqual(sheet_artifacts.cell(1,13).value, artifact_1.artifact_sha1)
        self.assertEqual(sheet_artifacts.cell(1,14).value, artifact_1.artifact_sha256)
        self.assertEqual(sheet_artifacts.cell(1,15).value, '2012-11-10 12:34')
        self.assertEqual(sheet_artifacts.cell(1,16).value, '2012-11-10 12:34')
        # compare content - artifact 2
        self.assertEqual(int(sheet_artifacts.cell(2,0).value), artifact_2.artifact_id)
        self.assertEqual(sheet_artifacts.cell(2,1).value, artifact_2.artifact_name)
        self.assertEqual(int(sheet_artifacts.cell(2,2).value), artifact_2.system.system_id)
        self.assertEqual(sheet_artifacts.cell(2,3).value, artifact_2.system.system_name)
        self.assertEqual(sheet_artifacts.cell(2,4).value, artifact_2.artifactstatus.artifactstatus_name)
        self.assertEqual(sheet_artifacts.cell(2,5).value, artifact_2.artifactpriority.artifactpriority_name)
        self.assertEqual(sheet_artifacts.cell(2,6).value, artifact_2.artifacttype.artifacttype_name)
        self.assertEqual(sheet_artifacts.cell(2,7).value, '')
        self.assertEqual(sheet_artifacts.cell(2,8).value, artifact_2.artifact_storage_path)
        self.assertEqual(sheet_artifacts.cell(2,9).value, '')
        self.assertEqual(sheet_artifacts.cell(2,10).value, '')
        self.assertEqual(sheet_artifacts.cell(2,11).value, '')
        self.assertEqual(sheet_artifacts.cell(2,12).value, '')
        self.assertEqual(sheet_artifacts.cell(2,13).value, '')
        self.assertEqual(sheet_artifacts.cell(2,14).value, '')
        self.assertEqual(sheet_artifacts.cell(2,15).value, '2009-08-07 23:45')
        self.assertEqual(sheet_artifacts.cell(2,16).value, '2009-08-07 23:45')
        # compare content - artifactstatus worksheet (whole columns)
        self.assertEqual(sheet_artifactstatus.col_values(0), artifactstatus_id_list)
        self.assertEqual(sheet_artifactstatus.col_values(1), artifactstatus_name_list)
        self.assertEqual(sheet_artifactstatus.col_values(2), artifactstatus_note_list)
        # compare content - artifacttype worksheet (whole columns)
        self.assertEqual(sheet_artifacttype.col_values(0), artifacttype_id_list)
        self.assertEqual(sheet_artifacttype.col_values(1), artifacttype_name_list)
        self.assertEqual(sheet_artifacttype.col_values(2), artifacttype_note_list)
        # compare content - metadata
        self.assertEqual(sheet_artifacts.cell(4,0).value, 'Created:')
        self.assertEqual(sheet_artifacts.cell(4,1).value, t3_now.strftime('%Y-%m-%d %H:%M'))
        self.assertEqual(sheet_artifacts.cell(5,0).value, 'Created by:')
        self.assertEqual(sheet_artifacts.cell(5,1).value, 'cron')
| 61.268714 | 293 | 0.740986 |
7953ceede916ce16308fd64918d79c75fece3e0d | 12,594 | py | Python | python/tensorflow_sample/Ver2.x/06_optimizer/trainer/trainer.py | ryoma-jp/samples | 85c0be62f9de1194121d225adee12c9810229960 | [
"Apache-2.0"
] | null | null | null | python/tensorflow_sample/Ver2.x/06_optimizer/trainer/trainer.py | ryoma-jp/samples | 85c0be62f9de1194121d225adee12c9810229960 | [
"Apache-2.0"
] | null | null | null | python/tensorflow_sample/Ver2.x/06_optimizer/trainer/trainer.py | ryoma-jp/samples | 85c0be62f9de1194121d225adee12c9810229960 | [
"Apache-2.0"
] | null | null | null | #! -*- coding: utf-8 -*-
#---------------------------------
# モジュールのインポート
#---------------------------------
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.preprocessing.image import ImageDataGenerator
#---------------------------------
# クラス; 学習モジュール基底クラス
#---------------------------------
class Trainer():
    # --- constructor ---
    def __init__(self, output_dir=None, model_file=None, optimizer='adam'):
        """Initialize the trainer.

        Args:
            output_dir (str | None): directory for checkpoints/outputs;
                created if it does not exist.
            model_file (str | None): path to a saved model — currently unused,
                see NOTE below.
            optimizer (str): optimizer name passed to _compile_model().
        """
        # --- create output directory ---
        self.output_dir = output_dir
        if (output_dir is not None):
            os.makedirs(output_dir, exist_ok=True)
        # --- build model ---
        # NOTE(review): _load_model is a stub — both branches return None,
        # so self.model is always None here and _compile_model() is never
        # called; presumably subclasses are expected to set self.model and
        # compile it themselves — confirm against subclass implementations.
        def _load_model(model_file):
            if (model_file is not None):
                return None
            else:
                return None
        self.model = _load_model(model_file)
        if (self.model is not None):
            self._compile_model(optimizer)
        return
# --- モデルの構成 ---
# * lr_decay: 学習率減衰する(=True),しない(=False; default)を指定
def _compile_model(self, optimizer='adam'):
if (optimizer == 'adam'):
opt = tf.keras.optimizers.Adam()
elif (optimizer == 'sgd'):
opt = tf.keras.optimizers.SGD()
elif (optimizer == 'adam_lrs'):
# --- parameters ---
# https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/schedules/ExponentialDecay
# but, initial learning rate is default of Adam()
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
0.001,
decay_steps=1000,
decay_rate=0.90,
staircase=True)
opt = tf.keras.optimizers.Adam(learning_rate=lr_schedule)
elif (optimizer == 'sgd_lrs'):
# --- parameters ---
# https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/schedules/ExponentialDecay
# but, initial learning rate is default of Adam()
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
0.01,
decay_steps=1000,
decay_rate=0.9,
staircase=True)
opt = tf.keras.optimizers.SGD(learning_rate=lr_schedule)
else:
print('[ERROR] Unknown optimizer: {}'.format(optimizer))
quit()
self.model.compile(
optimizer=opt,
loss = 'sparse_categorical_crossentropy',
metrics=['accuracy'])
return
# --- 学習 ---
def fit(self, x_train, y_train, x_val=None, y_val=None, x_test=None, y_test=None,
da_enable=False,
batch_size=32, epochs=1000000):
# --- 学習 ---
os.makedirs(os.path.join(self.output_dir, 'checkpoints'), exist_ok=True)
checkpoint_path = os.path.join(self.output_dir, 'checkpoints', 'model.ckpt')
cp_callback = keras.callbacks.ModelCheckpoint(checkpoint_path, save_weights_only=True, verbose=1)
es_callback = keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=1, mode='auto')
if (da_enable):
# --- no tuning ---
datagen = ImageDataGenerator(
rotation_range=10,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True)
else:
datagen = ImageDataGenerator()
datagen.fit(x_train)
if ((x_val is not None) and (y_val is not None)):
history = self.model.fit(datagen.flow(x_train, y_train, batch_size=batch_size),
steps_per_epoch=len(x_train)/batch_size, validation_data=(x_val, y_val),
epochs=epochs, callbacks=[cp_callback, es_callback])
else:
history = self.model.fit(datagen.flow(x_train, y_train, batch_size=batch_size),
steps_per_epoch=len(x_train)/batch_size, validation_split=0.2,
epochs=epochs, callbacks=[cp_callback, es_callback])
# --- 学習結果を評価 ---
if ((x_test is not None) and (y_test is not None)):
test_loss, test_acc = self.model.evaluate(x_test, y_test, verbose=2)
print('Test Accuracy: {}'.format(test_acc))
print('Test Loss: {}'.format(test_loss))
# --- メトリクスを保存 ---
metrics = history.history
os.makedirs(os.path.join(self.output_dir, 'metrics'), exist_ok=True)
df_metrics = pd.DataFrame(metrics)
df_metrics.to_csv(os.path.join(self.output_dir, 'metrics', 'metrics.csv'), index_label='epoch')
epoch = df_metrics.index.values
for column in df_metrics.columns:
plt.figure()
plt.plot(epoch, df_metrics[column])
plt.xlabel(column)
plt.ylabel('epoch')
plt.grid(True)
plt.tight_layout()
graph_name = os.path.join(self.output_dir, 'metrics', '{}.png'.format(column))
plt.savefig(graph_name)
plt.close()
return
# --- 推論 ---
def predict(self, x_test):
predictions = self.model.predict(x_test)
return predictions
# --- モデル保存 ---
def save_model(self):
# --- 保存先ディレクトリ作成 ---
model_dir = os.path.join(self.output_dir, 'models')
os.makedirs(os.path.join(model_dir, 'checkpoint'), exist_ok=True)
os.makedirs(os.path.join(model_dir, 'saved_model'), exist_ok=True)
os.makedirs(os.path.join(model_dir, 'hdf5'), exist_ok=True)
# --- checkpoint ---
self.model.save_weights(os.path.join(model_dir, 'checkpoint', 'model.ckpt'))
# --- saved_model ---
self.model.save(os.path.join(model_dir, 'saved_model'))
# --- hdf5 ---
self.model.save(os.path.join(model_dir, 'hdf5', 'model.h5'))
return
# --- ラベルインデックス取得 ---
def GetLabelIndex(self, label, onehot=True):
if (onehot):
label = np.argmax(label, axis=1)
n_category = max(label)+1
return np.array([np.arange(len(label))[label==i] for i in range(n_category)])
#---------------------------------
# クラス; ResNet学習モジュール
#---------------------------------
class TrainerResNet(Trainer):
    """ResNet trainer (small custom ResNet or ResNet50)."""

    def __init__(self, input_shape, classes, output_dir=None, model_type='custom', optimizer='adam'):
        """Builds and compiles a ResNet model.

        Args:
            input_shape: input tensor shape, e.g. [224, 224, 3].
            classes: number of output classes.
            output_dir: output directory (model plot is saved there).
            model_type: 'custom' (small ResNet) or 'resnet50'.
            optimizer: optimizer name passed to `_compile_model`.
        """
        # --- Residual block ---
        #  * keras.applications.resnet.ResNet is not reachable from application
        #    code, so the needed pieces are copied from the TensorFlow source:
        # https://github.com/tensorflow/tensorflow/blob/v2.5.0/tensorflow/python/keras/applications/resnet.py#L212
        def block1(x, filters, kernel_size=3, stride=1, conv_shortcut=True, name=None):
            bn_axis = 3
            if conv_shortcut:
                shortcut = keras.layers.Conv2D(4 * filters, 1, strides=stride, name=name + '_0_conv')(x)
                shortcut = keras.layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name=name + '_0_bn')(shortcut)
            else:
                shortcut = x
            x = keras.layers.Conv2D(filters, 1, strides=stride, name=name + '_1_conv')(x)
            x = keras.layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name=name + '_1_bn')(x)
            x = keras.layers.Activation('relu', name=name + '_1_relu')(x)
            x = keras.layers.Conv2D(filters, kernel_size, padding='SAME', name=name + '_2_conv')(x)
            x = keras.layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name=name + '_2_bn')(x)
            x = keras.layers.Activation('relu', name=name + '_2_relu')(x)
            x = keras.layers.Conv2D(4 * filters, 1, name=name + '_3_conv')(x)
            x = keras.layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name=name + '_3_bn')(x)
            x = keras.layers.Add(name=name + '_add')([shortcut, x])
            x = keras.layers.Activation('relu', name=name + '_out')(x)
            return x

        # --- Residual block stack (copied from the TensorFlow source) ---
        # https://github.com/tensorflow/tensorflow/blob/v2.5.0/tensorflow/python/keras/applications/resnet.py#L257
        def stack1(x, filters, blocks, stride1=2, name=None):
            x = block1(x, filters, stride=stride1, name=name + '_block1')
            for i in range(2, blocks + 1):
                x = block1(x, filters, conv_shortcut=False, name=name + '_block' + str(i))
            return x

        # --- model construction ---
        #  * customized via the stack_fn callback argument
        def _load_model(input_shape, classes, stack_fn):
            input = keras.layers.Input(shape=input_shape)
            bn_axis = 3
            x = keras.layers.ZeroPadding2D(padding=((3, 3), (3, 3)), name='conv1_pad')(input)
            x = keras.layers.Conv2D(64, 7, strides=2, use_bias=True, name='conv1_conv')(x)
            x = keras.layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name='conv1_bn')(x)
            x = keras.layers.Activation('relu', name='conv1_relu')(x)
            x = keras.layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name='pool1_pad')(x)
            x = keras.layers.MaxPooling2D(3, strides=2, name='pool1_pool')(x)
            x = stack_fn(x)
            x = keras.layers.GlobalAveragePooling2D(name='avg_pool')(x)
            x = keras.layers.Dense(classes, activation='softmax', name='predictions')(x)
            model = keras.models.Model(input, x)
            model.summary()
            return model

        def _load_model_resnet50(input_shape, classes, dbg_mode=1):
            # --- TensorFlow's ResNet50 model ---
            # https://www.tensorflow.org/api_docs/python/tf/keras/applications/resnet50/ResNet50
            # dbg_mode=0: original ResNet50, dbg_mode=1: custom ResNet50
            if (dbg_mode == 0):
                print('[INFO] Load ResNet50 model from keras.applications')
                model = keras.applications.resnet50.ResNet50()
            elif (dbg_mode == 1):
                def stack_fn(x):
                    x = stack1(x, 64, 3, stride1=1, name='conv2')
                    x = stack1(x, 128, 4, name='conv3')
                    x = stack1(x, 256, 6, name='conv4')
                    return stack1(x, 512, 3, name='conv5')
                print('[INFO] Load ResNet50 model (custom implementation)')
                model = _load_model(input_shape, classes, stack_fn)
            return model

        # --- base class initialization ---
        super().__init__(output_dir)

        # --- model construction ---
        if (model_type == 'custom'):
            def stack_fn(x):
                x = stack1(x, 32, 3, stride1=1, name='conv2')
                return stack1(x, 64, 4, name='conv3')
            self.model = _load_model(input_shape, classes, stack_fn)
            self._compile_model(optimizer=optimizer)
        elif (model_type == 'resnet50'):
            self.model = _load_model_resnet50(input_shape, classes, dbg_mode=1)
            self._compile_model(optimizer=optimizer)
        else:
            print('[ERROR] Unknown model_type: {}'.format(model_type))
            return

        if (self.output_dir is not None):
            keras.utils.plot_model(self.model, os.path.join(self.output_dir, 'plot_model.png'), show_shapes=True)
        return
#---------------------------------
# クラス; CNN学習モジュール
#---------------------------------
class TrainerCNN(Trainer):
    """Small convolutional network trainer."""

    def __init__(self, input_shape, output_dir=None, optimizer='adam'):
        """Builds and compiles a simple 3-conv-layer CNN (10 classes).

        Args:
            input_shape: input tensor shape, e.g. [32, 32, 3].
            output_dir: output directory (model plot is saved there).
            optimizer: optimizer name passed to `_compile_model`.
        """
        # --- model construction ---
        def _load_model(input_shape):
            model = keras.models.Sequential()
            model.add(keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=input_shape))
            model.add(keras.layers.MaxPooling2D((2, 2)))
            model.add(keras.layers.Conv2D(64, (3, 3), activation='relu'))
            model.add(keras.layers.MaxPooling2D((2, 2)))
            model.add(keras.layers.Conv2D(64, (3, 3), activation='relu'))
            model.add(keras.layers.MaxPooling2D((2, 2)))
            model.add(keras.layers.Flatten(input_shape=input_shape))
            model.add(keras.layers.Dense(64, activation='relu'))
            model.add(keras.layers.Dense(10, activation='softmax'))
            model.summary()
            return model

        # --- base class initialization ---
        super().__init__(output_dir)

        # --- model construction and compilation ---
        self.model = _load_model(input_shape)
        self._compile_model(optimizer=optimizer)
        if (self.output_dir is not None):
            keras.utils.plot_model(self.model, os.path.join(self.output_dir, 'plot_model.png'), show_shapes=True)
        return
#---------------------------------
# クラス; MLP学習モジュール
#---------------------------------
class TrainerMLP(Trainer):
    """Simple multilayer perceptron trainer."""

    def __init__(self, input_shape, output_dir=None, optimizer='adam'):
        """Builds and compiles a one-hidden-layer MLP (10 classes).

        Args:
            input_shape: input tensor shape, e.g. [28, 28].
            output_dir: output directory (model plot is saved there).
            optimizer: optimizer name passed to `_compile_model`.
        """
        # --- model construction ---
        def _load_model(input_shape):
            model = keras.models.Sequential()
            model.add(keras.layers.Flatten(input_shape=input_shape))
            model.add(keras.layers.Dense(128, activation='relu'))
            model.add(keras.layers.Dense(10, activation='softmax'))
            model.summary()
            return model

        # --- base class initialization ---
        super().__init__(output_dir)

        # --- model construction and compilation ---
        self.model = _load_model(input_shape)
        self._compile_model(optimizer=optimizer)
        if (self.output_dir is not None):
            keras.utils.plot_model(self.model, os.path.join(self.output_dir, 'plot_model.png'), show_shapes=True)
        return
#---------------------------------
# メイン処理; Trainerモジュールテスト
#---------------------------------
def main():
    """Self-test entry point for the Trainer modules."""
    import argparse

    def _argparse():
        # NOTE: description/help strings are user-facing and kept as-is.
        parser = argparse.ArgumentParser(description='Trainerモジュールテスト\n'
                    ' * test_mode=\'ResNet\': ResNetのモデル構造確認(ResNet50の構造をTensorFlow公開モデルと比較)',
                    formatter_class=argparse.RawTextHelpFormatter)
        # --- add arguments ---
        parser.add_argument('--test_mode', dest='test_mode', type=str, default='ResNet', required=False,
                    help='テストモード(ResNet)')
        args = parser.parse_args()
        return args

    # --- argument parsing ---
    args = _argparse()
    print(args.test_mode)

    # --- module test ---
    if (args.test_mode == 'ResNet'):
        trainer = TrainerResNet([224, 224, 3], 1000, output_dir=None, model_type='resnet50')
    else:
        print('[ERROR] Unknown test_mode: {}'.format(args.test_mode))

    return


if __name__ == '__main__':
    main()
| 33.584 | 114 | 0.662061 |
7953cf3df110a0380fd68c85de561c8dcf9092bd | 3,845 | py | Python | tordatahub/tests/blob/blob_topic_pub.py | jasonz93/python-tordatahub | 3a9a497d5a0bebf915d7e24049dd8b06099e3c04 | [
"Apache-2.0"
] | null | null | null | tordatahub/tests/blob/blob_topic_pub.py | jasonz93/python-tordatahub | 3a9a497d5a0bebf915d7e24049dd8b06099e3c04 | [
"Apache-2.0"
] | null | null | null | tordatahub/tests/blob/blob_topic_pub.py | jasonz93/python-tordatahub | 3a9a497d5a0bebf915d7e24049dd8b06099e3c04 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import traceback
from tordatahub import DataHub
from tordatahub.utils import Configer
from tordatahub.models import Topic, RecordType, FieldType, RecordSchema, BlobRecord, CursorType
from tordatahub.errors import DatahubException, ObjectAlreadyExistException
configer = Configer('../tordatahub.ini')
access_id = configer.get('tordatahub', 'access_id', '')
access_key = configer.get('tordatahub', 'access_key', '')
endpoint = configer.get('tordatahub', 'endpoint', '')
project_name = configer.get('tordatahub', 'project_name', 'pydatahub_project_test')
topic_name = configer.get('tordatahub', 'topic_name', 'pydatahub_blob_topic_test')
print "======================================="
print "access_id: %s" % access_id
print "access_key: %s" % access_key
print "endpoint: %s" % endpoint
print "project_name: %s" % project_name
print "topic_name: %s" % topic_name
print "=======================================\n\n"
if not access_id or not access_key or not endpoint:
print "access_id and access_key and endpoint must be set!"
sys.exit(-1)
dh = DataHub(access_id, access_key, endpoint)
topic = Topic(name=topic_name)
topic.project_name = project_name
topic.shard_count = 3
topic.life_cycle = 7
topic.record_type = RecordType.BLOB
try:
dh.create_topic(topic)
print "create topic success!"
print "=======================================\n\n"
except ObjectAlreadyExistException, e:
print "topic already exist!"
print "=======================================\n\n"
except Exception, e:
print traceback.format_exc()
sys.exit(-1)
try:
# block等待所有shard状态ready
if dh.wait_shards_ready(project_name, topic_name):
print "shards all ready!!!"
else:
print "some shards not ready!!!"
sys.exit(-1)
print "=======================================\n\n"
topic = dh.get_topic(topic_name, project_name)
print "get topic suc! topic=%s" % str(topic)
if topic.record_type != RecordType.BLOB:
print "topic type illegal!"
sys.exit(-1)
print "=======================================\n\n"
shards = dh.list_shards(project_name, topic_name)
for shard in shards:
print shard
print "=======================================\n\n"
records = []
data = None
with open('tordatahub.png', 'rb') as f:
data = f.read()
record0 = BlobRecord(blobdata=data)
record0.shard_id = '0'
records.append(record0)
record1 = BlobRecord(blobdata=data)
record1.shard_id = '1'
records.append(record1)
record2 = BlobRecord(blobdata=data)
record2.shard_id = '2'
records.append(record2)
failed_indexs = dh.put_records(project_name, topic_name, records)
print "put blob %d records, failed list: %s" %(len(records), failed_indexs)
# failed_indexs如果非空最好对failed record再进行重试
print "=======================================\n\n"
except DatahubException, e:
print traceback.format_exc()
sys.exit(-1)
else:
sys.exit(-1)
| 33.434783 | 96 | 0.650455 |
7953cf45598e56833d175d017d7466830cd8c281 | 8,472 | py | Python | nova/api/openstack/compute/contrib/os_tenant_networks.py | TMaddox/nova | e5c169d15528a8e2eadb8eca668ea0d183cf8648 | [
"Apache-2.0"
] | null | null | null | nova/api/openstack/compute/contrib/os_tenant_networks.py | TMaddox/nova | e5c169d15528a8e2eadb8eca668ea0d183cf8648 | [
"Apache-2.0"
] | null | null | null | nova/api/openstack/compute/contrib/os_tenant_networks.py | TMaddox/nova | e5c169d15528a8e2eadb8eca668ea0d183cf8648 | [
"Apache-2.0"
] | null | null | null | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
import netaddr.core as netexc
from oslo_config import cfg
from oslo_log import log as logging
import six
import webob
from webob import exc
from nova.api.openstack import extensions
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
import nova.network
from nova import quota
CONF = cfg.CONF
os_network_opts = [
cfg.BoolOpt("enable_network_quota",
default=False,
help='Enables or disables quota checking for tenant '
'networks'),
cfg.StrOpt('use_neutron_default_nets',
default="False",
help='Control for checking for default networks'),
cfg.StrOpt('neutron_default_tenant_id',
default="default",
help='Default tenant id when creating neutron '
'networks'),
cfg.IntOpt('quota_networks',
default=3,
help='Number of private networks allowed per project'),
]
CONF.register_opts(os_network_opts)
QUOTAS = quota.QUOTAS
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'os-tenant-networks')
def network_dict(network):
# NOTE(danms): Here, network should be an object, which could have come
# from neutron and thus be missing most of the attributes. Providing a
# default to get() avoids trying to lazy-load missing attributes.
return {"id": network.get("uuid", None) or network.get("id", None),
"cidr": str(network.get("cidr", None)),
"label": network.get("label", None)}
class NetworkController(object):
def __init__(self, network_api=None):
self.network_api = nova.network.API()
self._default_networks = []
def _refresh_default_networks(self):
self._default_networks = []
if CONF.use_neutron_default_nets == "True":
try:
self._default_networks = self._get_default_networks()
except Exception:
LOG.exception(_LE("Failed to get default networks"))
def _get_default_networks(self):
project_id = CONF.neutron_default_tenant_id
ctx = nova_context.RequestContext(user_id=None,
project_id=project_id)
networks = {}
for n in self.network_api.get_all(ctx):
networks[n['id']] = n['label']
return [{'id': k, 'label': v} for k, v in six.iteritems(networks)]
def index(self, req):
context = req.environ['nova.context']
authorize(context)
networks = list(self.network_api.get_all(context))
if not self._default_networks:
self._refresh_default_networks()
networks.extend(self._default_networks)
return {'networks': [network_dict(n) for n in networks]}
def show(self, req, id):
context = req.environ['nova.context']
authorize(context)
try:
network = self.network_api.get(context, id)
except exception.NetworkNotFound:
msg = _("Network not found")
raise exc.HTTPNotFound(explanation=msg)
return {'network': network_dict(network)}
def delete(self, req, id):
context = req.environ['nova.context']
authorize(context)
reservation = None
try:
if CONF.enable_network_quota:
reservation = QUOTAS.reserve(context, networks=-1)
except Exception:
reservation = None
LOG.exception(_LE("Failed to update usages deallocating "
"network."))
def _rollback_quota(reservation):
if CONF.enable_network_quota and reservation:
QUOTAS.rollback(context, reservation)
try:
self.network_api.delete(context, id)
except exception.PolicyNotAuthorized as e:
_rollback_quota(reservation)
raise exc.HTTPForbidden(explanation=six.text_type(e))
except exception.NetworkInUse as e:
_rollback_quota(reservation)
raise exc.HTTPConflict(explanation=e.format_message())
except exception.NetworkNotFound:
_rollback_quota(reservation)
msg = _("Network not found")
raise exc.HTTPNotFound(explanation=msg)
if CONF.enable_network_quota and reservation:
QUOTAS.commit(context, reservation)
response = webob.Response(status_int=202)
return response
def create(self, req, body):
if not body:
raise exc.HTTPUnprocessableEntity()
context = req.environ["nova.context"]
authorize(context)
network = body["network"]
keys = ["cidr", "cidr_v6", "ipam", "vlan_start", "network_size",
"num_networks"]
kwargs = {k: network.get(k) for k in keys}
if not network.get("label"):
msg = _("Network label is required")
raise exc.HTTPBadRequest(explanation=msg)
label = network["label"]
if not (kwargs["cidr"] or kwargs["cidr_v6"]):
msg = _("No CIDR requested")
raise exc.HTTPBadRequest(explanation=msg)
if kwargs["cidr"]:
try:
net = netaddr.IPNetwork(kwargs["cidr"])
if net.size < 4:
msg = _("Requested network does not contain "
"enough (2+) usable hosts")
raise exc.HTTPBadRequest(explanation=msg)
except netexc.AddrFormatError:
msg = _("CIDR is malformed.")
raise exc.HTTPBadRequest(explanation=msg)
except netexc.AddrConversionError:
msg = _("Address could not be converted.")
raise exc.HTTPBadRequest(explanation=msg)
networks = []
try:
if CONF.enable_network_quota:
reservation = QUOTAS.reserve(context, networks=1)
except exception.OverQuota:
msg = _("Quota exceeded, too many networks.")
raise exc.HTTPBadRequest(explanation=msg)
try:
networks = self.network_api.create(context,
label=label, **kwargs)
if CONF.enable_network_quota:
QUOTAS.commit(context, reservation)
except exception.PolicyNotAuthorized as e:
raise exc.HTTPForbidden(explanation=six.text_type(e))
except Exception:
if CONF.enable_network_quota:
QUOTAS.rollback(context, reservation)
msg = _("Create networks failed")
LOG.exception(msg, extra=network)
raise exc.HTTPServiceUnavailable(explanation=msg)
return {"network": network_dict(networks[0])}
class Os_tenant_networks(extensions.ExtensionDescriptor):
"""Tenant-based Network Management Extension."""
name = "OSTenantNetworks"
alias = "os-tenant-networks"
namespace = ("http://docs.openstack.org/compute/"
"ext/os-tenant-networks/api/v2")
updated = "2012-03-07T14:46:43Z"
def get_resources(self):
ext = extensions.ResourceExtension('os-tenant-networks',
NetworkController())
return [ext]
def _sync_networks(context, project_id, session):
ctx = nova_context.RequestContext(user_id=None, project_id=project_id)
ctx = ctx.elevated()
networks = nova.network.api.API().get_all(ctx)
return dict(networks=len(networks))
if CONF.enable_network_quota:
QUOTAS.register_resource(quota.ReservableResource('networks',
_sync_networks,
'quota_networks'))
| 37.157895 | 78 | 0.612016 |
7953cff5e22045880a88f583d63dfa688e8712e4 | 72,532 | py | Python | graph_compression/compression_lib/compression_op.py | pervrosen/google-research | 70e862c1463969f4e44f87f51e398a8b3aa48fd5 | [
"Apache-2.0"
] | 2 | 2021-09-04T09:08:38.000Z | 2021-09-04T09:08:44.000Z | graph_compression/compression_lib/compression_op.py | xingyul/google-research | 45eca1118642cc1257824856ac6e1ab0aa7bf299 | [
"Apache-2.0"
] | null | null | null | graph_compression/compression_lib/compression_op.py | xingyul/google-research | 45eca1118642cc1257824856ac6e1ab0aa7bf299 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Matrix compression operator.
Helper functions to have an automated process to take any matrix compression
algorithm and create a tensorflow operator that can be applied on a tensorflow
matrix variable to compress it on the fly during training.
The class MatrixCompressorInferface can be used to implement any matrix
compression algorithm in the method static_matrix_compressor. The other class
CompressionOpInterface is used to create a tensorflow operator that injects
any matrix compression method dynamically into a tensorflow layer. This is
done by specifying in the spec during initialization a
MatrixCompressorInferface object that implements the method.
The get_apply_compression_op method returns such a tensorflow operator.
Further a tensorflow operator to update variables needs to be invoked
periodically depending on the method. Such an operator is created using
the get_update_op method.
Derived classes of these interfaces can be used to create compression OPs that
implement different compression methods. Such OPs have been implemented using
derived classes such as LowRankDecompMatrixCompressor, CompressionOp for low
rank decomposition, SimhashMatrixCompressor, SimhashCompressionOp for simhash,
DLMatrixCompressor for dictionary learning.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
from graph_compression.compression_lib import compression_op_utils as comp_op_utils
from model_pruning.python import hparam as contrib_hparam
class MatrixCompressorInferface(object):
  """Interface for any matrix compressor algorithm.

  This MatrixCompressorInferface class can be implemented by any third party to
  implement any compression algorithm.
  """

  @abc.abstractmethod
  def __init__(self, spec):
    pass

  def static_matrix_compressor(self, a_matrix):
    """Implements the matrix compression algorithm of choice to compress.

    Args:
      a_matrix: input matrix.

    Returns:
      The factor(s) or any compressed representation of a_matrix.
    """
    raise NotImplementedError()

  def default_matrix(self):
    """Returns default matrix for initialization.

    Size is taken from spec.
    """
    raise NotImplementedError()
class LowRankDecompMatrixCompressor(MatrixCompressorInferface):
  """Low rank decomposition compressor.

  Implements matrix compression interface for the low rank decomposition
  algorithm.
  """

  def __init__(self, spec):
    """Initializer.

    Args:
      spec: hparams object with default value given by
        self.get_default_hparams().
    """
    super(LowRankDecompMatrixCompressor, self).__init__(spec)
    self._spec = spec
    # Bookkeeping of parameter counts, updated by static_matrix_compressor.
    self.uncompressed_size = 0
    self.compressed_size = 0

  def get_spec(self):
    """Returns the hparams spec this compressor was constructed with."""
    return self._spec

  @staticmethod
  def get_default_hparams():
    """Get a tf.HParams object with the default values for the hyperparameters.

    name: string
      name of the low-rank matrix decompressor specification.
    rank: integer
      rank of the low-rank decomposition that is performed.
    num_rows: integer
      number of rows of given matrix.
    num_cols: integer
      number of columns of given matrix.
    use_tpu: False
      experimental flag; indicates whether to use tensorflow operations (True)
      or python operations (False). For TPU, TF operations are preferred.
    compressor_option: integer
      indicates what type of factorization (if any) is used.
    is_b_matrix_trainable: bool
      indicates whether the b_matrix matrix in the factorization is to be
      trained.
    is_c_matrix_trainable: bool
      indicates whether the c_matrix matrix in the factorization is to be
      trained.

    Returns:
      tf.HParams object initialized to default values.
    """
    return contrib_hparam.HParams(
        name='model_compression',
        rank=100,
        num_rows=10,
        num_cols=10,
        use_tpu=False,
        compressor_option=0,
        is_b_matrix_trainable=True,
        is_c_matrix_trainable=True,
        is_d_matrix_trainable=True,
        is_c_matrix_present=True,
        block_size=1,
        pruning_fraction=0.0,
        use_lsh=False)

  def static_matrix_compressor(self, a_matrix):
    """Low-rank decomposition of a_matrix.

    Args:
      a_matrix: input matrix.

    Returns:
      A list [b_matrix,c_matrix] which is the low-rank decomposition of
      a_matrix. Rank is taken from spec.rank.
    """
    u, s, vh = np.linalg.svd(a_matrix, full_matrices=False)
    logging.info(
        'Inside static_matrix_compressor: u,s,vh shapes are: %s, %s, %s',
        u.shape, s.shape, vh.shape)
    # If matrix dimension is smaller than rank specified then adjust rank
    rank = comp_op_utils.compute_compressed_rank_from_matrix_shape(
        a_matrix.shape, self._spec.rank)
    # Split sqrt(s) into both factors so that b_matrix @ c_matrix
    # approximates a_matrix.
    b_matrix = u[:, :rank]
    c_matrix = vh[:rank, :]
    s_mat = np.diag(np.sqrt(s[:rank]))
    b_matrix = np.matmul(b_matrix, s_mat)
    c_matrix = np.matmul(s_mat, c_matrix)
    logging.info(
        'Inside static_matrix_compressor: a_matrix,b_matrix,c_matrix shapes '
        'are: %s, %s, %s', a_matrix.shape, b_matrix.shape, c_matrix.shape)
    self.uncompressed_size = a_matrix.size
    self.compressed_size = b_matrix.size + c_matrix.size
    return [b_matrix, c_matrix]

  def tpu_matrix_compressor(self, a_matrix):
    """Low-rank decomposition of a_matrix using tpu operations.

    For training on tpus, we only use basic tf operations (as py_func is not
    supported).

    Args:
      a_matrix: input matrix.

    Returns:
      A list of two matrices [b_matrix,c_matrix] which is the low-rank
      decomposition of a_matrix. Rank is taken from spec.rank.
    """
    s, u, v = tf.linalg.svd(a_matrix, full_matrices=False)
    logging.info('Inside tpu_matrix_compressor: u,s,v shapes are: %s, %s, %s',
                 u.shape, s.shape, v.shape)
    rank = comp_op_utils.compute_compressed_rank_from_matrix_shape(
        tuple(a_matrix.shape.dims), self._spec.rank)
    b_matrix = u[:, :rank]
    c_matrix = tf.transpose(a=v)[:rank, :]
    s_mat = tf.linalg.tensor_diag(tf.sqrt(s[:rank]))
    b_matrix = tf.matmul(b_matrix, s_mat)
    c_matrix = tf.matmul(s_mat, c_matrix)
    logging.info(
        'Inside tpu_matrix_compressor: a_matrix,b_matrix,c_matrix'
        'shapes are: %s, %s, %s', a_matrix.shape, b_matrix.shape,
        c_matrix.shape)
    return [b_matrix, c_matrix]

  def default_matrix(self):
    """Returns default matrix of zeros of size specified in spec."""
    a_matrix = np.zeros(shape=[self._spec.num_rows, self._spec.num_cols])
    return a_matrix
class CompressionOpInterface(object):
  """Interface for a compression op.

  Class to take a matrix compression algorithm and create a tensorflow
  compression operator to inject that compression dynamically during training.
  The compression algorithm is specified using an object of
  MatrixCompressorInferface class.
  """

  @abc.abstractmethod
  def __init__(self, scope='default_scope', spec=None, global_step=None):
    pass

  def _setup_global_step(self, global_step):
    """Returns `global_step` cast to int32, creating one if None."""
    graph_global_step = global_step
    if graph_global_step is None:
      graph_global_step = tf.train.get_or_create_global_step()
    logging.info('graph_global_step: %s', graph_global_step)
    return tf.cast(graph_global_step, tf.int32)

  def get_apply_compression_op(self,
                               a_matrix_tfvar,
                               matrix_compressor,
                               scope='default_scope'):
    """Returns compressed tensorflow operator.

    Does it for variable a_matrix_tfvar for compression method specified in
    matrix_compressor.

    Args:
      a_matrix_tfvar: TF variable representing a tensor variable in a model.
      matrix_compressor: MatrixCompressorInferface object to specify the
        compression algorithm.
      scope: TF scope used for creating new TF variables.

    Returns:
      A TF node that has the compressed version of a_matrix_tfvar.
    """
    raise NotImplementedError()
class CompressionOp(CompressionOpInterface):
"""Implements a compression OP.
Does this based on any matrix factorization compression algorithm by
replacing a variable a_matrix by alpha*a_matrix +
(1-alpha)b_matrix*c_matrix. See the doc linked in the directory README for
details.
"""
def __init__(self, scope='default_scope', spec=None, global_step=None):
  """Initializer.

  Args:
    scope: TF scope used for creating new TF variables.
    spec: compression hyper parameters default value given by
      self.get_default_hparams().
    global_step: tf variable that has the global step.
  """
  super(CompressionOp, self).__init__(scope, spec, global_step)
  # Compression specification
  self._spec = spec if spec else self.get_default_hparams()
  logging.info('Compression spec in init CompressionOp is: ')
  self.print_hparams()
  # Sanity check for compression hparams
  self._validate_spec()
  self._global_step = self._setup_global_step(global_step)

  # public member variables to track the compressor, the variables and
  # other tf nodes corresponding to this OP.
  self.matrix_compressor = None
  self.a_matrix_tfvar = None
  self.b_matrix_tfvar = None
  self.c_matrix_tfvar = None
  self.alpha = None
  self.final_op = None
  self.update_op = None

  # Alpha-update bookkeeping.
  self._last_alpha_update_step = self._setup_last_alpha_update_step()
  self._last_update_step = -1
  self._alpha_update_tf_op = None

  # Size bookkeeping and update-loop state.
  self.uncompressed_size = 0
  self.compressed_size = 0
  self.run_update_count = 0
  self.last_alpha_value = 1
@staticmethod
def get_default_hparams():
  """Get a tf.HParams object with the default values for the hyperparameters.

  name: string
    name of the compression specification. Used for adding summaries and ops
    under a common tensorflow name_scope.
  alpha_decrement_value: float
    a positive real number by which alpha is decremented at each update.
  begin_compression_step: integer
    the global step at which to begin compression.
  end_compression_step: integer
    the global step at which to terminate compression. Defaults to -1
    implying that compression continues till the training stops.
  use_tpu: False
    indicates whether to use TPU.
  compression_option: integer
    indicates what type of factorization (if any) is used.
  rank: integer
    indicates what type of factorization (if any) is used.
  update_option: integer
    indicates how the update logic is being run. More specifically:
    0: TF_UPDATE - run the update logic in TF; needed when using GPU/TPU
    1: PYTHON_UPDATE - run the update logic in regular python as opposed
       to TF.
    2: TF_AND_PYTHON_UPDATE - run the update logic in TF and in regular
       python.
    3: NO_UPDATE - no alpha update; not required for some compression ops.

  Returns:
    tf.HParams object initialized to default values.
  """
  return contrib_hparam.HParams(
      name='model_compression',
      alpha_decrement_value=0.01,
      begin_compression_step=0,
      end_compression_step=-1,
      compression_frequency=10,
      use_tpu=False,
      compression_option=comp_op_utils.CompressionOptions
      .LOWRANK_MATRIX_COMPRESSION,
      rank=7,
      update_option=comp_op_utils.UpdateOptions.TF_UPDATE,
      run_update_interval_check=1,
      block_size=1,
      pruning_fraction=0.0,
      begin_pruning_step=0,
      end_pruning_step=-1,
      weight_sparsity_map=[''],
      block_dims_map=[''],
      threshold_decay=0.0,
      pruning_frequency=10,
      nbins=256,
      block_height=1,
      block_width=1,
      block_pooling_function='AVG',
      initial_sparsity=0.0,
      target_sparsity=0.5,
      sparsity_function_begin_step=0,
      sparsity_function_end_step=100,
      sparsity_function_exponent=3.0,
      gradient_decay_rate=0.99,
      prune_option='weight',
      add_summary=True)
def add_compression_summaries(self):
"""Adds summaries of alpha value, new variables, and last update step."""
with tf.compat.v1.name_scope(self._spec.name + '_summaries'):
tf.compat.v2.summary.scalar(
self._last_alpha_update_step.op.name + '/last_alpha_update_step',
self._last_alpha_update_step)
tf.compat.v2.summary.scalar(self.alpha.op.name + '/alpha', self.alpha)
tf.compat.v2.summary.scalar(
self.a_matrix_tfvar.op.name + '/a_matrix_norm',
tf.norm(tensor=self.a_matrix_tfvar))
tf.compat.v2.summary.scalar(
self.b_matrix_tfvar.op.name + '/b_matrix_norm',
tf.norm(tensor=self.b_matrix_tfvar))
tf.compat.v2.summary.scalar(
self.c_matrix_tfvar.op.name + '/c_matrix_norm',
tf.norm(tensor=self.c_matrix_tfvar))
def _setup_last_alpha_update_step(self):
"""Setup to track last alpha update step."""
with tf.compat.v1.variable_scope(
self._spec.name, use_resource=True) as scope:
try:
last_alpha_update_step = tf.compat.v1.get_variable(
'last_alpha_update_step',
initializer=-1,
trainable=False,
dtype=tf.int32)
except ValueError:
scope.reuse_variables()
last_alpha_update_step = tf.compat.v1.get_variable(
'last_alpha_update_step', dtype=tf.int32)
return last_alpha_update_step
def _alpha_update_op(self):
"""Update alpha along with last_alpha_update_step."""
with tf.compat.v1.name_scope(self._spec.name):
with tf.control_dependencies([
tf.compat.v1.assign(
self._last_alpha_update_step,
tf.cast(self._global_step, tf.int32),
name='last_alpha_update_step_assign')
]):
with tf.control_dependencies([self._alpha_assign_op()]):
logging.info('Updating alpha.')
return tf.no_op('alpha_update')
def _alpha_assign_op(self):
new_alpha = tf.maximum(self.alpha - self._spec.alpha_decrement_value, 0)
alpha_assign_op = tf.compat.v1.assign(
self.alpha, new_alpha, name='alpha_assign_op')
return alpha_assign_op
def _compressor_op(self, matrix_compressor, a_matrix_tfvar):
"""Creates compressor op based on matrix_compressor.
Meant to create the factors once at begin_compression_step.
Args:
matrix_compressor: specifies the matrix compressor object.
a_matrix_tfvar: the tf tensor to be compressed.
Returns:
A TF op which does the compressor update on b and c.
"""
# py_func is not supported on TPU so need non py_func implementation
use_tpu = self._spec.use_tpu
# Seeing some tf.py_func error because of which the
# following may be needed, so enforcing TF operation updates.
if use_tpu:
[b_matrix_out,
c_matrix_out] = matrix_compressor.tpu_matrix_compressor(a_matrix_tfvar)
else:
[b_matrix_out, c_matrix_out
] = tf.compat.v1.py_func(matrix_compressor.static_matrix_compressor,
[a_matrix_tfvar], [tf.float32, tf.float32])
b_matrix_assign_op = tf.compat.v1.assign(
self.b_matrix_tfvar, b_matrix_out, name='b_matrix_assign_op')
c_matrix_assign_op = tf.compat.v1.assign(
self.c_matrix_tfvar, c_matrix_out, name='c_matrix_assign_op')
with tf.control_dependencies([b_matrix_assign_op, c_matrix_assign_op]):
logging.info('Updating b_matrix,c_matrix.')
return tf.no_op('compresor_b_matrix_and_c_matrix_update')
def _compressor_and_alpha_update_op(self):
"""Applies compressor and also updates alpha."""
def compressor_op():
return self._compressor_op(self.matrix_compressor, self.a_matrix_tfvar)
def tf_no_op():
return tf.no_op()
cond_compressor_op = tf.cond(
pred=self._last_alpha_update_step < 0,
true_fn=compressor_op,
false_fn=tf_no_op)
with tf.control_dependencies([cond_compressor_op]):
with tf.control_dependencies([self._alpha_update_op()]):
return tf.no_op('alpha_update')
def _create_update_op(self):
"""Creates tensoflow update op for the compression."""
def maybe_update_alpha():
"""Operator to update alpha.
Checks if global_step is between begin_compression_step and
end_compression_step.
"""
with tf.compat.v1.name_scope(self._spec.name):
# prune if current step is more than begin_compression_step and
# less than end_compression_step (unless it's negative)
is_step_within_compression_range = tf.logical_and(
tf.greater_equal(
tf.cast(self._global_step, tf.int32),
self._spec.begin_compression_step),
tf.logical_or(
tf.less_equal(
tf.cast(self._global_step, tf.int32),
self._spec.end_compression_step),
tf.less(self._spec.end_compression_step, 0)))
is_compression_step = tf.less_equal(
tf.add(self._last_alpha_update_step,
self._spec.compression_frequency),
tf.cast(self._global_step, tf.int32))
return tf.logical_and(is_step_within_compression_range,
is_compression_step)
def no_update_op():
return tf.no_op()
def compressor_and_alpha_update_op_fn():
return self._compressor_and_alpha_update_op()
cond_alpha_update_op = tf.cond(
pred=maybe_update_alpha(),
true_fn=compressor_and_alpha_update_op_fn,
false_fn=no_update_op)
self.update_op = cond_alpha_update_op
return self.update_op
def get_apply_compression_op(self,
a_matrix_tfvar,
matrix_compressor,
scope='default_scope'):
"""Returns compressed tensorflow operator.
Does this for variable a_matrix_tfvar for
compression method specified in matrix_compressor that must be based on some
matrix factorization compression algorithm by replacing a variable
a_matrix by alpha*a_matrix + (1-alpha)b_matrix*c_matrix.
Args:
a_matrix_tfvar: TF variable representihg a tensor variable in a model.
matrix_compressor: MatrixCompressorInferface object to specify the
compression algorithm. Must return two matrices b_matrix,c_matrix in its
compression.
scope: TF scope used for creating new TF variables.
Returns:
A TF node that has the compressed version of a_matrix_tfvar.
"""
self.matrix_compressor = matrix_compressor
a_matrix = np.zeros(shape=a_matrix_tfvar.shape)
[b_matrix, c_matrix] = matrix_compressor.static_matrix_compressor(a_matrix)
with tf.compat.v1.variable_scope(scope, use_resource=True):
self.b_matrix_tfvar = tf.compat.v1.get_variable(
'b_matrix',
dtype=tf.float32,
initializer=b_matrix.astype(np.float32),
trainable=self.matrix_compressor.get_spec().is_b_matrix_trainable)
self.c_matrix_tfvar = tf.compat.v1.get_variable(
'c_matrix',
dtype=tf.float32,
initializer=c_matrix.astype(np.float32),
trainable=self.matrix_compressor.get_spec().is_c_matrix_trainable)
self.alpha = tf.compat.v1.get_variable(
'alpha', dtype=tf.float32, trainable=False, initializer=1.0)
self.a_matrix_tfvar = a_matrix_tfvar
if self._spec.update_option == comp_op_utils.UpdateOptions.TF_UPDATE:
self.update_op = self._create_update_op()
else:
self.setup_update_explicit()
self.final_op = self.alpha * self.a_matrix_tfvar + (
1 - self.alpha) * tf.matmul(self.b_matrix_tfvar, self.c_matrix_tfvar)
if self._spec.add_summary:
self.add_compression_summaries()
return [self.final_op, self.update_op]
def get_customized_apply_compression_op(self,
a_matrix_tfvar,
matrix_compressor,
layer_obj,
weight_params_fn,
weight_init_obj,
scope='default_scope'):
"""Returns compressed tensorflow operator for a customized model/layer.
Does this for variable a_matrix_tfvar for
compression method specified in matrix_compressor that must be based on some
matrix factorization compression algorithm by replacing a variable
a_matrix by alpha*a_matrix + (1-alpha)b_matrix*c_matrix.
Args:
a_matrix_tfvar: TF variable representihg a tensor variable in a model.
matrix_compressor: MatrixCompressorInferface object to specify the
compression algorithm. Must return two matrices b_matrix,c_matrix in its
compression.
layer_obj: a customeried layer object that handles variable creation.
weight_params_fn: functional handle to create model parameters.
weight_init_obj: a weight initialization object.
scope: TF scope used for creating new TF variables.
Returns:
A TF node that has the compressed version of a_matrix_tfvar.
"""
self.matrix_compressor = matrix_compressor
a_matrix = np.zeros(shape=a_matrix_tfvar.shape)
[b_matrix, c_matrix] = matrix_compressor.static_matrix_compressor(a_matrix)
p = layer_obj.params
with tf.variable_scope(scope) as scope:
b_matrix_pc = weight_params_fn(b_matrix.shape,
weight_init_obj.Constant(1.0), p.dtype)
c_matrix_pc = weight_params_fn(c_matrix.shape,
weight_init_obj.Constant(1.0), p.dtype)
alpha_pc = weight_params_fn([], weight_init_obj.Constant(1.0), tf.float32)
layer_obj.CreateVariable('alpha', alpha_pc, trainable=False)
layer_obj.CreateVariable(
'b_matrix_tfvar',
b_matrix_pc,
trainable=self.matrix_compressor.get_spec().is_b_matrix_trainable)
layer_obj.CreateVariable(
'c_matrix_tfvar',
c_matrix_pc,
trainable=self.matrix_compressor.get_spec().is_c_matrix_trainable)
self.b_matrix_tfvar = layer_obj.vars.b_matrix_tfvar
self.c_matrix_tfvar = layer_obj.vars.c_matrix_tfvar
self.alpha = layer_obj.vars.alpha
self.a_matrix_tfvar = a_matrix_tfvar
if self._spec.update_option == comp_op_utils.UpdateOptions.TF_UPDATE:
self.update_op = self._create_update_op()
else:
self.setup_update_explicit()
self.final_op = self.alpha * self.a_matrix_tfvar + (
1 - self.alpha) * tf.matmul(self.b_matrix_tfvar, self.c_matrix_tfvar)
if self._spec.add_summary:
self.add_compression_summaries()
return [self.final_op, self.update_op]
def get_mix_operator(self, theta, concat):
"""Performs matrix multiplication for customized layer.
This performs the compressed equivalent of tf.matmul(concat, theta.wm).
Args:
theta: object in customized layer that contains weight tensors, etc.
concat: the left operand of the matmul operation.
Returns:
A TensorFlow node that has compressed version of
tf.matmul(concat, theta.wm).
"""
return (theta.alpha * tf.matmul(concat, theta.wm) +
(1 - theta.alpha) * tf.matmul(
tf.matmul(concat, theta.b_matrix_tfvar), theta.c_matrix_tfvar))
def get_apply_embedding_lookup(self, ids):
"""Returns compressed tensorflow operator for embedding_lookup.
This method returns a TensorFlow node that performs embedding lookup as
alpha * tf.nn.embedding_lookup(a_matrix_tfvar, ids) +
(1 - alpha) * tf.nn.embedding_lookup(b_matrix_tfvar, ids) if c_matrix is
not present, and alpha * tf.nn.embedding_lookup(a_matrix_tfvar, ids) +
(1 - alpha) * tf.matmul(tf.nn.embedding_lookup(b_matrix_tfvar, ids),
c_matrix) if c_matrix is present, where b_matrix_tfvar and c_matrix_tfvar
are the factor matrices for the compressed embedding table.
Args:
ids: A Tensor with type int32 or int64 containing the ids to be looked up
in the embedding table (the a_matrix_tfvar variable).
Returns:
embedding_op: a TensorFlow node that performs compressed embedding lookup.
"""
if self.matrix_compressor.get_spec().is_c_matrix_present:
embedding_op = self.alpha * tf.nn.embedding_lookup(
self.a_matrix_tfvar, ids) + (1 - self.alpha) * tf.matmul(
tf.nn.embedding_lookup(self.b_matrix_tfvar, ids),
self.c_matrix_tfvar)
else:
embedding_op = self.alpha * tf.nn.embedding_lookup(
self.a_matrix_tfvar, ids) + (1 - self.alpha) * tf.nn.embedding_lookup(
self.b_matrix_tfvar, ids)
return embedding_op
def get_apply_matmul(self, left_operand):
"""Returns compressed TensorFlow node for matmul.
This method performs matmul (on the right) with the compressed matrix.
Args:
left_operand: a Tensor that is the left operand in matmul.
Returns:
matmul_op: a TensorFlow node that performs matmul of left_operand with the
compressed a_matrix_tfvar.
"""
# Applies matmul on the right
if self.matrix_compressor.get_spec().is_c_matrix_present:
matmul_op = self.alpha * tf.matmul(
left_operand, tf.transpose(
self.a_matrix_tfvar)) + (1 - self.alpha) * tf.matmul(
tf.matmul(left_operand, tf.transpose(self.c_matrix_tfvar)),
tf.transpose(self.b_matrix_tfvar))
else:
matmul_op = self.alpha * tf.matmul(
left_operand, tf.transpose(
self.a_matrix_tfvar)) + (1 - self.alpha) * tf.matmul(
left_operand, tf.transpose(self.b_matrix_tfvar))
return matmul_op
@staticmethod
def all_update_op(update_ops_list, scope='default_scope'):
"""Method to create a complete update op.
Args:
update_ops_list: list of individual update ops.
scope: tf scope for creating update op.
Returns:
A TensorFlow op that updates the compression related variables.
"""
with tf.compat.v1.name_scope(scope):
with tf.control_dependencies(update_ops_list):
logging.info('Updating all compression_ops.')
return tf.no_op('update_all_compression_ops')
def _validate_spec(self):
spec = self._spec
if spec.begin_compression_step < 0:
raise ValueError('Illegal value for begin_compression_step')
if spec.begin_compression_step >= spec.end_compression_step:
if spec.end_compression_step != -1:
raise ValueError(
'Compression must begin before it can end. begin_step=%d, '
'end_step=%d. Set end_compression_step to -1 if compression is '
'required till training stops' %
(spec.begin_compression_step, spec.end_compression_step))
def _setup_global_step(self, global_step):
graph_global_step = global_step
if graph_global_step is None:
graph_global_step = tf.train.get_global_step()
logging.info('graph_global_step: %s', graph_global_step)
return tf.cast(graph_global_step, tf.int32)
def print_hparams(self):
logging.info(self._spec.to_json())
def setup_update_explicit(self):
self._alpha_update_tf_op = self._alpha_update_op()
return self._alpha_update_tf_op
def run_update_step(self, session, step_number=None):
"""Returns the combine update tf OP."""
logging.info('running run_update_step self._global_step is %s name is %s',
self._global_step, self.a_matrix_tfvar.op.name)
if step_number is None:
if self._spec.run_update_interval_check != 0:
logging.info(
'running run_update_step step_num is null self.globalstep is %s',
self._global_step)
step_number = session.run(self._global_step)
logging.info('running run_update_step step_num is %s', step_number)
else:
step_number = 1
logging.info(
'In compression op.run_update_step: '
'step_number is %s, begin, end and update_count are: %s %s %s ',
step_number, self._spec.begin_compression_step,
self._spec.end_compression_step, self.run_update_count)
if (step_number >= self._spec.begin_compression_step and
step_number < self._spec.end_compression_step):
logging.info(
'In compression op.run_update_step:'
'step_number is %s, begin, end and update_count are: %s %s %s ',
step_number, self._spec.begin_compression_step,
self._spec.end_compression_step, self.run_update_count)
self.run_update_count += 1
logging.info('inside compression interval')
# Need to persist these python state variables in TF as if a task gets
# aborted things get out of sync.
self._last_update_step = session.run(self._last_alpha_update_step)
logging.info(
'In compression op.run_update_step: '
'step_number is %s, begin, end, update_count, last_alpha_update'
' are: %s %s %s %s',
step_number, self._spec.begin_compression_step,
self._spec.end_compression_step, self.run_update_count,
self._last_update_step)
if self._last_update_step == -1:
logging.info(
'In compression op.run_update_step: step_number is %s, '
'begin, end, update_count are: %s %s %s ',
step_number, self._spec.begin_compression_step,
self._spec.end_compression_step, self.run_update_count)
print('inside compression interval: initial decomposition step')
a_matrix = session.run(self.a_matrix_tfvar)
logging.info(
'In compression op.run_update_step: '
'a_matrix.shape is %s norm is %d',
a_matrix.shape, np.linalg.norm(a_matrix))
if self.matrix_compressor.get_spec().is_c_matrix_present:
logging.info(
'In compression op.run_update_step: '
'step_number is %s, begin, end and update_count are: %s %s %s ',
step_number, self._spec.begin_compression_step,
self._spec.end_compression_step, self.run_update_count)
[b_matrix,
c_matrix] = self.matrix_compressor.static_matrix_compressor(a_matrix)
session.run(tf.assign(self.b_matrix_tfvar, b_matrix))
session.run(tf.assign(self.c_matrix_tfvar, c_matrix))
else:
[b_matrix] = self.matrix_compressor.static_matrix_compressor(a_matrix)
session.run(tf.assign(self.b_matrix_tfvar, b_matrix))
logging.info(
'In compression op.run_update_step: '
'step_number is %s, begin, end and update_count are: %s %s %s ',
step_number, self._spec.begin_compression_step,
self._spec.end_compression_step, self.run_update_count)
alpha = session.run(self.alpha)
self.last_alpha_value = alpha
if self.last_alpha_value > 0:
make_a_zero = False
new_alpha = max(alpha - self._spec.alpha_decrement_value, 0)
if make_a_zero and new_alpha == 0:
logging.info('Making a_matrix all zero for %s',
self.a_matrix_tfvar.op.name)
a_matrix = np.zeros(shape=self.a_matrix_tfvar.shape)
session.run(tf.assign(self.a_matrix_tfvar, a_matrix))
logging.info('in run_update_step decrementing alpha, alpha value is %d',
self.last_alpha_value)
logging.info(
'running run_update_step self._global_step is %s new and old alpha are %d %d',
self._global_step, alpha, new_alpha)
session.run(tf.assign(self.alpha, new_alpha))
self.last_alpha_value = new_alpha
self._last_update_step = step_number
session.run(tf.assign(self._last_alpha_update_step, step_number))
logging.info(
'In compression op.run_update_step: '
'step_number is %s, begin, end and update_count are: %s %s %s ',
step_number, self._spec.begin_compression_step,
self._spec.end_compression_step, self.run_update_count)
def get_update_op(self):
return self.update_op
def run_update_step_keras(self, step_number):
"""Keras version of run_update_step.
Run matrix and alpha update step if criterion is met.
Args:
step_number: step number in the training process.
Note: This method should only be called during training.
"""
if (step_number >= self._spec.begin_compression_step and
(step_number < self._spec.end_compression_step or
self._spec.end_compression_step == -1)):
if self.last_alpha_update_step.numpy() == -1:
a_matrix = self.a_matrix_tfvar.numpy()
b_matrix, c_matrix = self.matrix_compressor.static_matrix_compressor(
a_matrix)
self.b_matrix_tfvar.assign(b_matrix)
self.c_matrix_tfvar.assign(c_matrix)
if self.alpha.numpy() > 0:
self.alpha.assign(
max(self.alpha.numpy() - self._spec.alpha_decrement_value, 0))
self.last_alpha_update_step.assign(step_number)
class InputOutputCompressionOp(CompressionOpInterface):
"""Implements an input (and/or) output compression OP.
Replaces a vector-matrix multiplication with a compressed vector-smaller
matrix multiplication. The compression can happen on the input vector (input
compression) or on the output of the vector-matrix multiplication (output
compression).
Input compression projects the input vector into a lower-dimensional space
using a linear transform and replaces the original weight matrix by a smaller
matrix as a result.
Output compression replaces the product of an input vector and a weight matrix
by taking the product of the input with a smaller weight matrix and then
projecting this smaller sized output back up to the original dimensionality of
the product.
compress_input flag indicates if we want the input to be compressed. set to
True by default.
compress_output flag indicates if we want the output to be compressed. set to
False by default.
"""
  def __init__(self, scope='default_scope', spec=None, global_step=None):
    """Initializer.

    Args:
      scope: TF scope used for creating new TF variables.
      spec: compression hyper parameters default value given by
        self.get_default_hparams().
      global_step: tf variable that has the global step.
    """
    super(InputOutputCompressionOp, self).__init__(scope, spec, global_step)
    # Compression specification
    self._spec = spec if spec else self.get_default_hparams()
    logging.info('Compression spec in init InputOutputCompressionOp is: ')
    self.print_hparams()
    # _setup_global_step is inherited from the interface base class.
    self._global_step = self._setup_global_step(global_step)
    # public member variables to track the compressor, the variables and
    # other tf nodes corresponding to this OP.
    self.matrix_compressor = None
    self.a_matrix_tfvar = None
    self.b_matrix_tfvar = None
    self.c_matrix_tfvar = None
    self.final_op = None
    # Size bookkeeping; populated elsewhere.
    self.uncompressed_size = 0
    self.compressed_size = 0
@staticmethod
def get_default_hparams():
"""Get a tf.HParams object with the default values for the hyperparameters.
name: string
name of the compression specification. Used for adding summaries and ops
under a common tensorflow name_scope.
use_tpu: False
indicates whether to use TPU.
compression_option: integer
indicates what type of factorization (if any) is used.
rank: integer
indicates what type of factorization (if any) is used.
update_option: integer
indicates how the update logic is being run. More specifically:
0: TF_UPDATE - run the update logic in TF; needed when using GPU/TPU
1: PYTHON_UPDATE - run the update logic in regular python as opposed
to TF.
2: TF_AND_PYTHON_UPDATE - run the update logic in TF and in regular
python.
3: NO_UPDATE - no alpha update; not required for some compression ops.
TODO(wanxin): add doc strings for pruning hparams.
Returns:
tf.HParams object initialized to default values.
"""
return contrib_hparam.HParams(
name='input_compression',
compression_frequency=10,
use_tpu=False,
compression_option=comp_op_utils.CompressionOptions
.INPUTOUTPUT_COMPRESSION,
update_option=comp_op_utils.UpdateOptions.NO_UPDATE,
begin_compression_step=1000,
end_compression_step=2000,
is_b_matrix_trainable=True,
is_c_matrix_trainable=True,
is_d_matrix_trainable=True,
rank=4,
compress_input=True,
compress_output=False,
input_compression_factor=1,
input_block_size=32,
output_compression_factor=1,
output_block_size=1,
add_summary=True)
def add_compression_summaries(self):
"""Adds summaries."""
with tf.name_scope(self._spec.name + '_summaries'):
logging.info('add_compression_summaries scope name is %s',
self._spec.name)
tf.compat.v2.summary.scalar(
self.a_matrix_tfvar.op.name + '/a_matrix_norm',
tf.norm(self.a_matrix_tfvar))
if self._spec.compress_input:
tf.compat.v2.summary.scalar(
self.b_matrix_tfvar.op.name + '/b_matrix_norm',
tf.norm(tf.reshape(self.b_matrix_tfvar, [-1]), ord=1))
if self._spec.compress_output:
tf.compat.v2.summary.scalar(
self.d_matrix_tfvar.op.name + '/d_matrix_norm',
tf.norm(tf.reshape(self.d_matrix_tfvar, [-1]), ord=1))
tf.compat.v2.summary.scalar(
self.c_matrix_tfvar.op.name + '/c_matrix_norm',
tf.norm(self.c_matrix_tfvar))
def print_hparams(self):
logging.info(self._spec.to_json())
def get_apply_compression_op(self,
a_matrix_tfvar,
matrix_compressor,
scope='default_scope'):
"""Returns compressed tensorflow operator for input/output compression.
Args:
a_matrix_tfvar: TF variable representing a tensor variable in a model
matrix_compressor: MatrixCompressorInferface object to specify the
compression algorithm. Must return two matrices b_matrix,c_matrix in its
compression.
scope: TF scope used for creating new TF variables
Returns:
A TF node that has the compressed version of a_matrix_tfvar.
"""
self.matrix_compressor = matrix_compressor
if self._spec.compress_input:
# input to be compressed. create b matrix with Xavier initialitation.
b_matrix_shape = [
self._spec.input_block_size,
self._spec.input_block_size // self._spec.input_compression_factor
]
b_limit = np.sqrt(
3.0 * (1 / np.max([1.,
(b_matrix_shape[0] + b_matrix_shape[1]) / 2.])))
b_matrix = np.random.uniform(-b_limit, b_limit, size=b_matrix_shape)
if self._spec.compress_output:
# output to be compressed. create d matrix with Xavier initialization.
d_matrix_shape = [
self._spec.output_block_size // self._spec.output_compression_factor,
self._spec.output_block_size
]
d_limit = np.sqrt(
3.0 * (1 / np.max([1.,
(d_matrix_shape[0] + d_matrix_shape[1]) / 2.])))
d_matrix = np.random.uniform(-d_limit, d_limit, size=d_matrix_shape)
# create c_matrix according to whether input is being compressed
# and whether the output is being compressed. Xavier init.
c_matrix_shape = [
a_matrix_tfvar.shape[0] // self._spec.input_compression_factor,
a_matrix_tfvar.shape[1] // self._spec.output_compression_factor
]
c_limit = np.sqrt(
3.0 * (1 / np.max([1., (c_matrix_shape[0] + c_matrix_shape[1]) / 2.])))
c_matrix = np.random.uniform(-c_limit, c_limit, size=c_matrix_shape)
# convert b,c,d from numpy arrays to tf tensors
with tf.compat.v1.variable_scope(scope, use_resource=True):
if self._spec.compress_input:
self.b_matrix_tfvar = tf.compat.v1.get_variable(
'b_matrix',
dtype=tf.float32,
initializer=b_matrix.astype(np.float32),
trainable=self.matrix_compressor.get_spec().is_b_matrix_trainable)
if self._spec.compress_output:
self.d_matrix_tfvar = tf.compat.v1.get_variable(
'd_matrix',
dtype=tf.float32,
initializer=d_matrix.astype(np.float32),
trainable=self.matrix_compressor.get_spec().is_d_matrix_trainable)
self.c_matrix_tfvar = tf.compat.v1.get_variable(
'c_matrix',
dtype=tf.float32,
initializer=c_matrix.astype(np.float32),
trainable=self.matrix_compressor.get_spec().is_c_matrix_trainable)
self.a_matrix_tfvar = a_matrix_tfvar
# update_op and final_op not necessary for InputOutputCompressionOp.
self.update_op = tf.no_op()
self.final_op = tf.no_op()
if self._spec.add_summary:
self.add_compression_summaries()
return [self.final_op, self.update_op]
  def get_customized_apply_compression_op(self,
                                          a_matrix_tfvar,
                                          matrix_compressor,
                                          layer_obj,
                                          weight_params_fn,
                                          weight_init_obj,
                                          scope='default_scope'):
    """Returns input (and) or output compressed operator for a babelfish layer.

    Args:
      a_matrix_tfvar: TF variable representing a tensor variable in a model.
      matrix_compressor: MatrixCompressorInferface object to specify the
        compression algorithm. Must return two matrices b_matrix,c_matrix in
        its compression.
      layer_obj: a customized layer object that handles variable creation.
      weight_params_fn: functional handle to create model parameters.
      weight_init_obj: a weight initialization object.
      scope: TF scope used for creating new TF variables.

    Returns:
      A TF node that has the compressed version of a_matrix_tfvar.
    """
    self.matrix_compressor = matrix_compressor
    # NOTE(review): uses TF1-style tf.variable_scope (not tf.compat.v1),
    # presumably required by the customized-layer framework; confirm.
    with tf.variable_scope(scope) as scope:
      if self._spec.compress_input:
        # input-side compression being applied.
        # create b with appropriate shape and init params.
        b_matrix_pc = weight_params_fn([
            self._spec.input_block_size,
            self._spec.input_block_size // self._spec.input_compression_factor
        ], weight_init_obj.Xavier(1.0), layer_obj.params.dtype)
      if self._spec.compress_output:
        # output-side compression being applied.
        # create d with appropriate shape and init params.
        d_matrix_pc = weight_params_fn([
            self._spec.output_block_size //
            self._spec.output_compression_factor, self._spec.output_block_size
        ], weight_init_obj.Xavier(1.0), layer_obj.params.dtype)
      # shape of c determined by whether input-side and output-side compression
      # are turned on.
      c_matrix_pc = weight_params_fn([
          a_matrix_tfvar.shape[0] // self._spec.input_compression_factor,
          a_matrix_tfvar.shape[1] // self._spec.output_compression_factor
      ], weight_init_obj.Xavier(1.0), layer_obj.params.dtype)
      # create the TF variables using babelfish variable creation function
      if self._spec.compress_input:
        layer_obj.CreateVariable(
            'b_matrix_tfvar',
            b_matrix_pc,
            trainable=self.matrix_compressor.get_spec().is_b_matrix_trainable)
      if self._spec.compress_output:
        layer_obj.CreateVariable(
            'd_matrix_tfvar',
            d_matrix_pc,
            trainable=self.matrix_compressor.get_spec().is_d_matrix_trainable)
      layer_obj.CreateVariable(
          'c_matrix_tfvar',
          c_matrix_pc,
          trainable=self.matrix_compressor.get_spec().is_c_matrix_trainable)
      # Mirror the created layer variables onto this op's attributes.
      if self._spec.compress_input:
        self.b_matrix_tfvar = layer_obj.vars.b_matrix_tfvar
      if self._spec.compress_output:
        self.d_matrix_tfvar = layer_obj.vars.d_matrix_tfvar
      self.c_matrix_tfvar = layer_obj.vars.c_matrix_tfvar
      self.a_matrix_tfvar = a_matrix_tfvar
    # No iterative update is needed for input/output compression; both ops
    # are placeholders.
    self.final_op = tf.no_op()
    if self._spec.add_summary:
      self.add_compression_summaries()
    self.update_op = tf.no_op()
    return [self.final_op, self.update_op]
def get_apply_compression_op_keras(self,
a_matrix_tfvar,
matrix_compressor,
layer):
"""Returns compressed tensorflow operator for input compression.
Args:
a_matrix_tfvar: TF variable representihg a tensor variable in a model
matrix_compressor: MatrixCompressorInferface object to specify the
compression algorithm. Must return two matrices b_matrix,c_matrix in its
compression.
layer: keras layer object calling this function. Must support add_weight
method.
Returns:
A TF node that has the compressed version of a_matrix_tfvar.
"""
self.matrix_compressor = matrix_compressor
if self._spec.compress_input:
# input-side compression being applied.
# create b with appropriate shape and init params.
b_matrix_shape = [
self._spec.input_block_size,
self._spec.input_block_size // self._spec.input_compression_factor
]
self.b_matrix_tfvar = layer.add_weight(
'b_matrix',
shape=b_matrix_shape,
initializer=layer.kernel_initializer,
regularizer=layer.kernel_regularizer,
constraint=layer.kernel_constraint,
dtype=layer.dtype,
trainable=True)
if self._spec.compress_output:
# output-side compression being applied.
# create d with appropriate shape and init params.
d_matrix_shape = [
self._spec.output_block_size // self._spec.output_compression_factor,
self._spec.output_block_size
]
self.d_matrix_tfvar = layer.add_weight(
'd_matrix',
shape=d_matrix_shape,
initializer=layer.kernel_initializer,
regularizer=layer.kernel_regularizer,
constraint=layer.kernel_constraint,
dtype=layer.dtype,
trainable=True)
c_matrix_shape = [
a_matrix_tfvar.shape[0] // self._spec.input_compression_factor,
a_matrix_tfvar.shape[1] // self._spec.output_compression_factor
]
self.c_matrix_tfvar = layer.add_weight(
'c_matrix',
shape=c_matrix_shape,
initializer=layer.kernel_initializer,
regularizer=layer.kernel_regularizer,
constraint=layer.kernel_constraint,
dtype=layer.dtype,
trainable=True)
self.a_matrix_tfvar = a_matrix_tfvar
self.update_op = tf.no_op()
self.final_op = tf.no_op()
print('****************returning these self.final_op, self.update_op',
self.final_op, self.update_op)
# self.add_compression_summaries()
return [self.final_op, self.update_op]
  def get_apply_matmul(self, left_operand):
    """Returns input (and/or) output compressed TensorFlow node for matmul.

    This method performs matmul according to the compression
    procedure: optionally project the input down with b, multiply by c,
    and optionally project the result back up with d.

    Args:
      left_operand: a Tensor that is the left operand in matmul.

    Returns:
      matmul_op: a TensorFlow node that performs matmul of left_operand with the
        compressed a_matrix_tfvar.
    """
    if self._spec.compress_input:
      # block the left operand into blocks of size input_block_size:
      # [..., n] -> [..., n // input_block_size, input_block_size].
      blocked_left_operand = tf.reshape(
          left_operand,
          tf.concat([
              tf.shape(left_operand)[:-1],
              [tf.shape(left_operand)[-1] // self._spec.input_block_size],
              [self._spec.input_block_size]
          ], axis=0))
      # project blocked_left_operand down using b (matmul broadcasts over the
      # leading block dimension).
      projected_blocked_left_operand = tf.matmul(blocked_left_operand,
                                                 self.b_matrix_tfvar)
      # flatten the block dimension in projected_blocked_left_operand:
      # [..., n] -> [..., n // input_compression_factor] overall.
      compressed_left_operand = tf.reshape(
          projected_blocked_left_operand,
          tf.concat([
              tf.shape(left_operand)[:-1],
              [
                  tf.shape(left_operand)[-1] //
                  self._spec.input_compression_factor
              ]
          ], axis=0))
    else:
      # input not being compressed
      compressed_left_operand = left_operand
    # multiply compressed_left_operand with c.
    intermediate_result = tf.matmul(compressed_left_operand,
                                    self.c_matrix_tfvar)
    if self._spec.compress_output:
      # block intermediate_result; each block has
      # output_block_size // output_compression_factor entries.
      block_size = self._spec.output_block_size // self._spec.output_compression_factor
      blocked_intermediate_result = tf.reshape(
          intermediate_result,
          tf.concat([
              tf.shape(intermediate_result)[:-1],
              [tf.shape(intermediate_result)[-1] // block_size],
              [block_size]
          ], axis=0))
      # project blocked_intermediate_result up using d.
      projected_blocked_intermediate_result = tf.matmul(
          blocked_intermediate_result, self.d_matrix_tfvar)
      # flatten block dimension in projected_blocked_intermediate_result;
      # the last dimension grows by output_compression_factor.
      compressed_result = tf.reshape(
          projected_blocked_intermediate_result,
          tf.concat([
              tf.shape(intermediate_result)[:-1],
              [
                  tf.shape(intermediate_result)[-1] *
                  self._spec.output_compression_factor
              ]
          ], axis=0))
    else:
      # output not being compressed
      compressed_result = intermediate_result
    return compressed_result
def get_mix_operator(self, theta, concat):
"""Performs matrix multiplication on compressed input for Babelfish LSTM layers.
This performs the input (and/or) output compressed equivalent of
tf.matmul(concat, theta.wm).
Args:
theta: object in customized layer that contains weight tensors, etc.
concat: the left operand of the matmul operation. a rank 2 tensor.
Returns:
A TensorFlow node that has compressed version of
tf.matmul(concat, theta.wm).
"""
if self._spec.compress_input:
# block concat into blocks of size input_block_size.
blocked_concat = tf.reshape(
concat,
tf.concat([
tf.shape(concat)[:-1],
[tf.shape(concat)[-1] // self._spec.input_block_size],
[self._spec.input_block_size]
],
axis=0))
# project blocked_left_operand down using b.
projected_blocked_concat = tf.matmul(blocked_concat, theta.b_matrix_tfvar)
# flatten the block dimension in projected_blocked_concat.
compressed_concat = tf.reshape(
projected_blocked_concat,
tf.concat([
tf.shape(concat)[:-1],
[tf.shape(concat)[-1] // self._spec.input_compression_factor]
], axis=0))
else:
compressed_concat = concat
# multiply compressed concat with c.
intermediate_result = tf.matmul(compressed_concat, theta.c_matrix_tfvar)
if self._spec.compress_output:
# block intermediate_result into blocks
block_size = self._spec.output_block_size // self._spec.output_compression_factor
blocked_intermediate_result = tf.reshape(
intermediate_result,
tf.concat([
tf.shape(intermediate_result)[:-1],
[tf.shape(intermediate_result)[-1] // block_size], [block_size]
],
axis=0))
# project blocked_intermediate_result up using d.
projected_intermediate_result = tf.matmul(blocked_intermediate_result,
theta.d_matrix_tfvar)
# flatten the block dimension
compressed_result = tf.reshape(
projected_intermediate_result,
tf.concat([
tf.shape(intermediate_result)[:-1],
[
tf.shape(intermediate_result)[-1] *
self._spec.output_compression_factor
]
], axis=0))
else:
compressed_result = intermediate_result
return compressed_result
def get_matmul_operator(self,
inputs,
wm,
transpose_a=False,
transpose_b=False):
"""Performs matrix multiplication on compressed input for customized Softmax layers.
This performs the input (and/or) output compressed equivalent of
tf.matmul(inputs, wm).
Args:
inputs: the left operand of the matmul operation. a rank 2 tensor.
wm: the right operand of the matmul operator. a rank 2 tensor.
transpose_a: whether inputs tensor needs to be transposed before matmul.
transpose_b: whether wm tensor needs to be transposed before matmul.
Returns:
A TensorFlow node that has compressed version of
tf.matmul(inputs, wm).
"""
if transpose_a:
inputs = tf.transpose(inputs)
if transpose_b:
wm = tf.transpose(wm)
if self._spec.compress_input:
# block inputs into blocks of size input_block_size.
blocked_inputs = tf.reshape(inputs, [
-1,
tf.shape(inputs)[1] // self._spec.input_block_size,
self._spec.input_block_size
])
# project blocked_inputs down using b.
projected_blocked_inputs = tf.matmul(blocked_inputs, self.b_matrix_tfvar)
# flatten the block dimension in projected_blocked_inputs.
compressed_inputs = tf.reshape(
projected_blocked_inputs,
[tf.shape(inputs)[0], -1])
else:
compressed_inputs = inputs
# multiply compressed inputs with c.
intermediate_result = tf.matmul(compressed_inputs, self.c_matrix_tfvar)
if self._spec.compress_output:
# block intermediate_result into blocks
block_size = self._spec.output_block_size // self._spec.output_compression_factor
blocked_intermediate_result = tf.reshape(
intermediate_result,
[tf.shape(intermediate_result)[0], -1, block_size])
# project blocked_intermediate_result up using d.
projected_intermediate_result = tf.matmul(blocked_intermediate_result,
self.d_matrix_tfvar)
# flatten the block dimension
compressed_result = tf.reshape(
projected_intermediate_result,
[tf.shape(projected_intermediate_result)[0], -1])
else:
compressed_result = intermediate_result
return compressed_result
  def get_einsum_operator(self,
                          inputs,
                          layerobj):
    """Performs compressed matrix multiplication for customized ProjectionLayer.

    This performs the input (and/or) output compressed equivalent of
    tf.matmul(inputs, weight).

    Args:
      inputs: the left operand of the matmul operation.
      layerobj: the ProjectionLayer object from where get_einsum_operator
        is called.

    Returns:
      A TensorFlow node that has compressed version of
      tf.matmul(inputs, wm).
    """
    theta = layerobj.theta
    if self._spec.compress_input:
      # block inputs into blocks of size input_block_size:
      # [..., n] -> [..., n // input_block_size, input_block_size].
      blocked_inputs = tf.reshape(
          inputs,
          tf.concat([
              tf.shape(inputs)[:-1],
              [tf.shape(inputs)[-1] // self._spec.input_block_size],
              [self._spec.input_block_size]
          ],
                    axis=0))
      # project blocked_inputs down using b.
      projected_blocked_inputs = tf.matmul(blocked_inputs, theta.b_matrix_tfvar)
      # flatten the block dimension in projected_blocked_concat.
      compressed_inputs = tf.reshape(
          projected_blocked_inputs,
          tf.concat([
              tf.shape(inputs)[:-1],
              [tf.shape(inputs)[-1] // self._spec.input_compression_factor]
          ], axis=0))
    else:
      compressed_inputs = inputs
    # multiply compressed inputs with c.
    intermediate_result = tf.matmul(compressed_inputs, theta.c_matrix_tfvar)
    if self._spec.compress_output:
      # block intermediate_result into blocks of size
      # output_block_size // output_compression_factor.
      block_size = self._spec.output_block_size // self._spec.output_compression_factor
      blocked_intermediate_result = tf.reshape(
          intermediate_result,
          tf.concat([
              tf.shape(intermediate_result)[:-1],
              [tf.shape(intermediate_result)[-1] // block_size], [block_size]
          ],
                    axis=0))
      # project blocked_intermediate_result up using d.
      projected_intermediate_result = tf.matmul(blocked_intermediate_result,
                                                theta.d_matrix_tfvar)
      # flatten the block dimension; the last dimension grows by
      # output_compression_factor.
      compressed_result = tf.reshape(
          projected_intermediate_result,
          tf.concat([
              tf.shape(intermediate_result)[:-1],
              [
                  tf.shape(intermediate_result)[-1] *
                  self._spec.output_compression_factor
              ]
          ], axis=0))
    else:
      compressed_result = intermediate_result
    return compressed_result
class BlockCompressionOp(CompressionOpInterface):
  """Implements a block diagonal compression OP.

  Replaces the weight matrix with a block-diagonal matrix. This produces an
  effect similar to input/output compression but without the need to reshape
  the input vector.

  block_method: string.
    "mask" creates block diagonal matrices by masking out entries outside of the
    diagonal blocks.
    "loop" stores the blocks in a rank 3 tensor and loops through them during
    the multiplication step.
  block_compression_factor: integer.
    Factor by which number of (non-zero) parameters in weight matrix is reduced
    by. Also the number of diagonal blocks.
  """

  def __init__(self, scope='default_scope', spec=None, global_step=None):
    """Initializer.

    Args:
      scope: TF scope used for creating new TF variables.
      spec: compression hyper parameters default value given by
        self.get_default_hparams().
      global_step: tf variable that has the global step.
    """
    super(BlockCompressionOp, self).__init__(scope, spec, global_step)
    # Compression specification
    self._spec = spec if spec else self.get_default_hparams()
    logging.info('Compression spec in init CompressionOp is: ')
    self.print_hparams()
    self._global_step = self._setup_global_step(global_step)
    # public member variables to track the compressor, the variables and
    # other tf nodes corresponding to this OP.
    self.matrix_compressor = None
    self.a_matrix_tfvar = None
    self.b_matrix_tfvar = None
    self.c_matrix_tfvar = None
    self.final_op = None
    self.uncompressed_size = 0
    self.compressed_size = 0

  def print_hparams(self):
    """Logs the compression spec as JSON."""
    logging.info(self._spec.to_json())

  @staticmethod
  def get_default_hparams():
    """Get a tf.HParams object with the default values for the hyperparameters.

      name: string
        name of the compression specification. Used for adding summaries and ops
        under a common tensorflow name_scope.
      use_tpu: False
        indicates whether to use TPU.
      compression_option: integer
        indicates what type of factorization (if any) is used.
      rank: integer
        indicates what type of factorization (if any) is used.
      update_option: integer
        indicates how the update logic is being run. More specifically:
        0: TF_UPDATE - run the update logic in TF; needed when using GPU/TPU
        1: PYTHON_UPDATE - run the update logic in regular python as opposed
           to TF.
        2: TF_AND_PYTHON_UPDATE - run the update logic in TF and in regular
           python.
        3: NO_UPDATE - no alpha update; not required for some compression ops.

    Returns:
      tf.HParams object initialized to default values.
    """
    return contrib_hparam.HParams(
        name='block_compression',
        compression_frequency=10,
        use_tpu=False,
        compression_option=comp_op_utils.CompressionOptions.BLOCK_COMPRESSION,
        update_option=comp_op_utils.UpdateOptions.NO_UPDATE,
        begin_compression_step=1000,
        end_compression_step=2000,
        is_c_matrix_trainable=True,
        compress_input=False,
        compress_output=False,
        input_compression_factor=1,
        output_compression_factor=1,
        block_compression_factor=1,
        block_method='loop',
        add_summary=True)

  def add_compression_summaries(self):
    """Adds summaries of the norms of the a and c matrices."""
    with tf.name_scope(self._spec.name + '_summaries'):
      logging.info('add_compression_summaries scope name is %s',
                   self._spec.name)
      tf.compat.v2.summary.scalar(
          self.a_matrix_tfvar.op.name + '/a_matrix_norm',
          tf.norm(self.a_matrix_tfvar))
      tf.compat.v2.summary.scalar(
          self.c_matrix_tfvar.op.name + '/c_matrix_norm',
          tf.norm(self.c_matrix_tfvar))

  def get_apply_compression_op(self,
                               a_matrix_tfvar,
                               matrix_compressor,
                               scope='default_scope'):
    """Returns compressed tensorflow operator for block diagonal compression.

    Args:
      a_matrix_tfvar: TF variable representing a tensor variable in a model
      matrix_compressor: MatrixCompressorInferface object to specify the
        compression algorithm. Must return two matrices b_matrix,c_matrix in its
        compression.
      scope: TF scope used for creating new TF variables

    Returns:
      A TF node that has the compressed version of a_matrix_tfvar.
    """
    self.matrix_compressor = matrix_compressor
    if self._spec.block_method == 'mask':
      # create c_matrix with Xavier-style uniform init.
      c_limit = np.sqrt(3.0 * (1 / np.max(
          [1., (a_matrix_tfvar.shape[0] + a_matrix_tfvar.shape[1]) / 2.])))
      c_matrix = np.random.uniform(-c_limit, c_limit, size=a_matrix_tfvar.shape)
      # create block diagonal mask for c_matrix
      num_blocks = self._spec.block_compression_factor
      num_rows, num_cols = a_matrix_tfvar.shape.as_list()
      r_block, c_block = num_rows // num_blocks, num_cols // num_blocks
      c_mask = np.array([[
          float(j // c_block == i // r_block) for j in range(num_cols)
      ] for i in range(num_rows)])
    elif self._spec.block_method == 'loop':
      # create c_matrix, which is a rank 3 tensor
      c_limit = np.sqrt(3.0 * (1 / np.max(
          [1., (a_matrix_tfvar.shape[0] + a_matrix_tfvar.shape[1]) / 2.])))
      num_blocks = self._spec.block_compression_factor
      # BUGFIX: the per-block dimensions must be divided by num_blocks
      # (matching get_customized_apply_compression_op below); the previous
      # hard-coded `// 2` only worked for block_compression_factor == 2 and
      # produced shape-mismatched blocks otherwise.
      c_matrix_shape = [
          num_blocks, a_matrix_tfvar.shape[0] // num_blocks,
          a_matrix_tfvar.shape[1] // num_blocks
      ]
      c_matrix = np.random.uniform(-c_limit, c_limit, size=c_matrix_shape)
    # convert c_matrix and c_mask from numpy arrays to tf tensors
    with tf.compat.v1.variable_scope(scope, use_resource=True):
      self.c_matrix_tfvar = tf.compat.v1.get_variable(
          'c_matrix',
          dtype=tf.float32,
          initializer=c_matrix.astype(np.float32),
          trainable=self.matrix_compressor.get_spec().is_c_matrix_trainable)
      self.a_matrix_tfvar = a_matrix_tfvar
      if self._spec.block_method == 'mask':
        # The mask is a constant structural pattern, never trained.
        self.c_mask_tfvar = tf.compat.v1.get_variable(
            'c_mask',
            dtype=tf.float32,
            initializer=c_mask.astype(np.float32),
            trainable=False)
    # update_op and final_op not necessary for BlockCompressionOp.
    self.update_op = tf.no_op()
    self.final_op = tf.no_op()
    if self._spec.add_summary:
      self.add_compression_summaries()
    return [self.final_op, self.update_op]

  def get_customized_apply_compression_op(self,
                                          a_matrix_tfvar,
                                          matrix_compressor,
                                          layer_obj,
                                          weight_params_fn,
                                          weight_init_obj,
                                          scope='default_scope'):
    """Returns compressed operator for block diagonal compression.

    Args:
      a_matrix_tfvar: TF variable representing a tensor variable in a model.
      matrix_compressor: MatrixCompressorInferface object to specify the
        compression algorithm. Must return two matrices b_matrix,c_matrix in its
        compression.
      layer_obj: a customized layer object that handles variable creation.
      weight_params_fn: functional handle to create model parameters.
      weight_init_obj: a weight initialization object.
      scope: TF scope used for creating new TF variables.

    Returns:
      A TF node that has the compressed version of a_matrix_tfvar.
    """
    self.matrix_compressor = matrix_compressor
    with tf.variable_scope(scope) as scope:
      if self._spec.block_method == 'mask':
        # c_matrix has same shape as a_matrix
        c_matrix_pc = weight_params_fn(a_matrix_tfvar.shape,
                                       weight_init_obj.Xavier(1.0),
                                       layer_obj.params.dtype)
        # create block diagonal mask for c_matrix
        num_blocks = self._spec.block_compression_factor
        num_rows, num_cols = a_matrix_tfvar.shape.as_list()
        r_block, c_block = num_rows // num_blocks, num_cols // num_blocks
        c_mask = np.array(
            [[float(j // c_block == i // r_block)
              for j in range(num_cols)]
             for i in range(num_rows)])
        c_mask_pc = weight_params_fn(a_matrix_tfvar.shape,
                                     weight_init_obj.Constant(c_mask),
                                     layer_obj.params.dtype)
      elif self._spec.block_method == 'loop':
        # c_matrix is a rank 3 tensor consisting of num_blocks blocks
        num_blocks = self._spec.block_compression_factor
        c_matrix_pc = weight_params_fn([
            num_blocks, a_matrix_tfvar.shape[0] // num_blocks,
            a_matrix_tfvar.shape[1] // num_blocks
        ], weight_init_obj.Xavier(1.0), layer_obj.params.dtype)
      # create the c_matrix and c_mask variables
      layer_obj.CreateVariable(
          'c_matrix_tfvar',
          c_matrix_pc,
          trainable=self.matrix_compressor.get_spec().is_c_matrix_trainable)
      if self._spec.block_method == 'mask':
        layer_obj.CreateVariable('c_mask_tfvar', c_mask_pc, trainable=False)
    self.c_matrix_tfvar = layer_obj.vars.c_matrix_tfvar
    self.a_matrix_tfvar = a_matrix_tfvar
    if self._spec.block_method == 'mask':
      self.c_mask_tfvar = layer_obj.vars.c_mask_tfvar
    # update_op and final_op not necessary for BlockCompressionOp.
    self.final_op = tf.no_op()
    if self._spec.add_summary:
      self.add_compression_summaries()
    self.update_op = tf.no_op()
    return [self.final_op, self.update_op]

  def get_apply_matmul(self, left_operand):
    """Returns block diagonal compressed TensorFlow node for matmul.

    This method performs matmul according to the compression
    procedure.

    Args:
      left_operand: a Tensor that is the left operand in matmul.

    Returns:
      matmul_op: a TensorFlow node that performs matmul of left_operand with the
        compressed a_matrix_tfvar.
    """
    if self._spec.block_method == 'mask':
      return tf.matmul(left_operand,
                       tf.multiply(self.c_matrix_tfvar, self.c_mask_tfvar))
    elif self._spec.block_method == 'loop':
      # Split the input column-wise into one slice per block and multiply
      # each slice by its corresponding diagonal block.
      num_blocks = self._spec.block_compression_factor
      input_splitted = tf.split(left_operand, num_blocks, axis=-1)
      output_splitted = []
      for i, input_i in enumerate(input_splitted):
        output_splitted.append(tf.matmul(input_i, self.c_matrix_tfvar[i, :, :]))
      return tf.concat(output_splitted, axis=-1)

  def get_mix_operator(self, theta, concat):
    """Performs matrix multiplication on customized LSTM layers.

    This performs the block diagonal compressed equivalent of
    tf.matmul(concat, theta.wm).

    Args:
      theta: object in customized layer that contains weight tensors, etc.
      concat: the left operand of the matmul operation. a rank 2 tensor.

    Returns:
      A TensorFlow node that has compressed version of
      tf.matmul(concat, theta.wm).
    """
    if self._spec.block_method == 'mask':
      return tf.matmul(concat,
                       tf.multiply(theta.c_matrix_tfvar, theta.c_mask_tfvar))
    elif self._spec.block_method == 'loop':
      num_blocks = self._spec.block_compression_factor
      input_splitted = tf.split(concat, num_blocks, axis=-1)
      output_splitted = []
      for i, input_i in enumerate(input_splitted):
        output_splitted.append(
            tf.matmul(input_i, theta.c_matrix_tfvar[i, :, :]))
      return tf.concat(output_splitted, axis=-1)

  def get_matmul_operator(self,
                          inputs,
                          wm,
                          transpose_a=False,
                          transpose_b=False):
    """Performs matrix multiplication for customized Softmax layers.

    This performs the block diagonal compressed equivalent of
    tf.matmul(inputs, wm).

    Args:
      inputs: the left operand of the matmul operation. a rank 2 tensor.
      wm: the right operand of the matmul operator. a rank 2 tensor.
      transpose_a: whether inputs tensor needs to be transposed before matmul.
      transpose_b: whether wm tensor needs to be transposed before matmul.

    Returns:
      A TensorFlow node that has compressed version of
      tf.matmul(inputs, wm).
    """
    if transpose_a:
      inputs = tf.transpose(inputs)
    if transpose_b:
      wm = tf.transpose(wm)
    if self._spec.block_method == 'mask':
      return tf.matmul(inputs,
                       tf.multiply(self.c_matrix_tfvar, self.c_mask_tfvar))
    elif self._spec.block_method == 'loop':
      num_blocks = self._spec.block_compression_factor
      input_splitted = tf.split(inputs, num_blocks, axis=-1)
      output_splitted = []
      for i, input_i in enumerate(input_splitted):
        output_splitted.append(
            tf.matmul(input_i, self.c_matrix_tfvar[i, :, :]))
      return tf.concat(output_splitted, axis=-1)

  def get_einsum_operator(self, inputs, layerobj):
    """Performs compressed matrix multiplication for customized ProjectionLayer.

    This performs the block diagonal compressed equivalent of
    tf.matmul(inputs, weight).

    Args:
      inputs: the left operand of the matmul operation.
      layerobj: the ProjectionLayer object from where get_einsum_operator is
        called.

    Returns:
      A TensorFlow node that has compressed version of
      tf.matmul(inputs, wm).
    """
    theta = layerobj.theta
    if self._spec.block_method == 'mask':
      return tf.matmul(inputs,
                       tf.multiply(theta.c_matrix_tfvar, theta.c_mask_tfvar))
    elif self._spec.block_method == 'loop':
      num_blocks = self._spec.block_compression_factor
      input_splitted = tf.split(inputs, num_blocks, axis=-1)
      output_splitted = []
      for i, input_i in enumerate(input_splitted):
        # NOTE(review): the 'mask' branch reads the weight from theta, while
        # this branch reads self.c_matrix_tfvar; presumably equivalent in the
        # customized path (both are set in get_customized_apply_compression_op)
        # but worth confirming against the caller's theta semantics.
        output_splitted.append(tf.matmul(input_i, self.c_matrix_tfvar[i, :, :]))
      return tf.concat(output_splitted, axis=-1)
| 39.700055 | 90 | 0.671345 |
7953d0fc2ad622a50b15d569e46f56c509413ecb | 6,783 | py | Python | third_party/WebKit/Source/bindings/scripts/code_generator_web_module.py | google-ar/chromium | 2441c86a5fd975f09a6c30cddb57dfb7fc239699 | [
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 777 | 2017-08-29T15:15:32.000Z | 2022-03-21T05:29:41.000Z | third_party/WebKit/Source/bindings/scripts/code_generator_web_module.py | harrymarkovskiy/WebARonARCore | 2441c86a5fd975f09a6c30cddb57dfb7fc239699 | [
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 66 | 2017-08-30T18:31:18.000Z | 2021-08-02T10:59:35.000Z | third_party/WebKit/Source/bindings/scripts/code_generator_web_module.py | harrymarkovskiy/WebARonARCore | 2441c86a5fd975f09a6c30cddb57dfb7fc239699 | [
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 123 | 2017-08-30T01:19:34.000Z | 2022-03-17T22:55:31.000Z | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=import-error,print-statement,relative-import
"""Generates Blink Web Module bindings.
The Blink Web Module bindings provide a stable, IDL-generated interface for the
Web Modules.
The Web Modules are the high-level services like Autofill,
Autocomplete, Translate, Distiller, Phishing Detector, and others. Web Modules
typically want to introspec the document and rendering infromation to implement
browser features.
The bindings are meant to be as simple and as ephemeral as possible, mostly just
wrapping existing DOM classes. Their primary goal is to avoid leaking the actual
DOM classes to the Web Modules layer.
"""
import os
import posixpath
from code_generator import CodeGeneratorBase, render_template
# TODO(dglazkov): Move TypedefResolver to code_generator.py
from code_generator_v8 import TypedefResolver
# Generator identity recorded in the emitted files, e.g.
# 'code_generator_web_module.py'.
MODULE_PYNAME = os.path.splitext(os.path.basename(__file__))[0] + '.py'

# Only interfaces tagged with this IDL extended attribute get bindings.
WEB_MODULE_IDL_ATTRIBUTE = 'WebModuleAPI'
# Include needed whenever a string type appears in a signature.
STRING_INCLUDE_PATH = 'wtf/text/WTFString.h'
def interface_context(idl_interface):
    """Builds the jinja2 template context for a single IDL interface."""
    ctx = InterfaceContextBuilder(MODULE_PYNAME, TypeResolver())
    ctx.set_class_name(idl_interface.name)
    ctx.set_inheritance(idl_interface.parent)

    for attribute in idl_interface.attributes:
        ctx.add_attribute(attribute)
    for operation in idl_interface.operations:
        ctx.add_operation(operation)

    return ctx.build()
class TypeResolver(object):
    """Resolves Web IDL types into corresponding C++ types and include paths
    to the generated and existing files."""

    def includes_from_interface(self, base_type):
        # TODO(dglazkov): Are there any exceptional conditions here?
        return {base_type}

    def _includes_from_type(self, idl_type):
        # void and primitive types need no includes at all.
        if idl_type.is_void or idl_type.is_primitive_type:
            return set()
        # String types map onto WTF's String header.
        if idl_type.is_string_type:
            return {STRING_INCLUDE_PATH}
        # TODO(dglazkov): Handle complex/weird types.
        # TODO(dglazkov): Make these proper paths to generated and non-generated
        # files.
        return {idl_type.base_type}

    def includes_from_definition(self, idl_definition):
        return self._includes_from_type(idl_definition.idl_type)

    def type_from_definition(self, idl_definition):
        # TODO(dglazkov): The output of this method must be a reasonable C++
        # type that can be used directly in the jinja2 template.
        return idl_definition.idl_type.base_type
class InterfaceContextBuilder(object):
    """Incrementally builds the jinja2 template context for one interface.

    The context accumulates the class name, inheritance expression, attribute
    and method entries, and the set of C++ includes required by the types the
    builder has seen.
    """

    def __init__(self, code_generator, type_resolver):
        self.result = {'code_generator': code_generator}
        self.type_resolver = type_resolver

    def set_class_name(self, class_name):
        self.result['class_name'] = class_name

    def set_inheritance(self, base_interface):
        if base_interface is None:
            return
        self.result['inherits_expression'] = ' : public %s' % base_interface
        self._ensure_set('cpp_includes').update(
            self.type_resolver.includes_from_interface(base_interface))

    def _ensure_set(self, name):
        # Lazily creates the set entry so untouched keys stay absent.
        return self.result.setdefault(name, set())

    def _ensure_list(self, name):
        # Lazily creates the list entry so untouched keys stay absent.
        return self.result.setdefault(name, [])

    def add_attribute(self, idl_attribute):
        self._ensure_list('attributes').append(
            self.create_attribute(idl_attribute))
        self._ensure_set('cpp_includes').update(
            self.type_resolver.includes_from_definition(idl_attribute))

    def add_operation(self, idl_operation):
        # Anonymous special operations (e.g. indexed getters) have no name and
        # are skipped entirely, including their includes.
        if not idl_operation.name:
            return
        self._ensure_list('methods').append(
            self.create_method(idl_operation))
        self._ensure_set('cpp_includes').update(
            self.type_resolver.includes_from_definition(idl_operation))

    def _signature_entry(self, idl_definition):
        # Shared by create_method/create_attribute: both template entries have
        # the same {name, return_type} shape. Previously duplicated verbatim.
        return {
            'name': idl_definition.name,
            'return_type': self.type_resolver.type_from_definition(
                idl_definition),
        }

    def create_method(self, idl_operation):
        return self._signature_entry(idl_operation)

    def create_attribute(self, idl_attribute):
        return self._signature_entry(idl_attribute)

    def build(self):
        return self.result
class CodeGeneratorWebModule(CodeGeneratorBase):
    """Generates the Web Module binding files (WebFoo.h / WebFoo.cpp).

    Only interfaces tagged with the WebModuleAPI extended attribute are
    processed; all other definitions are skipped.
    """

    def __init__(self, info_provider, cache_dir, output_dir):
        CodeGeneratorBase.__init__(self, MODULE_PYNAME, info_provider,
                                   cache_dir, output_dir)
        self.typedef_resolver = TypedefResolver(info_provider)

    def get_template(self, file_extension):
        """Returns the jinja2 template for the given extension ('h' or 'cpp')."""
        template_filename = 'web_module_interface.%s.tmpl' % file_extension
        return self.jinja_env.get_template(template_filename)

    # TODO(dglazkov): Move to CodeGeneratorBase.
    def output_paths(self, definition_name):
        """Returns the (header_path, cpp_path) pair for a definition."""
        header_path = posixpath.join(self.output_dir,
                                     'Web%s.h' % definition_name)
        cpp_path = posixpath.join(self.output_dir,
                                  'Web%s.cpp' % definition_name)
        return header_path, cpp_path

    def generate_interface_code(self, interface):
        """Renders the header and cpp for one interface.

        Returns:
            A pair of (path, text) pairs, header first.

        Raises:
            ValueError: for partial or callback interfaces, which are not
                supported yet.
        """
        # TODO(dglazkov): Implement callback interfaces.
        # TODO(dglazkov): Make sure partial interfaces are handled.
        if interface.is_callback or interface.is_partial:
            raise ValueError('Partial or callback interfaces are not supported')

        template_context = interface_context(interface)
        cpp_text = render_template(self.get_template('cpp'), template_context)
        header_text = render_template(self.get_template('h'), template_context)
        header_path, cpp_path = self.output_paths(interface.name)

        return (
            (header_path, header_text),
            (cpp_path, cpp_text)
        )

    def generate_code(self, definitions, definition_name):
        """Generates code for one definition; returns None when skipped."""
        self.typedef_resolver.resolve(definitions, definition_name)

        # NOTE: the previous version computed output paths and an empty
        # template context here that were never used (dead code); removed.
        # TODO(dglazkov): Implement dictionaries
        if definition_name not in definitions.interfaces:
            return None

        interface = definitions.interfaces[definition_name]
        if WEB_MODULE_IDL_ATTRIBUTE not in interface.extended_attributes:
            return None

        return self.generate_interface_code(interface)
| 36.86413 | 80 | 0.701902 |
7953d23e9ee28c6540142bfe54dbf185ea6e76f6 | 12,890 | py | Python | setup.py | Technologicat/orderfix | 0235bec6bd4ad978356540d91878aed4a09826f6 | [
"BSD-2-Clause"
] | null | null | null | setup.py | Technologicat/orderfix | 0235bec6bd4ad978356540d91878aed4a09826f6 | [
"BSD-2-Clause"
] | null | null | null | setup.py | Technologicat/orderfix | 0235bec6bd4ad978356540d91878aed4a09826f6 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
"""setuptools-based setup.py for orderfix.
Supports Python 2.7 and 3.4.
Usage as usual with setuptools:
python setup.py build_ext
python setup.py build
python setup.py install
python setup.py sdist
For details, see
http://setuptools.readthedocs.io/en/latest/setuptools.html#command-reference
or
python setup.py --help
python setup.py --help-commands
python setup.py --help bdist_wheel # or any command
"""
from __future__ import division, print_function, absolute_import
# Portability shim: FileNotFoundError is Python 3 only; on Python 2.7,
# open() raises IOError and os.remove() raises OSError instead.
try:
    # Python 3
    MyFileNotFoundError = FileNotFoundError
except NameError:  # narrow the previously bare `except:` — only the missing
                   # builtin name should select the Python 2.7 fallback.
    # Python 2.7
    # - open() raises IOError
    # - remove() (not currently used here) raises OSError
    MyFileNotFoundError = (IOError, OSError)
#########################################################
# General config
#########################################################

# Name of the top-level package of your library.
#
# This is also the top level of its source tree, relative to the top-level project directory setup.py resides in.
#
libname="orderfix"

# Choose build type. (Selects the compiler/linker flag sets defined below.)
#
build_type="optimized"
#build_type="debug"

# Short description for package list on PyPI
#
SHORTDESC="reorder solutions of parametric studies to make continuous curves"

# Long description for package homepage on PyPI
#
DESC="""Reorder solutions of parametric studies (assumed to be in random order) to make continuous curves.
The common use case is postprocessing of numerically computed eigenvalues from parametric studies of linear PDE boundary-value problems.
The ordering of the numerically computed eigenvalues may suddenly change, as the problem parameter sweeps through the range of interest.
The reordering allows the plotting of continuous curves, which are much more readable visually than scatterplots of disconnected points.
Supports Python 2.7 and 3.4.
"""

# Set up data files for packaging.
#
# Directories (relative to the top-level directory where setup.py resides) in which to look for data files.
datadirs  = ("test",)

# File extensions to be considered as data files. (Literal, no wildcards.)
dataexts  = (".py", ".pyx", ".pxd", ".c", ".cpp", ".h", ".sh", ".lyx", ".tex", ".txt", ".pdf")

# Standard documentation to detect (and package if it exists).
#
standard_docs     = ["README", "LICENSE", "TODO", "CHANGELOG", "AUTHORS"]  # just the basename without file extension
standard_doc_exts = [".md", ".rst", ".txt", ""]  # commonly .md for GitHub projects, but other projects may use .rst or .txt (or even blank).
#########################################################
# Init
#########################################################
# check for Python 2.7 or later
# http://stackoverflow.com/questions/19534896/enforcing-python-version-in-setup-py
import sys
if sys.version_info < (2,7):
sys.exit('Sorry, Python < 2.7 is not supported')
import os
from setuptools import setup
from setuptools.extension import Extension
try:
from Cython.Build import cythonize
except ImportError:
sys.exit("Cython not found. Cython is needed to build the extension modules.")
#########################################################
# Definitions
#########################################################

# Define our base set of compiler and linker flags.
#
# This is geared toward x86_64, see
#    https://gcc.gnu.org/onlinedocs/gcc-4.6.4/gcc/i386-and-x86_002d64-Options.html
#
# Customize these as needed.
#
# Note that -O3 may sometimes cause mysterious problems, so we limit ourselves to -O2.

# Modules involving numerical computations
# (each *_args name below is a flat list of flag strings)
#
extra_compile_args_math_optimized    = ['-march=native', '-O2', '-msse', '-msse2', '-mfma', '-mfpmath=sse']
extra_compile_args_math_debug        = ['-march=native', '-O0', '-g']
extra_link_args_math_optimized       = []
extra_link_args_math_debug           = []

# Modules that do not involve numerical computations
#
extra_compile_args_nonmath_optimized = ['-O2']
extra_compile_args_nonmath_debug     = ['-O0', '-g']
extra_link_args_nonmath_optimized    = []
extra_link_args_nonmath_debug        = []

# Additional flags to compile/link with OpenMP
#
openmp_compile_args = ['-fopenmp']
openmp_link_args    = ['-fopenmp']

#########################################################
# Helpers
#########################################################

# Make absolute cimports work.
#
# See
#     https://github.com/cython/cython/wiki/PackageHierarchy
#
my_include_dirs = ["."]

# Choose the base set of compiler and linker flags.
# (Binds the my_extra_* aliases used when declaring extensions below.)
#
if build_type == 'optimized':
    my_extra_compile_args_math    = extra_compile_args_math_optimized
    my_extra_compile_args_nonmath = extra_compile_args_nonmath_optimized
    my_extra_link_args_math       = extra_link_args_math_optimized
    my_extra_link_args_nonmath    = extra_link_args_nonmath_optimized
    my_debug = False
    print( "build configuration selected: optimized" )
elif build_type == 'debug':
    my_extra_compile_args_math    = extra_compile_args_math_debug
    my_extra_compile_args_nonmath = extra_compile_args_nonmath_debug
    my_extra_link_args_math       = extra_link_args_math_debug
    my_extra_link_args_nonmath    = extra_link_args_nonmath_debug
    my_debug = True
    print( "build configuration selected: debug" )
else:
    raise ValueError("Unknown build configuration '%s'; valid: 'optimized', 'debug'" % (build_type))
def declare_cython_extension(extName, use_math=False, use_openmp=False):
    """Declare a Cython extension module for setuptools.

    Parameters:
        extName : str
            Absolute module name, e.g. use `mylibrary.mypackage.mymodule`
            for the Cython source file `mylibrary/mypackage/mymodule.pyx`.
        use_math : bool
            If True, set math flags and link with ``libm``.
        use_openmp : bool
            If True, compile and link with OpenMP.

    Return value:
        Extension object
            that can be passed to ``setuptools.setup``.
    """
    extPath = extName.replace(".", os.path.sep)+".pyx"
    if use_math:
        compile_args = list(my_extra_compile_args_math) # copy
        link_args = list(my_extra_link_args_math)
        libraries = ["m"]  # link libm; this is a list of library names without the "lib" prefix
    else:
        compile_args = list(my_extra_compile_args_nonmath)
        link_args = list(my_extra_link_args_nonmath)
        libraries = None  # value if no libraries, see setuptools.extension._Extension
    # OpenMP
    if use_openmp:
        # BUGFIX: prepend the OpenMP flags element-wise. The previous
        # list.insert(0, openmp_compile_args) inserted the whole flag *list*
        # as a single (non-string) entry of extra_compile_args, which breaks
        # the generated compiler command line.
        compile_args = openmp_compile_args + compile_args
        link_args = openmp_link_args + link_args
    # See
    # http://docs.cython.org/src/tutorial/external.html
    #
    # on linking libraries to your Cython extensions.
    #
    return Extension( extName,
                      [extPath],
                      extra_compile_args=compile_args,
                      extra_link_args=link_args,
                      libraries=libraries
                    )
# Gather user-defined data files
#
# http://stackoverflow.com/questions/13628979/setuptools-how-to-make-package-contain-extra-data-folder-and-all-folders-inside
#
datafiles = []
def getext(filename):
    """Return the extension of *filename*, including the leading dot."""
    return os.path.splitext(filename)[1]
for datadir in datadirs:
    # One (directory, [files...]) entry per directory walked, keeping only
    # files whose extension is listed in dataexts.
    for root, dirs, files in os.walk(datadir):
        matching_files = [os.path.join(root, f) for f in files if getext(f) in dataexts]
        datafiles.append( (root, matching_files) )
# Add example figure to data files
datafiles.append( ('.', ["example.png"]) )
# Add standard documentation (README et al.), if any, to data files
#
detected_docs = []
for docname in standard_docs:
    for ext in standard_doc_exts:
        filename = docname + ext  # relative to the directory in which setup.py resides
        if os.path.isfile(filename):
            detected_docs.append(filename)
datafiles.append( ('.', detected_docs) )
# Extract __version__ from the package __init__.py
# (since it's not a good idea to actually run __init__.py during the build process).
#
# http://stackoverflow.com/questions/2058802/how-can-i-get-the-version-defined-in-setup-py-setuptools-in-my-package
#
import ast
init_py_path = os.path.join(libname, '__init__.py')
version = '0.0.unknown'
try:
    with open(init_py_path) as f:
        version_line = None
        for line in f:
            if line.startswith('__version__'):
                version_line = line
                break
        if version_line is not None:
            # Parse the assignment's right-hand side string literal without
            # executing the module.
            version = ast.parse(version_line).body[0].value.s
        else:
            print( "WARNING: Version information not found in '%s', using placeholder '%s'" % (init_py_path, version), file=sys.stderr )
except MyFileNotFoundError:
    print( "WARNING: Could not find file '%s', using placeholder version information '%s'" % (init_py_path, version), file=sys.stderr )
#########################################################
# Set up modules
#########################################################
# declare Cython extension modules here
#
ext_module_orderfix = declare_cython_extension( "orderfix.orderfix", use_math=True, use_openmp=False )
# this is mainly to allow a manual logical ordering of the declared modules
#
cython_ext_modules = [ext_module_orderfix]
# Call cythonize() explicitly, as recommended in the Cython documentation. See
# http://cython.readthedocs.io/en/latest/src/reference/compilation.html#compiling-with-distutils
#
# This will favor Cython's own handling of '.pyx' sources over that provided by setuptools.
#
# Note that my_ext_modules is just a list of Extension objects. We could add any C sources (not coming from Cython modules) here if needed.
# cythonize() just performs the Cython-level processing, and returns a list of Extension objects.
#
# gdb_debug makes Cython emit debugging information for cygdb in debug builds.
my_ext_modules = cythonize( cython_ext_modules, include_path=my_include_dirs, gdb_debug=my_debug )
#########################################################
# Call setup()
#########################################################
setup(
    name = "orderfix",
    version = version,
    author = "Juha Jeronen",
    author_email = "juha.jeronen@jyu.fi",
    url = "https://github.com/Technologicat/orderfix",
    description = SHORTDESC,
    long_description = DESC,
    license = "BSD",
    # free-form text field; http://stackoverflow.com/questions/34994130/what-platforms-argument-to-setup-in-setup-py-does
    platforms = ["Linux"],
    # See
    # https://pypi.python.org/pypi?%3Aaction=list_classifiers
    #
    # for the standard classifiers.
    #
    # Remember to configure these appropriately for your project, especially license!
    #
    classifiers = [ "Development Status :: 4 - Beta",
                    "Environment :: Console",
                    "Intended Audience :: Developers",
                    "Intended Audience :: Science/Research",
                    "License :: OSI Approved :: BSD License",
                    "Operating System :: POSIX :: Linux",
                    "Programming Language :: Cython",
                    "Programming Language :: Python",
                    "Programming Language :: Python :: 2",
                    "Programming Language :: Python :: 2.7",
                    "Programming Language :: Python :: 3",
                    "Programming Language :: Python :: 3.4",
                    "Topic :: Scientific/Engineering",
                    "Topic :: Scientific/Engineering :: Mathematics",
                    "Topic :: Software Development :: Libraries",
                    "Topic :: Software Development :: Libraries :: Python Modules"
                  ],
    # See
    # http://setuptools.readthedocs.io/en/latest/setuptools.html
    #
    setup_requires = ["cython", "numpy"],
    install_requires = ["numpy"],
    provides = ["orderfix"],
    # keywords for PyPI (in case you upload your project)
    #
    # e.g. the keywords your project uses as topics on GitHub, minus "python" (if there)
    #
    keywords = [""], # TODO
    # All extension modules (list of Extension objects)
    #
    ext_modules = my_ext_modules,
    # Declare packages so that python -m setup build will copy .py files (especially __init__.py).
    #
    # This **does not** automatically recurse into subpackages, so they must also be declared.
    #
    packages = ["orderfix"],
    # Install also Cython headers so that other Cython modules can cimport ours
    #
    # Fileglobs relative to each package, **does not** automatically recurse into subpackages.
    #
    # FIXME: force sdist, but sdist only, to keep the .pyx files (this puts them also in the bdist)
    package_data={'orderfix': ['*.pxd', '*.pyx']},
    # Disable zip_safe, because:
    # - Cython won't find .pxd files inside installed .egg, hard to compile libs depending on this one
    # - dynamic loader may need to have the library unzipped to a temporary directory anyway (at import time)
    #
    zip_safe = False,
    # Custom data files not inside a Python package
    data_files = datafiles
)
| 35.706371 | 141 | 0.647711 |
7953d2620d0c0f24e66264d86d5d612f57dd9825 | 2,297 | py | Python | zerver/webhooks/travis/tests.py | TylerPham2000/zulip | 2e7aaba0dde5517b4a55cb0bd782f009be45e3ba | [
"Apache-2.0"
] | 2 | 2020-11-12T12:28:46.000Z | 2020-11-16T11:17:46.000Z | zerver/webhooks/travis/tests.py | TylerPham2000/zulip | 2e7aaba0dde5517b4a55cb0bd782f009be45e3ba | [
"Apache-2.0"
] | 1 | 2021-08-05T14:46:02.000Z | 2021-08-05T14:46:02.000Z | zerver/webhooks/travis/tests.py | TylerPham2000/zulip | 2e7aaba0dde5517b4a55cb0bd782f009be45e3ba | [
"Apache-2.0"
] | 1 | 2021-08-05T14:27:13.000Z | 2021-08-05T14:27:13.000Z | import urllib
from zerver.lib.test_classes import WebhookTestCase
class TravisHookTests(WebhookTestCase):
    """Tests for the Travis CI webhook integration.

    Each test posts a recorded Travis payload fixture to the webhook URL
    and checks the Zulip message produced (or that none is produced).
    """
    STREAM_NAME = "travis"
    URL_TEMPLATE = "/api/v1/external/travis?stream={stream}&api_key={api_key}"
    FIXTURE_DIR_NAME = "travis"
    TOPIC = "builds"
    def test_travis_message(self) -> None:
        """
        Build notifications are generated by Travis after build completes.
        The subject describes the repo and Stash "project". The
        content describes the commits pushed.
        """
        expected_message = (
            "Author: josh_mandel\nBuild status: Passed :thumbs_up:\n"
            "Details: [changes](https://github.com/hl7-fhir/fhir-sv"
            "n/compare/6dccb98bcfd9...6c457d366a31), [build log](ht"
            "tps://travis-ci.org/hl7-fhir/fhir-svn/builds/92495257)"
        )
        self.check_webhook(
            "build",
            self.TOPIC,
            expected_message,
            content_type="application/x-www-form-urlencoded",
        )
    def test_ignore_travis_pull_request_by_default(self) -> None:
        """Pull-request events are dropped unless explicitly enabled."""
        self.subscribe(self.test_user, self.STREAM_NAME)
        result = self.client_post(
            self.url,
            self.get_body("pull_request"),
            content_type="application/x-www-form-urlencoded",
        )
        self.assert_json_success(result)
        # The request succeeds, but no message should land on our topic.
        msg = self.get_last_message()
        self.assertNotEqual(msg.topic_name(), self.TOPIC)
    def test_travis_pull_requests_are_not_ignored_when_applicable(self) -> None:
        """With ignore_pull_requests=false, PR events do produce a message."""
        self.url = f"{self.build_webhook_url()}&ignore_pull_requests=false"
        expected_message = (
            "Author: josh_mandel\nBuild status: Passed :thumbs_up:\n"
            "Details: [changes](https://github.com/hl7-fhir/fhir-sv"
            "n/compare/6dccb98bcfd9...6c457d366a31), [build log](ht"
            "tps://travis-ci.org/hl7-fhir/fhir-svn/builds/92495257)"
        )
        self.check_webhook(
            "pull_request",
            self.TOPIC,
            expected_message,
            content_type="application/x-www-form-urlencoded",
        )
    def get_body(self, fixture_name: str) -> str:
        """Return the form-encoded body Travis sends: payload=<fixture JSON>."""
        return urllib.parse.urlencode(
            {"payload": self.webhook_fixture_data("travis", fixture_name, file_type="json")}
        )
| 35.890625 | 92 | 0.626905 |
7953d269697a37a2692729c2c5d831ecc69509fe | 187 | py | Python | Basics of Classes 3.py | soer7022/hacktoberfest | e7c636bead5309f1e959decce99e745b37735ccd | [
"MIT"
] | null | null | null | Basics of Classes 3.py | soer7022/hacktoberfest | e7c636bead5309f1e959decce99e745b37735ccd | [
"MIT"
] | 2 | 2020-10-13T15:11:57.000Z | 2020-10-13T15:15:01.000Z | Basics of Classes 3.py | soer7022/hacktoberfest | e7c636bead5309f1e959decce99e745b37735ccd | [
"MIT"
] | 11 | 2020-10-06T07:25:06.000Z | 2020-10-25T23:03:38.000Z | class New:
def __init__(self, x,y):
self.x= x
self.new= y
def tr(self):
print(f'hii{self.new}')
# Demo: construct an instance, then exercise the attribute and the method.
point = New(9,10)
print(point.new)
point.tr()
| 15.583333 | 32 | 0.508021 |
7953d32ac520a141741a809a8c62a773be35a056 | 2,276 | py | Python | protopype/find_similar_images.py | sumeetkr/ClusteringSimilarImages | 53689a3c23361c73dde17ca61c21c6aaa37036c0 | [
"Apache-2.0"
] | null | null | null | protopype/find_similar_images.py | sumeetkr/ClusteringSimilarImages | 53689a3c23361c73dde17ca61c21c6aaa37036c0 | [
"Apache-2.0"
] | null | null | null | protopype/find_similar_images.py | sumeetkr/ClusteringSimilarImages | 53689a3c23361c73dde17ca61c21c6aaa37036c0 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
from __future__ import (absolute_import, division, print_function)
from PIL import Image
import six
import imagehash
"""
Demo of hashing
"""
def find_similar_images(userpaths, hashfunc = imagehash.average_hash):
    """Hash every image directly inside each directory of *userpaths* and
    report files whose perceptual hash collides (likely duplicates).

    Parameters:
        userpaths: iterable of directory paths to scan (non-recursive).
        hashfunc: callable taking a PIL Image and returning a hashable value
            (one of the imagehash functions by default).

    Results are printed to stdout; nothing is returned.
    """
    print('finding similar images')
    import os
    def is_image(filename):
        # Cheap extension-based heuristic; the trailing "'.jpg' in f" clause
        # also matches names that merely contain '.jpg' (e.g. 'a.jpg.bak'),
        # preserving the original behavior.
        f = filename.lower()
        return f.endswith(".png") or f.endswith(".jpg") or \
            f.endswith(".jpeg") or f.endswith(".bmp") or f.endswith(".gif") or '.jpg' in f
    image_filenames = []
    for userpath in userpaths:
        image_filenames += [os.path.join(userpath, path) for path in os.listdir(userpath) if is_image(path)]
    print(len(image_filenames))
    images = {}  # hash value -> list of filenames with that hash
    for img in sorted(image_filenames):
        try:
            img_hash = hashfunc(Image.open(img))
        except Exception as e:
            # BUGFIX: skip unreadable/broken files. Previously execution fell
            # through with `hash` either undefined (NameError on the first
            # file) or left over from the previous image, corrupting the
            # duplicate bookkeeping. Also renamed to avoid shadowing the
            # builtin hash().
            print('Problem:', e, 'with', img)
            continue
        if img_hash in images:
            print(img, ' already exists as', ' '.join(images[img_hash]))
            if 'dupPictures' in img:
                print('rm -v', img)
        images[img_hash] = images.get(img_hash, []) + [img]
    print(images)
    for k, img_list in six.iteritems(images):
        if len(img_list) > 1:
            print(" ".join(img_list))
if __name__ == '__main__':
    import sys, os
    def usage():
        # Print a usage summary to stderr and exit with a non-zero status.
        sys.stderr.write("""SYNOPSIS: %s [ahash|phash|dhash|...] [<directory>]
Identifies similar images in the directory.
Method:
ahash: Average hash
phash: Perceptual hash
dhash: Difference hash
whash-haar: Haar wavelet hash
whash-db4: Daubechies wavelet hash
(C) Johannes Buchner, 2013-2017
""" % sys.argv[0])
        sys.exit(1)
    # First CLI argument selects the hashing method (usage() exits if absent).
    hashmethod = sys.argv[1] if len(sys.argv) > 1 else usage()
    if hashmethod == 'ahash':
        hashfunc = imagehash.average_hash
    elif hashmethod == 'phash':
        hashfunc = imagehash.phash
    elif hashmethod == 'dhash':
        hashfunc = imagehash.dhash
    elif hashmethod == 'whash-haar':
        hashfunc = imagehash.whash
    elif hashmethod == 'whash-db4':
        hashfunc = lambda img: imagehash.whash(img, mode='db4')
    else:
        usage()
    # NOTE(review): the fallback is the *string* ".", not ["."]; iterating it
    # yields the single character '.', which happens to behave like ["."].
    userpaths = sys.argv[2:] if len(sys.argv) > 2 else "."
    find_similar_images(userpaths=userpaths, hashfunc=hashfunc)
| 29.179487 | 108 | 0.614675 |
7953d4a4609ee610be6b513c08a2b2fff3c93a25 | 13,202 | py | Python | tests/test_simple.py | JoostvDoorn/pywren | 04ded2c2c90ad72dfb40a1ade6def5a2be403f3a | [
"Apache-2.0"
] | 630 | 2017-02-24T17:15:05.000Z | 2022-03-29T23:27:06.000Z | tests/test_simple.py | Scusemua/pywren | 57f8ac9d988fea60df9510fc80c0e44d037d0e8c | [
"Apache-2.0"
] | 234 | 2017-02-24T02:56:58.000Z | 2022-03-06T21:17:13.000Z | tests/test_simple.py | Scusemua/pywren | 57f8ac9d988fea60df9510fc80c0e44d037d0e8c | [
"Apache-2.0"
] | 96 | 2017-02-25T02:17:10.000Z | 2022-03-16T04:30:52.000Z | #
# Copyright 2018 PyWren Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import time
import boto3
import uuid
import numpy as np
import time
import os
import pywren
import pywren.runtime
import subprocess
import logging
from six.moves import cPickle as pickle
import unittest
import numpy as np
from flaky import flaky
import sys
class SimpleAsync(unittest.TestCase):
    """Integration tests for Executor.call_async.

    These tests require a configured pywren backend (AWS Lambda) and
    exercise single-call result, exception propagation, cancellation,
    and abnormal worker exit.
    """
    def setUp(self):
        self.wrenexec = pywren.default_executor()
    def test_simple(self):
        def sum_list(x):
            return np.sum(x)
        x = np.arange(10)
        fut = self.wrenexec.call_async(sum_list, x)
        res = fut.result()
        self.assertEqual(res, np.sum(x))
    def test_simple2(self):
        def sum_list(x):
            return np.sum(x)
        x = np.arange(10)
        fut = self.wrenexec.call_async(sum_list, x)
        res = fut.result()
        self.assertEqual(res, np.sum(x))
    def test_exception(self):
        """
        Simple exception test
        """
        def throwexcept(x):
            raise Exception("Throw me out!")
        # NOTE(review): this local executor is unused; the call below uses
        # self.wrenexec from setUp.
        wrenexec = pywren.default_executor()
        fut = self.wrenexec.call_async(throwexcept, None)
        with pytest.raises(Exception) as execinfo:
            res = fut.result()
        assert 'Throw me out!' in str(execinfo.value)
    def test_exception2(self):
        """
        More complex exception
        """
        def throw_exception(x):
            1 / 0
            return 10
        wrenexec = pywren.default_executor()
        fut = wrenexec.call_async(throw_exception, None)
        # Capture the exception a local call raises, then compare it with
        # the one re-raised from the remote future: type must round-trip.
        try:
            throw_exception(1)
        except Exception as e:
            exc_type_true, exc_value_true, exc_traceback_true = sys.exc_info()
        try:
            fut.result()
        except Exception as e:
            exc_type_wren, exc_value_wren, exc_traceback_wren = sys.exc_info()
        assert exc_type_wren == exc_type_true
        assert type(exc_value_wren) == type(exc_value_true)
    def test_cancel(self):
        def sleep(x):
            time.sleep(x)
            return 0
        fut = self.wrenexec.call_async(sleep, 30)
        time.sleep(2)
        fut.cancel()
        # result() on a cancelled future must raise with "cancelled" in it.
        with pytest.raises(Exception) as execinfo:
            _ = fut.result()
        assert "cancelled" in str(execinfo.value)
    def test_exit(self):
        """
        what if the process just dies
        """
        def just_die(x):
            sys.exit(-1)
        wrenexec = pywren.default_executor()
        fut = wrenexec.call_async(just_die, 1)
        with pytest.raises(Exception) as execinfo:
            res = fut.result()
        assert 'non-zero return code' in str(execinfo.value)
class SimpleMap(unittest.TestCase):
    """Integration tests for Executor.map over small numpy inputs."""
    def setUp(self):
        self.wrenexec = pywren.default_executor()
    def test_empty_map(self):
        # Mapping over an empty list must yield an empty result set.
        futures = self.wrenexec.map(lambda x: x, [])
        res = np.array([f.result() for f in futures])
        np.testing.assert_array_equal(res, [])
    def test_map(self):
        def plus_one(x):
            return x + 1
        N = 10
        x = np.arange(N)
        futures = self.wrenexec.map(plus_one, x)
        # Poll with wait() until every future reports done.
        result_count = 0
        while result_count < N:
            fs_dones, fs_notdones = pywren.wait(futures)
            result_count = len(fs_dones)
        res = np.array([f.result() for f in futures])
        np.testing.assert_array_equal(res, x + 1)
    def test_map_doublewait(self):
        """
        Make sure we can call wait on a list of futures twice
        """
        def plus_one(x):
            return x + 1
        N = 10
        x = np.arange(N)
        futures = self.wrenexec.map(plus_one, x)
        pywren.wait(futures)
        pywren.wait(futures)
        res = np.array([f.result() for f in futures])
        np.testing.assert_array_equal(res, x + 1)
    def test_get_all_results(self):
        """get_all_results blocks and returns results in input order."""
        def plus_one(x):
            return x + 1
        N = 10
        x = np.arange(N)
        futures = self.wrenexec.map(plus_one, x)
        res = np.array(pywren.get_all_results(futures))
        np.testing.assert_array_equal(res, x + 1)
class SimpleReduce(unittest.TestCase):
    """Integration test for Executor.reduce over mapped futures."""
    def setUp(self):
        self.wrenexec = pywren.default_executor()
    def test_reduce(self):
        def plus_one(x):
            return x + 1
        N = 10
        x = np.arange(N)
        futures = self.wrenexec.map(plus_one, x)
        # sum over (arange(10) + 1) == 55
        reduce_future = self.wrenexec.reduce(sum, futures)
        np.testing.assert_array_equal(reduce_future.result(), 55)
class RuntimeCaching(unittest.TestCase):
    """Timing-based check that runtime caching speeds up invocation."""
    def setUp(self):
        self.wrenexec = pywren.default_executor()
    @flaky(max_runs=3)
    def test_cached_runtime(self):
        """
        Test the runtime caching by manually running with it off
        and then running with it on and comparing invocation times.
        Note that due to aws lambda internals this might not
        do the right thing so we mark it as flaky
        """
        def test_add(x):
            return x + 7
        t1 = time.time()
        fut = self.wrenexec.map(test_add, [10], use_cached_runtime=False)[0]
        res = fut.result()
        t2 = time.time()
        non_cached_latency = t2-t1
        assert fut.run_status['runtime_cached'] == False
        assert res == 17
        t1 = time.time()
        fut = self.wrenexec.map(test_add, [10], use_cached_runtime=True)[0]
        res = fut.result()
        t2 = time.time()
        cached_latency = t2-t1
        assert res == 17
        assert fut.run_status['runtime_cached'] == True
        # The cached invocation is expected (not guaranteed) to be faster.
        assert cached_latency < non_cached_latency
class SerializeFutures(unittest.TestCase):
    """Futures must survive a pickle round-trip and still resolve."""
    def setUp(self):
        self.wrenexec = pywren.default_executor()
    def test_map(self):
        def plus_one(x):
            return x + 1
        N = 10
        x = np.arange(N)
        futures_original = self.wrenexec.map(plus_one, x)
        # Round-trip the future objects through pickle before waiting.
        futures_str = pickle.dumps(futures_original)
        futures = pickle.loads(futures_str)
        result_count = 0
        while result_count < N:
            fs_dones, fs_notdones = pywren.wait(futures)
            result_count = len(fs_dones)
        res = np.array([f.result() for f in futures])
        np.testing.assert_array_equal(res, x + 1)
class ConfigErrors(unittest.TestCase):
    """Config validation: a runtime for the wrong Python must be rejected."""
    def test_version_mismatch(self):
        my_version_str = pywren.runtime.version_str(sys.version_info)
        # Pick any supported runtime version that is NOT the local one.
        for supported_version in pywren.wrenconfig.default_runtime.keys():
            if my_version_str != supported_version:
                wrong_version = supported_version
        config = pywren.wrenconfig.default()
        config['runtime']['s3_key'] = pywren.wrenconfig.default_runtime[wrong_version]
        with pytest.raises(Exception) as excinfo:
            pywren.lambda_executor(config)
        assert 'python version' in str(excinfo.value)
class WaitTest(unittest.TestCase):
    """Tests for pywren.wait with ALL_COMPLETED / ANY_COMPLETED and
    futures originating from one or several executors/callsets."""
    def setUp(self):
        self.wrenexec = pywren.default_executor()
    def test_all_complete(self):
        def wait_x_sec_and_plus_one(x):
            time.sleep(x)
            return x + 1
        N = 10
        x = np.arange(N)
        futures = pywren.default_executor().map(wait_x_sec_and_plus_one, x)
        fs_dones, fs_notdones = pywren.wait(futures,
                                            return_when=pywren.wren.ALL_COMPLETED)
        res = np.array([f.result() for f in fs_dones])
        np.testing.assert_array_equal(res, x+1)
    def test_any_complete(self):
        def wait_x_sec_and_plus_one(x):
            time.sleep(x)
            return x + 1
        N = 10
        x = np.arange(N)
        futures = pywren.default_executor().map(wait_x_sec_and_plus_one, x)
        # Drain futures incrementally: each ANY_COMPLETED wait must return
        # at least one newly-done future.
        fs_notdones = futures
        while (len(fs_notdones) > 0):
            fs_dones, fs_notdones = pywren.wait(fs_notdones,
                                                return_when=pywren.wren.ANY_COMPLETED,
                                                WAIT_DUR_SEC=1)
            self.assertTrue(len(fs_dones) > 0)
        res = np.array([f.result() for f in futures])
        np.testing.assert_array_equal(res, x+1)
    def test_multiple_callset_id(self):
        """wait() must handle futures from two map() calls on one executor."""
        def wait_x_sec_and_plus_one(x):
            time.sleep(x)
            return x + 1
        N = 10
        x = np.arange(N)
        pywx = pywren.default_executor()
        futures1 = pywx.map(wait_x_sec_and_plus_one, x)
        futures2 = pywx.map(wait_x_sec_and_plus_one, x)
        fs_dones, fs_notdones = pywren.wait(futures1 + futures2,
                                            return_when=pywren.wren.ALL_COMPLETED)
        res = np.array([f.result() for f in fs_dones])
        np.testing.assert_array_equal(res, np.concatenate((x,x))+1)
    def test_multiple_callset_id_diff_executors(self):
        """wait() must handle futures coming from two distinct executors."""
        def wait_x_sec_and_plus_one(x):
            time.sleep(x)
            return x + 1
        N = 10
        x = np.arange(N)
        futures1 = pywren.default_executor().map(wait_x_sec_and_plus_one, x)
        futures2 = pywren.default_executor().map(wait_x_sec_and_plus_one, x)
        fs_dones, fs_notdones = pywren.wait(futures1 + futures2,
                                            return_when=pywren.wren.ALL_COMPLETED)
        res = np.array([f.result() for f in fs_dones])
        np.testing.assert_array_equal(res, np.concatenate((x,x))+1)
# This test is commented out because it doesn't work with multiple executors (Vaishaal).
# If we need it later we will need some more monkey-patching, but it is unclear we actually need this.
'''
class RuntimePaths(unittest.TestCase):
"""
Test to make sure that we have the correct python and
other utils in our path at runtime
"""
def test_paths(self):
def run_command(x):
return subprocess.check_output(x, shell=True).decode('ascii')
cmd = "conda info"
wrenexec = pywren.default_executor()
fut = wrenexec.call_async(run_command, cmd)
res = fut.result()
assert "Current conda install" in res
'''
class Limits(unittest.TestCase):
    """
    Tests basic seatbelts
    """
    def test_map_item_limit(self):
        """map() at the configured item limit works; one past it raises."""
        TOO_BIG_COUNT = 100
        conf = pywren.wrenconfig.default()
        if 'scheduler' not in conf:
            conf['scheduler'] = {}
        conf['scheduler']['map_item_limit'] = TOO_BIG_COUNT
        wrenexec = pywren.default_executor(config=conf)
        def plus_one(x):
            return x + 1
        N = 10
        x = np.arange(N)
        futures = wrenexec.map(plus_one, x)
        pywren.get_all_results(futures)
        # now too big
        with pytest.raises(ValueError) as excinfo:
            x = np.arange(TOO_BIG_COUNT+1)
            futures = wrenexec.map(plus_one, x )
class EnvVars(unittest.TestCase):
    """
    Can we set the environment vars to map?
    """
    def test_env(self):
        # The remote function dumps its environment; extra_env entries
        # passed to call_async must appear in it.
        def get_env(_):
            return dict(os.environ)
        wrenexec = pywren.default_executor()
        extra_env = {"HELLO" : "WORLD"}
        fut = wrenexec.call_async(get_env, None,
                                  extra_env=extra_env)
        res = fut.result()
        assert "HELLO" in res.keys()
        assert res["HELLO"] == "WORLD"
class Futures(unittest.TestCase):
    """Tests for the future state predicates succeeded()/errored()/done()."""
    def setUp(self):
        self.wrenexec = pywren.default_executor()
    def test_succeeded_errored(self):
        def sum_list(x):
            return np.sum(x)
        def sum_error(_):
            raise Exception("whaaaa")
        x = np.arange(10)
        fut = self.wrenexec.call_async(sum_list, x)
        # Before result() resolves, neither flag is set.
        assert not fut.succeeded()
        assert not fut.errored()
        res = fut.result()
        self.assertEqual(res, np.sum(x))
        assert fut.succeeded()
        assert not fut.errored()
        fut = self.wrenexec.call_async(sum_error, x)
        assert not fut.succeeded()
        assert not fut.errored()
        with pytest.raises(Exception):
            _ = fut.result()
        # A raising call flips errored() but not succeeded().
        assert not fut.succeeded()
        assert fut.errored()
    def test_done(self):
        """
        Check if done works correctly
        """
        def sum_except(x):
            s = np.sum(x)
            if s >= 1:
                raise Exception("whaaaa")
            return s
        # done() must become True for both the success case (sum == 0)...
        x = np.zeros(10)
        fut = self.wrenexec.call_async(sum_except, x)
        while not fut.done():
            time.sleep(1)
        # ...and the raising case (sum >= 1).
        x = np.zeros(10) + 17
        fut = self.wrenexec.call_async(sum_except, x)
        while not fut.done():
            time.sleep(1)
| 26.887984 | 100 | 0.58726 |
7953d4e52373eedae4c939dfa3bc4df196476455 | 13,525 | py | Python | pypy/module/_io/test/test_io.py | m4sterchain/mesapy | ed546d59a21b36feb93e2309d5c6b75aa0ad95c9 | [
"Apache-2.0",
"OpenSSL"
] | 381 | 2018-08-18T03:37:22.000Z | 2022-02-06T23:57:36.000Z | pypy/module/_io/test/test_io.py | m4sterchain/mesapy | ed546d59a21b36feb93e2309d5c6b75aa0ad95c9 | [
"Apache-2.0",
"OpenSSL"
] | 16 | 2018-09-22T18:12:47.000Z | 2022-02-22T20:03:59.000Z | pypy/module/_io/test/test_io.py | m4sterchain/mesapy | ed546d59a21b36feb93e2309d5c6b75aa0ad95c9 | [
"Apache-2.0",
"OpenSSL"
] | 30 | 2018-08-20T03:16:34.000Z | 2022-01-12T17:39:22.000Z | from __future__ import with_statement
from rpython.tool.udir import udir
class AppTestIoModule:
    """PyPy app-level tests for the basic `io`/`_io` class hierarchy
    (IOBase behavior, exceptions, destructors, raw-IO read semantics).

    Python 2 app-level code: `raises(...)` is injected by the PyPy test
    framework, and str literals double as byte buffers.
    """
    # PyPy test config: which interpreter modules the app-level space needs.
    spaceconfig = dict(usemodules=['_io'])
    def test_import(self):
        import io
    def test_iobase(self):
        import io
        io.IOBase()
        class MyFile(io.BufferedIOBase):
            def __init__(self, filename):
                pass
        MyFile("file")
    def test_openclose(self):
        import io
        # Leaving the `with` block closes the object; closed methods raise.
        with io.BufferedIOBase() as f:
            assert not f.closed
            f._checkClosed()
        assert f.closed
        raises(ValueError, f._checkClosed)
    def test_iter(self):
        import io
        # IOBase iteration is driven by readline() until it returns "".
        class MyFile(io.IOBase):
            def __init__(self):
                self.lineno = 0
            def readline(self):
                self.lineno += 1
                if self.lineno == 1:
                    return "line1"
                elif self.lineno == 2:
                    return "line2"
                return ""
        assert list(MyFile()) == ["line1", "line2"]
    def test_exception(self):
        import _io
        e = _io.UnsupportedOperation("seek")
    def test_default_implementations(self):
        import _io
        file = _io._IOBase()
        raises(_io.UnsupportedOperation, file.seek, 0, 1)
        raises(_io.UnsupportedOperation, file.fileno)
        raises(_io.UnsupportedOperation, file.truncate)
    def test_blockingerror(self):
        import _io
        # BlockingIOError carries errno/strerror plus characters_written.
        try:
            raise _io.BlockingIOError(42, "test blocking", 123)
        except IOError as e:
            assert isinstance(e, _io.BlockingIOError)
            assert e.errno == 42
            assert e.strerror == "test blocking"
            assert e.characters_written == 123
    def test_dict(self):
        import _io
        f = _io.BytesIO()
        f.x = 42
        assert f.x == 42
        #
        # Monkey-patched write() must be honored by the buffered wrapper.
        def write(data):
            try:
                data = data.tobytes().upper()
            except AttributeError:
                data = data.upper()
            return _io.BytesIO.write(f, data)
        f.write = write
        bufio = _io.BufferedWriter(f)
        bufio.write("abc")
        bufio.flush()
        assert f.getvalue() == "ABC"
    def test_destructor(self):
        import io
        io.IOBase()
        record = []
        # Teardown order: __del__ first, which triggers close(), which flushes.
        class MyIO(io.IOBase):
            def __del__(self):
                record.append(1)
            def close(self):
                record.append(2)
                super(MyIO, self).close()
            def flush(self):
                record.append(3)
                super(MyIO, self).flush()
        MyIO()
        import gc; gc.collect()
        assert record == [1, 2, 3]
    def test_tell(self):
        import io
        # The default tell() is implemented as seek(0, SEEK_CUR).
        class MyIO(io.IOBase):
            def seek(self, pos, whence=0):
                return 42
        assert MyIO().tell() == 42
    def test_weakref(self):
        import _io
        import weakref
        f = _io.BytesIO()
        ref = weakref.ref(f)
        assert ref() is f
    def test_rawio_read(self):
        import _io
        # read() with no size falls back to readall(), draining readinto().
        class MockRawIO(_io._RawIOBase):
            stack = ['abc', 'de', '']
            def readinto(self, buf):
                data = self.stack.pop(0)
                buf[:len(data)] = data
                return len(data)
        assert MockRawIO().read() == 'abcde'
    def test_rawio_read_pieces(self):
        import _io
        # readinto() returning None propagates as read() -> None; oversized
        # chunks are split and the remainder pushed back onto the stack.
        class MockRawIO(_io._RawIOBase):
            stack = ['abc', 'de', None, 'fg', '']
            def readinto(self, buf):
                data = self.stack.pop(0)
                if data is None:
                    return None
                if len(data) <= len(buf):
                    buf[:len(data)] = data
                    return len(data)
                else:
                    buf[:] = data[:len(buf)]
                    self.stack.insert(0, data[len(buf):])
                    return len(buf)
        r = MockRawIO()
        assert r.read(2) == 'ab'
        assert r.read(2) == 'c'
        assert r.read(2) == 'de'
        assert r.read(2) is None
        assert r.read(2) == 'fg'
        assert r.read(2) == ''
    def test_rawio_readall_none(self):
        import _io
        # readall() must return None when the first readinto() yields None.
        class MockRawIO(_io._RawIOBase):
            read_stack = [None, None, "a"]
            def readinto(self, buf):
                v = self.read_stack.pop()
                if v is None:
                    return v
                buf[:len(v)] = v
                return len(v)
        r = MockRawIO()
        s = r.readall()
        assert s =="a"
        s = r.readall()
        assert s is None
class AppTestOpen:
    """PyPy app-level tests for io.open / _io.open against a real temp file:
    mode validation, buffer/raw attribute chains, seek/tell cookies, BOM
    handling on append, and regression tests for issue 1902.

    Python 2 app-level code (`xrange`, `unicode`, u"" literals).
    """
    spaceconfig = dict(usemodules=['_io', '_locale', 'array', 'struct'])
    def setup_class(cls):
        # Create one shared temp file; exposed app-level as self.tmpfile.
        tmpfile = udir.join('tmpfile').ensure()
        cls.w_tmpfile = cls.space.wrap(str(tmpfile))
    def test_open(self):
        import io
        f = io.open(self.tmpfile, "rb")
        assert f.name.endswith('tmpfile')
        assert f.mode == 'rb'
        f.close()
        with io.open(self.tmpfile, "rt") as f:
            assert f.mode == "rt"
    def test_open_writable(self):
        import io
        f = io.open(self.tmpfile, "w+b")
        f.close()
    def test_valid_mode(self):
        import io
        raises(ValueError, io.open, self.tmpfile, "ww")
        raises(ValueError, io.open, self.tmpfile, "rwa")
        raises(ValueError, io.open, self.tmpfile, "b", newline="\n")
    def test_array_write(self):
        import _io, array
        a = array.array(b'i', range(10))
        n = len(a.tostring())
        # Writing a buffer-API object works both unbuffered and buffered.
        with _io.open(self.tmpfile, "wb", 0) as f:
            res = f.write(a)
            assert res == n
        with _io.open(self.tmpfile, "wb") as f:
            res = f.write(a)
            assert res == n
    def test_attributes(self):
        import _io
        with _io.open(self.tmpfile, "wb", buffering=0) as f:
            assert f.mode == "wb"
        # Text wrapper exposes the buffered and raw layers with their own
        # name/mode attributes.
        with _io.open(self.tmpfile, "U") as f:
            assert f.name == self.tmpfile
            assert f.buffer.name == self.tmpfile
            assert f.buffer.raw.name == self.tmpfile
            assert f.mode == "U"
            assert f.buffer.mode == "rb"
            assert f.buffer.raw.mode == "rb"
        with _io.open(self.tmpfile, "w+") as f:
            assert f.mode == "w+"
            assert f.buffer.mode == "rb+"
            assert f.buffer.raw.mode == "rb+"
            with _io.open(f.fileno(), "wb", closefd=False) as g:
                assert g.mode == "wb"
                assert g.raw.mode == "wb"
                assert g.name == f.fileno()
                assert g.raw.name == f.fileno()
    def test_seek_and_tell(self):
        import _io
        with _io.open(self.tmpfile, "wb") as f:
            f.write("abcd")
        with _io.open(self.tmpfile) as f:
            decoded = f.read()
        # seek positions
        for i in xrange(len(decoded) + 1):
            # read lenghts
            for j in [1, 5, len(decoded) - i]:
                with _io.open(self.tmpfile) as f:
                    res = f.read(i)
                    assert res == decoded[:i]
                    cookie = f.tell()
                    res = f.read(j)
                    assert res == decoded[i:i + j]
                    f.seek(cookie)
                    res = f.read()
                    assert res == decoded[i:]
    def test_telling(self):
        import _io
        with _io.open(self.tmpfile, "w+", encoding="utf8") as f:
            p0 = f.tell()
            f.write(u"\xff\n")
            p1 = f.tell()
            f.write(u"\xff\n")
            p2 = f.tell()
            f.seek(0)
            assert f.tell() == p0
            res = f.readline()
            assert res == u"\xff\n"
            assert f.tell() == p1
            res = f.readline()
            assert res == u"\xff\n"
            assert f.tell() == p2
            f.seek(0)
            # tell() is forbidden while a next()-based iteration is pending.
            for line in f:
                assert line == u"\xff\n"
                raises(IOError, f.tell)
            assert f.tell() == p2
    def test_chunk_size(self):
        import _io
        with _io.open(self.tmpfile) as f:
            assert f._CHUNK_SIZE >= 1
            f._CHUNK_SIZE = 4096
            assert f._CHUNK_SIZE == 4096
            raises(ValueError, setattr, f, "_CHUNK_SIZE", 0)
    def test_truncate(self):
        import _io
        with _io.open(self.tmpfile, "w+") as f:
            f.write(u"abc")
        with _io.open(self.tmpfile, "w+") as f:
            f.truncate()
        with _io.open(self.tmpfile, "r+") as f:
            res = f.read()
            assert res == ""
    def test_errors_property(self):
        import _io
        with _io.open(self.tmpfile, "w") as f:
            assert f.errors == "strict"
        with _io.open(self.tmpfile, "w", errors="replace") as f:
            assert f.errors == "replace"
    def test_append_bom(self):
        import _io
        # The BOM is not written again when appending to a non-empty file
        for charset in ["utf-8-sig", "utf-16", "utf-32"]:
            with _io.open(self.tmpfile, "w", encoding=charset) as f:
                f.write(u"aaa")
                pos = f.tell()
            with _io.open(self.tmpfile, "rb") as f:
                res = f.read()
                assert res == "aaa".encode(charset)
            with _io.open(self.tmpfile, "a", encoding=charset) as f:
                f.write(u"xxx")
            with _io.open(self.tmpfile, "rb") as f:
                res = f.read()
                assert res == "aaaxxx".encode(charset)
    def test_newlines_attr(self):
        import _io
        with _io.open(self.tmpfile, "r") as f:
            assert f.newlines is None
        with _io.open(self.tmpfile, "wb") as f:
            f.write("hello\nworld\n")
        with _io.open(self.tmpfile, "r") as f:
            res = f.readline()
            assert res == "hello\n"
            res = f.readline()
            assert res == "world\n"
            assert f.newlines == "\n"
            assert type(f.newlines) is unicode
    def test_mod(self):
        import _io
        # Types in _io report their public module: exceptions live in
        # __builtin__/io, everything else in _io.
        typemods = dict((t, t.__module__) for t in vars(_io).values()
                        if isinstance(t, type))
        for t, mod in typemods.items():
            if t is _io.BlockingIOError:
                assert mod == '__builtin__'
            elif t is _io.UnsupportedOperation:
                assert mod == 'io'
            else:
                assert mod == '_io'
    def test_issue1902(self):
        import _io
        with _io.open(self.tmpfile, 'w+b', 4096) as f:
            f.write(b'\xff' * 13569)
            f.flush()
            f.seek(0, 0)
            f.read(1)
            f.seek(-1, 1)
            f.write(b'')
    def test_issue1902_2(self):
        import _io
        with _io.open(self.tmpfile, 'w+b', 4096) as f:
            f.write(b'\xff' * 13569)
            f.flush()
            f.seek(0, 0)
            f.read(1)
            f.seek(-1, 1)
            f.write(b'\xff')
            f.seek(1, 0)
            f.read(4123)
            f.seek(-4123, 1)
    def test_issue1902_3(self):
        import _io
        buffer_size = 4096
        with _io.open(self.tmpfile, 'w+b', buffer_size) as f:
            f.write(b'\xff' * buffer_size * 3)
            f.flush()
            f.seek(0, 0)
            f.read(1)
            f.seek(-1, 1)
            f.write(b'\xff')
            f.seek(1, 0)
            f.read(buffer_size * 2)
            assert f.tell() == 1 + buffer_size * 2
class AppTestIoAferClose:
    """Every I/O operation on a closed file must raise ValueError,
    across all open modes and buffering settings.

    NOTE(review): class name has a typo ('Afer' for 'After'); kept as-is
    since renaming would change test discovery.
    """
    spaceconfig = dict(usemodules=['_io'])
    def setup_class(cls):
        tmpfile = udir.join('tmpfile').ensure()
        cls.w_tmpfile = cls.space.wrap(str(tmpfile))
    def test_io_after_close(self):
        import _io
        for kwargs in [
                {"mode": "w"},
                {"mode": "wb"},
                {"mode": "w", "buffering": 1},
                {"mode": "w", "buffering": 2},
                {"mode": "wb", "buffering": 0},
                {"mode": "r"},
                {"mode": "rb"},
                {"mode": "r", "buffering": 1},
                {"mode": "r", "buffering": 2},
                {"mode": "rb", "buffering": 0},
                {"mode": "w+"},
                {"mode": "w+b"},
                {"mode": "w+", "buffering": 1},
                {"mode": "w+", "buffering": 2},
                {"mode": "w+b", "buffering": 0},
                ]:
            print kwargs
            if "b" not in kwargs["mode"]:
                kwargs["encoding"] = "ascii"
            f = _io.open(self.tmpfile, **kwargs)
            f.close()
            raises(ValueError, f.flush)
            raises(ValueError, f.fileno)
            raises(ValueError, f.isatty)
            raises(ValueError, f.__iter__)
            # peek/read1/readall/readinto only exist on some layer types.
            if hasattr(f, "peek"):
                raises(ValueError, f.peek, 1)
            raises(ValueError, f.read)
            if hasattr(f, "read1"):
                raises(ValueError, f.read1, 1024)
            if hasattr(f, "readall"):
                raises(ValueError, f.readall)
            if hasattr(f, "readinto"):
                raises(ValueError, f.readinto, bytearray(1024))
            raises(ValueError, f.readline)
            raises(ValueError, f.readlines)
            raises(ValueError, f.seek, 0)
            raises(ValueError, f.tell)
            raises(ValueError, f.truncate)
            raises(ValueError, f.write, b"" if "b" in kwargs['mode'] else u"")
            raises(ValueError, f.writelines, [])
            raises(ValueError, next, f)
| 30.257271 | 78 | 0.478447 |
7953d52c80347444ecae94c7d8ec4726bcb1dcbb | 10,156 | py | Python | tensorlayer/layers/normalization.py | awesome-archive/tensorlayer | 120a79f957926475b6f3db02da71a269f8130771 | [
"Apache-2.0"
] | null | null | null | tensorlayer/layers/normalization.py | awesome-archive/tensorlayer | 120a79f957926475b6f3db02da71a269f8130771 | [
"Apache-2.0"
] | null | null | null | tensorlayer/layers/normalization.py | awesome-archive/tensorlayer | 120a79f957926475b6f3db02da71a269f8130771 | [
"Apache-2.0"
] | 1 | 2018-03-12T23:57:57.000Z | 2018-03-12T23:57:57.000Z | # -*- coding: utf-8 -*-
from .core import *
class LocalResponseNormLayer(Layer):
    """The :class:`LocalResponseNormLayer` layer is for Local Response Normalization.

    See ``tf.nn.local_response_normalization`` or ``tf.nn.lrn`` for new TF version.
    The 4-D input tensor is a 3-D array of 1-D vectors (along the last dimension), and each vector is normalized independently.
    Within a given vector, each component is divided by the weighted square-sum of inputs within depth_radius.

    Parameters
    -----------
    layer : :class:`Layer`
        The previous layer with a 4D output shape.
    depth_radius : int or None
        Depth radius. 0-D. Half-width of the 1-D normalization window.
        ``None`` lets TensorFlow use its built-in default.
    bias : float or None
        An offset which is usually positive and shall avoid dividing by 0.
        ``None`` lets TensorFlow use its built-in default.
    alpha : float or None
        A scale factor which is usually positive.
        ``None`` lets TensorFlow use its built-in default.
    beta : float or None
        An exponent. ``None`` lets TensorFlow use its built-in default.
    name : str
        A unique layer name.

    """

    def __init__(
            self,
            layer,
            depth_radius=None,
            bias=None,
            alpha=None,
            beta=None,
            name='lrn_layer',
    ):
        Layer.__init__(self, name=name)
        self.inputs = layer.outputs

        # Bug fix: the old "%d"/"%f" conversions raised TypeError whenever the
        # default ``None`` values were used; "%s" handles numbers and None alike.
        logging.info("LocalResponseNormLayer %s: depth_radius: %s, bias: %s, alpha: %s, beta: %s" %
                     (self.name, depth_radius, bias, alpha, beta))

        with tf.variable_scope(name):
            # Per the tf.nn.lrn documentation, the generated op wrapper
            # substitutes the op defaults for any argument passed as None.
            self.outputs = tf.nn.lrn(self.inputs, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta)

        # Propagate bookkeeping lists from the previous layer and register
        # this layer's output.
        self.all_layers = list(layer.all_layers)
        self.all_params = list(layer.all_params)
        self.all_drop = dict(layer.all_drop)
        self.all_layers.extend([self.outputs])
class BatchNormLayer(Layer):
    """
    The :class:`BatchNormLayer` is a batch normalization layer for both fully-connected and convolution outputs.
    See ``tf.nn.batch_normalization`` and ``tf.nn.moments``.

    Parameters
    ----------
    layer : :class:`Layer`
        The previous layer.
    decay : float
        A decay factor for `ExponentialMovingAverage`.
        Suggest to use a large value for large dataset.
    epsilon : float
        Epsilon (small constant added to the variance to avoid division by zero).
    act : activation function
        The activation function of this layer.
    is_train : boolean
        Is being used for training or inference.
    beta_init : initializer
        The initializer for initializing beta.
    gamma_init : initializer
        The initializer for initializing gamma.
    dtype : TensorFlow dtype
        tf.float32 (default) or tf.float16.
    name : str
        A unique layer name.

    References
    ----------
    - `Source <https://github.com/ry/tensorflow-resnet/blob/master/resnet.py>`__
    - `stackoverflow <http://stackoverflow.com/questions/38312668/how-does-one-do-inference-with-batch-normalization-with-tensor-flow>`__

    """

    def __init__(
            self,
            layer,
            decay=0.9,
            epsilon=0.00001,
            act=tf.identity,
            is_train=False,
            beta_init=tf.zeros_initializer,
            gamma_init=tf.random_normal_initializer(mean=1.0, stddev=0.002),
            name='batchnorm_layer',
    ):
        Layer.__init__(self, name=name)
        self.inputs = layer.outputs
        logging.info("BatchNormLayer %s: decay:%f epsilon:%f act:%s is_train:%s" % (self.name, decay, epsilon, act.__name__, is_train))
        x_shape = self.inputs.get_shape()
        # Normalization parameters have one entry per channel (last axis).
        params_shape = x_shape[-1:]

        from tensorflow.python.training import moving_averages

        with tf.variable_scope(name):
            # Reduce over every axis except the channel axis.
            axis = list(range(len(x_shape) - 1))

            # 1. beta, gamma: the learned shift and scale parameters.
            # In TF > 0.12.1, tf.zeros_initializer is a class and must be
            # instantiated before it can be used as an initializer.
            if tf.__version__ > '0.12.1' and beta_init == tf.zeros_initializer:
                beta_init = beta_init()
            beta = tf.get_variable('beta', shape=params_shape, initializer=beta_init, dtype=D_TYPE, trainable=is_train)

            gamma = tf.get_variable(
                'gamma',
                shape=params_shape,
                initializer=gamma_init,
                dtype=D_TYPE,
                trainable=is_train,
            )

            # 2. Non-trainable running statistics used at inference time.
            if tf.__version__ > '0.12.1':
                moving_mean_init = tf.zeros_initializer()
            else:
                moving_mean_init = tf.zeros_initializer
            moving_mean = tf.get_variable('moving_mean', params_shape, initializer=moving_mean_init, dtype=D_TYPE, trainable=False)
            moving_variance = tf.get_variable(
                'moving_variance',
                params_shape,
                initializer=tf.constant_initializer(1.),
                dtype=D_TYPE,
                trainable=False,
            )

            # 3. Batch statistics and the ops that fold them into the
            # running averages.
            # These ops will only be performed when training.
            mean, variance = tf.nn.moments(self.inputs, axis)
            try:  # TF12: assign_moving_average grew a zero_debias argument.
                update_moving_mean = moving_averages.assign_moving_average(moving_mean, mean, decay, zero_debias=False)  # if zero_debias=True, has bias
                update_moving_variance = moving_averages.assign_moving_average(
                    moving_variance, variance, decay, zero_debias=False)  # if zero_debias=True, has bias
                # logging.info("TF12 moving")
            except Exception:  # TF11: older signature without zero_debias.
                update_moving_mean = moving_averages.assign_moving_average(moving_mean, mean, decay)
                update_moving_variance = moving_averages.assign_moving_average(moving_variance, variance, decay)
                # logging.info("TF11 moving")

            def mean_var_with_update():
                # Force the moving-average updates to run before the batch
                # statistics are consumed.
                with tf.control_dependencies([update_moving_mean, update_moving_variance]):
                    return tf.identity(mean), tf.identity(variance)

            if is_train:
                # Training: normalize with batch statistics and update the
                # running averages as a side effect.
                mean, var = mean_var_with_update()
                self.outputs = act(tf.nn.batch_normalization(self.inputs, mean, var, beta, gamma, epsilon))
            else:
                # Inference: normalize with the stored running statistics.
                self.outputs = act(tf.nn.batch_normalization(self.inputs, moving_mean, moving_variance, beta, gamma, epsilon))

            variables = [beta, gamma, moving_mean, moving_variance]

            # logging.info(len(variables))
            # for idx, v in enumerate(variables):
            #     logging.info("  var {:3}: {:15}   {}".format(idx, str(v.get_shape()), v))
            # exit()

        # Propagate bookkeeping lists and register this layer's output and
        # all four normalization variables.
        self.all_layers = list(layer.all_layers)
        self.all_params = list(layer.all_params)
        self.all_drop = dict(layer.all_drop)
        self.all_layers.extend([self.outputs])
        self.all_params.extend(variables)
class InstanceNormLayer(Layer):
    """Instance normalization layer.

    Each sample is normalized over its spatial axes independently, then a
    learned per-channel scale and offset are applied, followed by the
    activation function.

    Parameters
    -----------
    layer : :class:`Layer`
        The previous layer.
    act : activation function.
        The activation function of this layer.
    epsilon : float
        Small constant added to the variance for numerical stability.
    name : str
        A unique layer name

    """

    def __init__(
            self,
            layer,
            act=tf.identity,
            epsilon=1e-5,
            name='instan_norm',
    ):
        Layer.__init__(self, name=name)
        self.inputs = layer.outputs

        logging.info("InstanceNormLayer %s: epsilon:%f act:%s" % (self.name, epsilon, act.__name__))

        with tf.variable_scope(name) as vs:
            # Per-sample statistics over the spatial axes (1, 2).
            inst_mean, inst_var = tf.nn.moments(self.inputs, [1, 2], keep_dims=True)
            n_channels = self.inputs.get_shape()[-1]
            # Learned per-channel affine parameters.
            scale_var = tf.get_variable(
                'scale', [n_channels],
                initializer=tf.truncated_normal_initializer(mean=1.0, stddev=0.02), dtype=D_TYPE)
            offset_var = tf.get_variable(
                'offset', [n_channels],
                initializer=tf.constant_initializer(0.0), dtype=D_TYPE)
            # Normalize, re-scale, shift, then activate.
            normalized = tf.div(self.inputs - inst_mean, tf.sqrt(inst_var + epsilon))
            self.outputs = act(scale_var * normalized + offset_var)
            norm_vars = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name)

        # Propagate bookkeeping lists and register the output and variables.
        self.all_layers = list(layer.all_layers)
        self.all_params = list(layer.all_params)
        self.all_drop = dict(layer.all_drop)
        self.all_layers.extend([self.outputs])
        self.all_params.extend(norm_vars)
class LayerNormLayer(Layer):
    """Layer normalization layer.

    Thin wrapper around `tf.contrib.layers.layer_norm
    <https://www.tensorflow.org/api_docs/python/tf/contrib/layers/layer_norm>`__;
    every keyword argument is forwarded to that function.

    Parameters
    ----------
    layer : :class:`Layer`
        The previous layer.
    act : activation function
        The activation function of this layer.
    others : _
        See `tf.contrib.layers.layer_norm <https://www.tensorflow.org/api_docs/python/tf/contrib/layers/layer_norm>`__.

    """

    def __init__(self,
                 layer,
                 center=True,
                 scale=True,
                 act=tf.identity,
                 reuse=None,
                 variables_collections=None,
                 outputs_collections=None,
                 trainable=True,
                 begin_norm_axis=1,
                 begin_params_axis=-1,
                 name='layernorm'):

        # tf.contrib.layers.layer_norm is only available from TF 1.3 on.
        if tf.__version__ < "1.3":
            raise Exception("Please use TF 1.3+")

        Layer.__init__(self, name=name)
        self.inputs = layer.outputs

        logging.info("LayerNormLayer %s: act:%s" % (self.name, act.__name__))

        with tf.variable_scope(name) as vs:
            # Collect the forwarded options once, then delegate.
            norm_kwargs = dict(
                center=center,
                scale=scale,
                activation_fn=act,
                reuse=reuse,
                variables_collections=variables_collections,
                outputs_collections=outputs_collections,
                trainable=trainable,
                begin_norm_axis=begin_norm_axis,
                begin_params_axis=begin_params_axis,
                scope='var',
            )
            self.outputs = tf.contrib.layers.layer_norm(self.inputs, **norm_kwargs)
            norm_vars = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name)

        # Propagate bookkeeping lists and register the output and variables.
        self.all_layers = list(layer.all_layers)
        self.all_params = list(layer.all_params)
        self.all_drop = dict(layer.all_drop)
        self.all_layers.extend([self.outputs])
        self.all_params.extend(norm_vars)
| 38.037453 | 177 | 0.604766 |
7953d5fe533f17e515b79510955f8974d000fbae | 12,783 | py | Python | doc/user-manual/conf.py | Blaisorblade/Agda | 802a28aa8374f15fe9d011ceb80317fdb1ec0949 | [
"BSD-3-Clause"
] | 3 | 2015-03-28T14:51:03.000Z | 2015-12-07T20:14:00.000Z | doc/user-manual/conf.py | Blaisorblade/Agda | 802a28aa8374f15fe9d011ceb80317fdb1ec0949 | [
"BSD-3-Clause"
] | null | null | null | doc/user-manual/conf.py | Blaisorblade/Agda | 802a28aa8374f15fe9d011ceb80317fdb1ec0949 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Agda documentation build configuration file, created by
# sphinx-quickstart on Sun Jun 5 20:41:51 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# If you change the version here, also change it in the
# `requirements.txt` file [Issue #1936].
needs_sphinx = '1.5.1'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.imgmath',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ['.lagda.rst','.rst']
# The encoding of source files.
#
source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Agda'
copyright = u'2016, Ulf Norell, Andreas Abel, Nils Anders Danielsson, Makoto Takeyama, Catarina Coquand, with contributions by Stevan Andjelkovic, Marcin Benke, Jean-Philippe Bernardy, James Chapman, Jesper Cockx, Dominique Devriese, Peter Divanski, Fredrik Nordvall Forsberg, Olle Fredriksson, Daniel Gustafsson, Philipp Hausmann, Patrik Jansson, Alan Jeffrey, Wolfram Kahl, Fredrik Lindblad, Francesco Mazzoli, Stefan Monnier, Darin Morrison, Guilhem Moulin, Nicolas Pouillard, Andrés Sicard-Ramírez, Andrea Vezzosi, and many more.'
author = u'The Agda Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.5.3'
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
highlight_language = 'Agda'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'Agda v2.5.3'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
html_static_path = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Agdadoc'
# -- Options for LaTeX output ---------------------------------------------
# See Issue #1996.
# latex_engine = 'xelatex'
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
'preamble': r'''
\ifxetex
\usepackage{fontspec}
\setmonofont
[ BoldFont = DejaVuSansMono-Bold.ttf,
ItalicFont = DejaVuSansMono-Oblique.ttf,
BoldItalicFont = DejaVuSansMono-BoldOblique.ttf,
Scale = MatchLowercase,
]
{DejaVuSansMono.ttf}
\fi
'''
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Agda.tex', u'Agda Documentation', u'The Agda Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'agda', 'Agda Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Agda', 'Agda Documentation',
author, 'Agda', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
# epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to save
# visual space.
#
# epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#
# epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#
# epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#
# epub_tocdepth = 3
# Allow duplicate toc entries.
#
# epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#
# epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#
# epub_fix_images = False
# Scale large images.
#
# epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# epub_show_urls = 'inline'
# If false, no index is generated.
#
# epub_use_index = True
| 28.790541 | 534 | 0.704451 |
7953d615d03fc3f6770a98ca34b2919aeaf97037 | 2,606 | py | Python | tests/download_test_images.py | imi-bigpicture/opentile | 1a84284c5bc2c3515e14d5345b6077842897b547 | [
"Apache-2.0"
] | 4 | 2021-12-02T17:19:10.000Z | 2022-02-02T16:35:48.000Z | tests/download_test_images.py | sectra-medical/opentile | 1a84284c5bc2c3515e14d5345b6077842897b547 | [
"Apache-2.0"
] | 6 | 2021-12-02T13:22:04.000Z | 2022-03-09T14:01:19.000Z | tests/download_test_images.py | sectra-medical/opentile | 1a84284c5bc2c3515e14d5345b6077842897b547 | [
"Apache-2.0"
] | 4 | 2022-02-04T08:24:20.000Z | 2022-02-16T12:39:39.000Z | # Copyright 2022 SECTRA AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pathlib import Path
import requests
from hashlib import md5
# Aperio SVS test slide: target path inside the test-data dir, source URL,
# and expected MD5 checksum.
SVS_PATH = 'slides/svs/CMU-1/CMU-1.svs'
SVS_URL = 'https://openslide.cs.cmu.edu/download/openslide-testdata/Aperio/CMU-1.svs'
SVS_MD5 = '751b0b86a3c5ff4dfc8567cf24daaa85'
# Hamamatsu NDPI test slide: target path, source URL, and expected MD5.
NDPI_PATH = 'slides/ndpi/CMU-1/CMU-1.ndpi'
NDPI_URL = 'https://openslide.cs.cmu.edu/download/openslide-testdata/Hamamatsu/CMU-1.ndpi'
NDPI_MD5 = 'fb89dea54f85fb112e418a3cf4c7888a'
# Fallback download dir when OPENTILE_TESTDIR is not set.
DEFAULT_DIR = 'testdata'
# Bytes per streamed HTTP chunk.
DOWNLOAD_CHUNK_SIZE=8192
def download_file(url: str, filename: Path):
    """Stream the resource at *url* into *filename* on disk."""
    response = requests.get(url, stream=True)
    with response as request:
        # Fail loudly on any non-2xx status before writing anything.
        request.raise_for_status()
        with open(filename, 'wb') as out_file:
            for block in request.iter_content(chunk_size=DOWNLOAD_CHUNK_SIZE):
                out_file.write(block)
def _md5_of_file(path: Path) -> str:
    """Return the MD5 hex digest of *path*, reading in chunks so that
    multi-gigabyte slide files are never loaded into memory at once."""
    digest = md5()
    with open(path, 'rb') as saved_file:
        for chunk in iter(lambda: saved_file.read(DOWNLOAD_CHUNK_SIZE), b''):
            digest.update(chunk)
    return digest.hexdigest()


def main():
    """Download the openslide test slides (if missing) and verify checksums.

    The target directory is taken from the ``OPENTILE_TESTDIR`` environment
    variable, falling back to ``DEFAULT_DIR``.

    Raises
    ------
    ValueError
        If a downloaded (or pre-existing) file fails its MD5 check.
    """
    print("Downloading and/or checking testdata from openslide.")
    test_data_path = os.environ.get("OPENTILE_TESTDIR")
    if test_data_path is None:
        test_data_dir = Path(DEFAULT_DIR)
        print(
            "Env 'OPENTILE_TESTDIR' not set, downloading to default folder "
            f"{test_data_dir}."
        )
    else:
        test_data_dir = Path(test_data_path)
        print(f"Downloading to {test_data_dir}")
    os.makedirs(test_data_dir, exist_ok=True)
    # Map each expected file to its download URL and reference checksum.
    files = {
        test_data_dir.joinpath(SVS_PATH): (SVS_URL, SVS_MD5),
        test_data_dir.joinpath(NDPI_PATH): (NDPI_URL, NDPI_MD5)
    }
    for file, (url, checksum) in files.items():
        if not file.exists():
            print(f"{file} not found, downloading from {url}")
            os.makedirs(file.parent, exist_ok=True)
            download_file(url, file)
        else:
            print(f"{file} found, skipping download")
        # Bug fixes vs. the original: the checksum is now streamed instead
        # of read()ing the whole slide into memory, and the error message
        # typo "faild" is corrected.
        if _md5_of_file(file) != checksum:
            raise ValueError(f"Checksum failed for {file}")
        else:
            print(f"{file} checksum OK")
if __name__ == "__main__":
main() | 38.323529 | 90 | 0.670376 |
7953d6327cf2a8243ca7efa2bc44a3dab3826773 | 4,516 | py | Python | config/settings/production.py | DustinHolden/challenge | 7c62acb965600a9532aef35eb02545872cf39cc7 | [
"MIT"
] | null | null | null | config/settings/production.py | DustinHolden/challenge | 7c62acb965600a9532aef35eb02545872cf39cc7 | [
"MIT"
] | null | null | null | config/settings/production.py | DustinHolden/challenge | 7c62acb965600a9532aef35eb02545872cf39cc7 | [
"MIT"
] | null | null | null | from .common import * # noqa
from .common import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env('DJANGO_SECRET_KEY')
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['example.com'])
# DATABASES
# ------------------------------------------------------------------------------
DATABASES['default'] = env.db('DATABASE_URL') # noqa F405
DATABASES['default']['ATOMIC_REQUESTS'] = True # noqa F405
DATABASES['default']['CONN_MAX_AGE'] = env.int('CONN_MAX_AGE', default=60) # noqa F405
# CACHES
# ------------------------------------------------------------------------------
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
SECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
# TODO: set this to 60 seconds first and then to 518400 once you prove the former works
SECURE_HSTS_SECONDS = 60
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
'DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True
)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
SECURE_HSTS_PRELOAD = env.bool('DJANGO_SECURE_HSTS_PRELOAD', default=True)
# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
'DJANGO_SECURE_CONTENT_TYPE_NOSNIFF', default=True
)
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[-1]['OPTIONS']['loaders'] = \
[
(
'django.template.loaders.cached.Loader',
[
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
)
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email
DEFAULT_FROM_EMAIL = env(
'DJANGO_DEFAULT_FROM_EMAIL', default='no-reply <noreply@example.com>'
)
# https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = env(
'DJANGO_EMAIL_SUBJECT_PREFIX', default='[challenge-prefix]'
)
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
ADMIN_URL = env('DJANGO_ADMIN_URL')
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
# Dict-style logging config: everything at INFO and above goes to the
# console in a verbose format; noisy DB and DisallowedHost loggers are
# restricted to ERROR and do not propagate to the root logger.
LOGGING = {
    'version': 1,
    # Replace (rather than extend) any loggers configured before this dict.
    'disable_existing_loggers': True,
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s '
            '%(process)d %(thread)d %(message)s'
        }
    },
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'verbose',
        }
    },
    'root': {'level': 'INFO', 'handlers': ['console']},
    'loggers': {
        # SQL query logging is far too chatty at INFO in production.
        'django.db.backends': {
            'level': 'ERROR',
            'handlers': ['console'],
            'propagate': False,
        },
        # Errors logged by the SDK itself
        'sentry_sdk': {'level': 'ERROR', 'handlers': ['console'], 'propagate': False},
        # Host-header probes would otherwise flood the error log.
        'django.security.DisallowedHost': {
            'level': 'ERROR',
            'handlers': ['console'],
            'propagate': False,
        },
    },
}
| 37.016393 | 87 | 0.579717 |
7953d650636be17aec36a20133846faec5f03c89 | 67,029 | py | Python | venv/lib/python3.8/site-packages/pandas/tests/dtypes/test_inference.py | johncollinsai/post-high-frequency-data | 88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4 | [
"MIT"
] | 3 | 2018-04-24T13:31:51.000Z | 2019-07-09T07:31:43.000Z | venv/lib/python3.8/site-packages/pandas/tests/dtypes/test_inference.py | johncollinsai/post-high-frequency-data | 88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4 | [
"MIT"
] | 5 | 2022-02-13T14:38:04.000Z | 2022-02-15T00:13:07.000Z | venv/lib/python3.8/site-packages/pandas/tests/dtypes/test_inference.py | johncollinsai/post-high-frequency-data | 88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4 | [
"MIT"
] | 5 | 2018-04-24T13:31:56.000Z | 2021-10-21T05:06:23.000Z | """
These the test the public routines exposed in types/common.py
related to inference and not otherwise tested in types/test_common.py
"""
import collections
from collections import namedtuple
from datetime import (
date,
datetime,
time,
timedelta,
)
from decimal import Decimal
from fractions import Fraction
from io import StringIO
import itertools
from numbers import Number
import re
import numpy as np
import pytest
import pytz
from pandas._libs import (
lib,
missing as libmissing,
ops as libops,
)
import pandas.util._test_decorators as td
from pandas.core.dtypes import inference
from pandas.core.dtypes.common import (
ensure_int32,
is_bool,
is_complex,
is_datetime64_any_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64tz_dtype,
is_float,
is_integer,
is_number,
is_scalar,
is_scipy_sparse,
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
)
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DateOffset,
DatetimeIndex,
Index,
Interval,
Period,
PeriodIndex,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
)
import pandas._testing as tm
from pandas.core.arrays import (
BooleanArray,
FloatingArray,
IntegerArray,
)
@pytest.fixture(params=[True, False], ids=str)
def coerce(request):
    # Boolean fixture: each consuming test runs once with coercion enabled
    # and once without (used by the maybe_convert_numeric tests below).
    return request.param
class MockNumpyLikeArray:
    """
    A numpy-like duck array (think Pint's ``Quantity``) that is *not* an
    actual ``np.ndarray``.

    ``util.is_array(...)`` returns ``False`` for instances, yet the class
    defines ``__iter__`` (so ``isinstance(obj, abc.Iterable)`` is ``True``)
    and exposes ``ndim``, which pandas consults when special-casing
    0-dimensional arrays.

    pandas is expected to treat such duck arrays exactly like real numpy
    arrays; in particular, a 0-dimensional duck array is *not* a scalar
    (``is_scalar(np.array(1)) == False``) but is not list-like either.
    """

    def __init__(self, values):
        self._values = values

    def __iter__(self):
        inner = iter(self._values)

        def generator():
            yield from inner

        return generator()

    def __len__(self):
        return len(self._values)

    def __array__(self, t=None):
        return np.asarray(self._values, dtype=t)

    @property
    def ndim(self):
        return self._values.ndim

    @property
    def dtype(self):
        return self._values.dtype

    @property
    def size(self):
        return self._values.size

    @property
    def shape(self):
        return self._values.shape
# collect all objects to be tested for list-like-ness; use tuples of objects,
# whether they are list-like or not (special casing for sets), and their ID
# The sentinel string "set" (instead of True/False) marks objects whose
# list-likeness depends on the ``allow_sets`` flag; the consuming tests
# resolve it to True or False as appropriate.
ll_params = [
    ([1], True, "list"),
    ([], True, "list-empty"),
    ((1,), True, "tuple"),
    ((), True, "tuple-empty"),
    ({"a": 1}, True, "dict"),
    ({}, True, "dict-empty"),
    ({"a", 1}, "set", "set"),
    (set(), "set", "set-empty"),
    (frozenset({"a", 1}), "set", "frozenset"),
    (frozenset(), "set", "frozenset-empty"),
    (iter([1, 2]), True, "iterator"),
    (iter([]), True, "iterator-empty"),
    ((x for x in [1, 2]), True, "generator"),
    ((_ for _ in []), True, "generator-empty"),
    (Series([1]), True, "Series"),
    (Series([], dtype=object), True, "Series-empty"),
    (Series(["a"]).str, True, "StringMethods"),
    (Series([], dtype="O").str, True, "StringMethods-empty"),
    (Index([1]), True, "Index"),
    (Index([]), True, "Index-empty"),
    (DataFrame([[1]]), True, "DataFrame"),
    (DataFrame(), True, "DataFrame-empty"),
    (np.ndarray((2,) * 1), True, "ndarray-1d"),
    (np.array([]), True, "ndarray-1d-empty"),
    (np.ndarray((2,) * 2), True, "ndarray-2d"),
    (np.array([[]]), True, "ndarray-2d-empty"),
    (np.ndarray((2,) * 3), True, "ndarray-3d"),
    (np.array([[[]]]), True, "ndarray-3d-empty"),
    (np.ndarray((2,) * 4), True, "ndarray-4d"),
    (np.array([[[[]]]]), True, "ndarray-4d-empty"),
    (np.array(2), False, "ndarray-0d"),
    (MockNumpyLikeArray(np.ndarray((2,) * 1)), True, "duck-ndarray-1d"),
    (MockNumpyLikeArray(np.array([])), True, "duck-ndarray-1d-empty"),
    (MockNumpyLikeArray(np.ndarray((2,) * 2)), True, "duck-ndarray-2d"),
    (MockNumpyLikeArray(np.array([[]])), True, "duck-ndarray-2d-empty"),
    (MockNumpyLikeArray(np.ndarray((2,) * 3)), True, "duck-ndarray-3d"),
    (MockNumpyLikeArray(np.array([[[]]])), True, "duck-ndarray-3d-empty"),
    (MockNumpyLikeArray(np.ndarray((2,) * 4)), True, "duck-ndarray-4d"),
    (MockNumpyLikeArray(np.array([[[[]]]])), True, "duck-ndarray-4d-empty"),
    (MockNumpyLikeArray(np.array(2)), False, "duck-ndarray-0d"),
    (1, False, "int"),
    (b"123", False, "bytes"),
    (b"", False, "bytes-empty"),
    ("123", False, "string"),
    ("", False, "string-empty"),
    (str, False, "string-type"),
    (object(), False, "object"),
    (np.nan, False, "NaN"),
    (None, False, "None"),
]
# Unzip into parallel tuples consumed by the maybe_list_like fixture below.
objs, expected, ids = zip(*ll_params)
@pytest.fixture(params=zip(objs, expected), ids=ids)
def maybe_list_like(request):
    # Yields (object, expected) pairs from ll_params; ``expected`` is True,
    # False, or the sentinel "set" resolved by the consuming test.
    return request.param
def test_is_list_like(maybe_list_like):
    # With the default allow_sets=True, the "set" sentinel resolves to True.
    obj, expected = maybe_list_like
    if expected == "set":
        expected = True
    assert inference.is_list_like(obj) == expected
def test_is_list_like_disallow_sets(maybe_list_like):
    # With allow_sets=False, the "set" sentinel resolves to False.
    obj, expected = maybe_list_like
    if expected == "set":
        expected = False
    assert inference.is_list_like(obj, allow_sets=False) == expected
def test_is_list_like_recursion():
    # GH 33721
    # Deep recursion through is_list_like used to crash the interpreter with
    # SIGABRT instead of raising RecursionError. The helper must call itself
    # so the recursion limit is actually reached; without the self-call the
    # `with` block below would see no error and the test would always fail.
    def foo():
        inference.is_list_like([])
        foo()  # recurse until the interpreter's recursion limit is exceeded

    with tm.external_error_raised(RecursionError):
        foo()
def test_is_list_like_iter_is_none():
    # GH 43373
    # is_list_like used to yield false positives for objects that set
    # ``__iter__ = None`` (which makes them explicitly non-iterable).
    class NonIterable:
        __iter__ = None

        def __getitem__(self, item):
            return self

    assert not inference.is_list_like(NonIterable())
def test_is_sequence():
    # Tuples and lists are sequences; strings, types, and objects that only
    # implement __getitem__ are not.
    assert inference.is_sequence((1, 2))
    assert inference.is_sequence([1, 2])
    assert not inference.is_sequence("abcd")
    assert not inference.is_sequence(np.int64)

    class Indexable:
        def __getitem__(self):
            return 1

    assert not inference.is_sequence(Indexable())
def test_is_array_like():
    # Objects that are list-like and expose a ``dtype`` attribute qualify,
    # including duck arrays.
    for candidate in (
        Series([], dtype=object),
        Series([1, 2]),
        np.array(["a", "b"]),
        Index(["2016-01-01"]),
        np.array([2, 3]),
        MockNumpyLikeArray(np.array([2, 3])),
    ):
        assert inference.is_array_like(candidate)

    class DtypeList(list):
        dtype = "special"

    assert inference.is_array_like(DtypeList())

    # List-likes without a dtype attribute, and scalars, do not qualify.
    for candidate in ([1, 2, 3], (), "foo", 123):
        assert not inference.is_array_like(candidate)
@pytest.mark.parametrize(
    "inner",
    [
        [],
        [1],
        (1,),
        (1, 2),
        {"a": 1},
        {1, "a"},
        Series([1]),
        Series([], dtype=object),
        Series(["a"]).str,
        (x for x in range(5)),
    ],
)
@pytest.mark.parametrize("outer", [list, Series, np.array, tuple])
def test_is_nested_list_like_passes(inner, outer):
    # Any container of list-likes should itself be list-like.
    # NOTE(review): despite the test name this asserts is_list_like, not
    # is_nested_list_like — confirm whether that is intentional upstream.
    result = outer([inner for _ in range(5)])
    assert inference.is_list_like(result)
@pytest.mark.parametrize(
    "obj",
    [
        "abc",
        [],
        [1],
        (1,),
        ["a"],
        "a",
        {"a"},
        [1, 2, 3],
        Series([1]),
        DataFrame({"A": [1]}),
        ([1, 2] for _ in range(5)),
    ],
)
def test_is_nested_list_like_fails(obj):
    # None of these qualifies as a list-like of list-likes.
    assert not inference.is_nested_list_like(obj)
@pytest.mark.parametrize("ll", [{}, {"A": 1}, Series([1]), collections.defaultdict()])
def test_is_dict_like_passes(ll):
assert inference.is_dict_like(ll)
@pytest.mark.parametrize(
    "ll",
    [
        "1",
        1,
        [1, 2],
        (1, 2),
        range(2),
        Index([1]),
        dict,
        collections.defaultdict,
        Series,
    ],
)
def test_is_dict_like_fails(ll):
    # Scalars, sequences, Index, and mapping *classes* (not instances) fail.
    assert not inference.is_dict_like(ll)
@pytest.mark.parametrize("has_keys", [True, False])
@pytest.mark.parametrize("has_getitem", [True, False])
@pytest.mark.parametrize("has_contains", [True, False])
def test_is_dict_like_duck_type(has_keys, has_getitem, has_contains):
class DictLike:
def __init__(self, d):
self.d = d
if has_keys:
def keys(self):
return self.d.keys()
if has_getitem:
def __getitem__(self, key):
return self.d.__getitem__(key)
if has_contains:
def __contains__(self, key) -> bool:
return self.d.__contains__(key)
d = DictLike({1: 2})
result = inference.is_dict_like(d)
expected = has_keys and has_getitem and has_contains
assert result is expected
def test_is_file_like():
    # A file-like object needs a read or write attribute *and* __iter__.
    class FileStub:
        pass

    buf = StringIO("data")
    assert inference.is_file_like(buf)

    # No read/write attributes and no iterator protocol.
    stub = FileStub()
    assert not inference.is_file_like(stub)

    # A write attribute alone is not enough without __iter__.
    FileStub.write = lambda self: 0
    stub = FileStub()
    assert not inference.is_file_like(stub)

    # gh-16530: having the __iter__ attribute is what counts as a
    # "valid iterator" for this check.
    FileStub.__iter__ = lambda self: self

    # Valid write-only file-like object.
    stub = FileStub()
    assert inference.is_file_like(stub)

    del FileStub.write
    FileStub.read = lambda self: 0

    # Valid read-only file-like object.
    stub = FileStub()
    assert inference.is_file_like(stub)

    # Iterables without read/write attributes are not file-like.
    assert not inference.is_file_like([1, 2, 3])
test_tuple = collections.namedtuple("test_tuple", ["a", "b", "c"])
@pytest.mark.parametrize("ll", [test_tuple(1, 2, 3)])
def test_is_names_tuple_passes(ll):
assert inference.is_named_tuple(ll)
@pytest.mark.parametrize("ll", [(1, 2, 3), "a", Series({"pi": 3.14})])
def test_is_names_tuple_fails(ll):
assert not inference.is_named_tuple(ll)
def test_is_hashable():
    # all new-style classes are hashable by default
    class HashableClass:
        pass

    class UnhashableClass1:
        __hash__ = None

    class UnhashableClass2:
        def __hash__(self):
            raise TypeError("Not hashable")

    hashable = (1, 3.14, np.float64(3.14), "a", (), (1,), HashableClass())
    not_hashable = ([], UnhashableClass1())
    abc_hashable_not_really_hashable = (([],), UnhashableClass2())

    assert all(inference.is_hashable(obj) for obj in hashable)
    assert not any(inference.is_hashable(obj) for obj in not_hashable)
    assert not any(
        inference.is_hashable(obj) for obj in abc_hashable_not_really_hashable
    )

    # numpy.array is no longer collections.abc.Hashable as of
    # https://github.com/numpy/numpy/pull/5326, just test
    # is_hashable()
    assert not inference.is_hashable(np.array([]))
@pytest.mark.parametrize("ll", [re.compile("ad")])
def test_is_re_passes(ll):
assert inference.is_re(ll)
@pytest.mark.parametrize("ll", ["x", 2, 3, object()])
def test_is_re_fails(ll):
assert not inference.is_re(ll)
@pytest.mark.parametrize(
    "ll", [r"a", "x", r"asdf", re.compile("adsf"), r"\u2233\s*", re.compile(r"")]
)
def test_is_recompilable_passes(ll):
    # Anything re.compile accepts: strings or already-compiled patterns.
    assert inference.is_re_compilable(ll)
@pytest.mark.parametrize("ll", [1, [], object()])
def test_is_recompilable_fails(ll):
assert not inference.is_re_compilable(ll)
class TestInference:
@pytest.mark.parametrize(
"arr",
[
np.array(list("abc"), dtype="S1"),
np.array(list("abc"), dtype="S1").astype(object),
[b"a", np.nan, b"c"],
],
)
def test_infer_dtype_bytes(self, arr):
result = lib.infer_dtype(arr, skipna=True)
assert result == "bytes"
@pytest.mark.parametrize(
"value, expected",
[
(float("inf"), True),
(np.inf, True),
(-np.inf, False),
(1, False),
("a", False),
],
)
def test_isposinf_scalar(self, value, expected):
# GH 11352
result = libmissing.isposinf_scalar(value)
assert result is expected
@pytest.mark.parametrize(
"value, expected",
[
(float("-inf"), True),
(-np.inf, True),
(np.inf, False),
(1, False),
("a", False),
],
)
def test_isneginf_scalar(self, value, expected):
result = libmissing.isneginf_scalar(value)
assert result is expected
@pytest.mark.parametrize(
"convert_to_masked_nullable, exp",
[
(
True,
BooleanArray(
np.array([True, False], dtype="bool"), np.array([False, True])
),
),
(False, np.array([True, np.nan], dtype="object")),
],
)
def test_maybe_convert_nullable_boolean(self, convert_to_masked_nullable, exp):
# GH 40687
arr = np.array([True, np.NaN], dtype=object)
result = libops.maybe_convert_bool(
arr, set(), convert_to_masked_nullable=convert_to_masked_nullable
)
if convert_to_masked_nullable:
tm.assert_extension_array_equal(BooleanArray(*result), exp)
else:
result = result[0]
tm.assert_numpy_array_equal(result, exp)
@pytest.mark.parametrize("convert_to_masked_nullable", [True, False])
@pytest.mark.parametrize("coerce_numeric", [True, False])
@pytest.mark.parametrize(
"infinity", ["inf", "inF", "iNf", "Inf", "iNF", "InF", "INf", "INF"]
)
@pytest.mark.parametrize("prefix", ["", "-", "+"])
def test_maybe_convert_numeric_infinities(
self, coerce_numeric, infinity, prefix, convert_to_masked_nullable
):
# see gh-13274
result, _ = lib.maybe_convert_numeric(
np.array([prefix + infinity], dtype=object),
na_values={"", "NULL", "nan"},
coerce_numeric=coerce_numeric,
convert_to_masked_nullable=convert_to_masked_nullable,
)
expected = np.array([np.inf if prefix in ["", "+"] else -np.inf])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("convert_to_masked_nullable", [True, False])
def test_maybe_convert_numeric_infinities_raises(self, convert_to_masked_nullable):
msg = "Unable to parse string"
with pytest.raises(ValueError, match=msg):
lib.maybe_convert_numeric(
np.array(["foo_inf"], dtype=object),
na_values={"", "NULL", "nan"},
coerce_numeric=False,
convert_to_masked_nullable=convert_to_masked_nullable,
)
@pytest.mark.parametrize("convert_to_masked_nullable", [True, False])
def test_maybe_convert_numeric_post_floatify_nan(
self, coerce, convert_to_masked_nullable
):
# see gh-13314
data = np.array(["1.200", "-999.000", "4.500"], dtype=object)
expected = np.array([1.2, np.nan, 4.5], dtype=np.float64)
nan_values = {-999, -999.0}
out = lib.maybe_convert_numeric(
data,
nan_values,
coerce,
convert_to_masked_nullable=convert_to_masked_nullable,
)
if convert_to_masked_nullable:
expected = FloatingArray(expected, np.isnan(expected))
tm.assert_extension_array_equal(expected, FloatingArray(*out))
else:
out = out[0]
tm.assert_numpy_array_equal(out, expected)
def test_convert_infs(self):
arr = np.array(["inf", "inf", "inf"], dtype="O")
result, _ = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
arr = np.array(["-inf", "-inf", "-inf"], dtype="O")
result, _ = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
def test_scientific_no_exponent(self):
# See PR 12215
arr = np.array(["42E", "2E", "99e", "6e"], dtype="O")
result, _ = lib.maybe_convert_numeric(arr, set(), False, True)
assert np.all(np.isnan(result))
def test_convert_non_hashable(self):
# GH13324
# make sure that we are handing non-hashables
arr = np.array([[10.0, 2], 1.0, "apple"], dtype=object)
result, _ = lib.maybe_convert_numeric(arr, set(), False, True)
tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan]))
def test_convert_numeric_uint64(self):
arr = np.array([2 ** 63], dtype=object)
exp = np.array([2 ** 63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set())[0], exp)
arr = np.array([str(2 ** 63)], dtype=object)
exp = np.array([2 ** 63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set())[0], exp)
arr = np.array([np.uint64(2 ** 63)], dtype=object)
exp = np.array([2 ** 63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set())[0], exp)
@pytest.mark.parametrize(
"arr",
[
np.array([2 ** 63, np.nan], dtype=object),
np.array([str(2 ** 63), np.nan], dtype=object),
np.array([np.nan, 2 ** 63], dtype=object),
np.array([np.nan, str(2 ** 63)], dtype=object),
],
)
def test_convert_numeric_uint64_nan(self, coerce, arr):
expected = arr.astype(float) if coerce else arr.copy()
result, _ = lib.maybe_convert_numeric(arr, set(), coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("convert_to_masked_nullable", [True, False])
def test_convert_numeric_uint64_nan_values(
self, coerce, convert_to_masked_nullable
):
arr = np.array([2 ** 63, 2 ** 63 + 1], dtype=object)
na_values = {2 ** 63}
expected = (
np.array([np.nan, 2 ** 63 + 1], dtype=float) if coerce else arr.copy()
)
result = lib.maybe_convert_numeric(
arr,
na_values,
coerce_numeric=coerce,
convert_to_masked_nullable=convert_to_masked_nullable,
)
if convert_to_masked_nullable and coerce:
expected = IntegerArray(
np.array([0, 2 ** 63 + 1], dtype="u8"),
np.array([True, False], dtype="bool"),
)
result = IntegerArray(*result)
else:
result = result[0] # discard mask
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize(
"case",
[
np.array([2 ** 63, -1], dtype=object),
np.array([str(2 ** 63), -1], dtype=object),
np.array([str(2 ** 63), str(-1)], dtype=object),
np.array([-1, 2 ** 63], dtype=object),
np.array([-1, str(2 ** 63)], dtype=object),
np.array([str(-1), str(2 ** 63)], dtype=object),
],
)
@pytest.mark.parametrize("convert_to_masked_nullable", [True, False])
def test_convert_numeric_int64_uint64(
self, case, coerce, convert_to_masked_nullable
):
expected = case.astype(float) if coerce else case.copy()
result, _ = lib.maybe_convert_numeric(
case,
set(),
coerce_numeric=coerce,
convert_to_masked_nullable=convert_to_masked_nullable,
)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("convert_to_masked_nullable", [True, False])
def test_convert_numeric_string_uint64(self, convert_to_masked_nullable):
# GH32394
result = lib.maybe_convert_numeric(
np.array(["uint64"], dtype=object),
set(),
coerce_numeric=True,
convert_to_masked_nullable=convert_to_masked_nullable,
)
if convert_to_masked_nullable:
result = FloatingArray(*result)
else:
result = result[0]
assert np.isnan(result)
@pytest.mark.parametrize("value", [-(2 ** 63) - 1, 2 ** 64])
def test_convert_int_overflow(self, value):
# see gh-18584
arr = np.array([value], dtype=object)
result = lib.maybe_convert_objects(arr)
tm.assert_numpy_array_equal(arr, result)
def test_maybe_convert_objects_uint64(self):
# see gh-4471
arr = np.array([2 ** 63], dtype=object)
exp = np.array([2 ** 63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
# NumPy bug: can't compare uint64 to int64, as that
# results in both casting to float64, so we should
# make sure that this function is robust against it
arr = np.array([np.uint64(2 ** 63)], dtype=object)
exp = np.array([2 ** 63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2, -1], dtype=object)
exp = np.array([2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2 ** 63, -1], dtype=object)
exp = np.array([2 ** 63, -1], dtype=object)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
def test_maybe_convert_objects_datetime(self):
# GH27438
arr = np.array(
[np.datetime64("2000-01-01"), np.timedelta64(1, "s")], dtype=object
)
exp = arr.copy()
out = lib.maybe_convert_objects(
arr, convert_datetime=True, convert_timedelta=True
)
tm.assert_numpy_array_equal(out, exp)
arr = np.array([pd.NaT, np.timedelta64(1, "s")], dtype=object)
exp = np.array([np.timedelta64("NaT"), np.timedelta64(1, "s")], dtype="m8[ns]")
out = lib.maybe_convert_objects(
arr, convert_datetime=True, convert_timedelta=True
)
tm.assert_numpy_array_equal(out, exp)
# with convert_timedelta=True, the nan is a valid NA value for td64
arr = np.array([np.timedelta64(1, "s"), np.nan], dtype=object)
exp = exp[::-1]
out = lib.maybe_convert_objects(
arr, convert_datetime=True, convert_timedelta=True
)
tm.assert_numpy_array_equal(out, exp)
def test_maybe_convert_objects_dtype_if_all_nat(self):
arr = np.array([pd.NaT, pd.NaT], dtype=object)
out = lib.maybe_convert_objects(
arr, convert_datetime=True, convert_timedelta=True
)
# no dtype_if_all_nat passed -> we dont guess
tm.assert_numpy_array_equal(out, arr)
out = lib.maybe_convert_objects(
arr,
convert_datetime=True,
convert_timedelta=True,
dtype_if_all_nat=np.dtype("timedelta64[ns]"),
)
exp = np.array(["NaT", "NaT"], dtype="timedelta64[ns]")
tm.assert_numpy_array_equal(out, exp)
out = lib.maybe_convert_objects(
arr,
convert_datetime=True,
convert_timedelta=True,
dtype_if_all_nat=np.dtype("datetime64[ns]"),
)
exp = np.array(["NaT", "NaT"], dtype="datetime64[ns]")
tm.assert_numpy_array_equal(out, exp)
def test_maybe_convert_objects_dtype_if_all_nat_invalid(self):
# we accept datetime64[ns], timedelta64[ns], and EADtype
arr = np.array([pd.NaT, pd.NaT], dtype=object)
with pytest.raises(ValueError, match="int64"):
lib.maybe_convert_objects(
arr,
convert_datetime=True,
convert_timedelta=True,
dtype_if_all_nat=np.dtype("int64"),
)
@pytest.mark.parametrize("dtype", ["datetime64[ns]", "timedelta64[ns]"])
def test_maybe_convert_objects_datetime_overflow_safe(self, dtype):
stamp = datetime(2363, 10, 4) # Enterprise-D launch date
if dtype == "timedelta64[ns]":
stamp = stamp - datetime(1970, 1, 1)
arr = np.array([stamp], dtype=object)
out = lib.maybe_convert_objects(
arr, convert_datetime=True, convert_timedelta=True
)
# no OutOfBoundsDatetime/OutOfBoundsTimedeltas
tm.assert_numpy_array_equal(out, arr)
def test_maybe_convert_objects_mixed_datetimes(self):
ts = Timestamp("now")
vals = [ts, ts.to_pydatetime(), ts.to_datetime64(), pd.NaT, np.nan, None]
for data in itertools.permutations(vals):
data = np.array(list(data), dtype=object)
expected = DatetimeIndex(data)._data._ndarray
result = lib.maybe_convert_objects(data, convert_datetime=True)
tm.assert_numpy_array_equal(result, expected)
def test_maybe_convert_objects_timedelta64_nat(self):
obj = np.timedelta64("NaT", "ns")
arr = np.array([obj], dtype=object)
assert arr[0] is obj
result = lib.maybe_convert_objects(arr, convert_timedelta=True)
expected = np.array([obj], dtype="m8[ns]")
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"exp",
[
IntegerArray(np.array([2, 0], dtype="i8"), np.array([False, True])),
IntegerArray(np.array([2, 0], dtype="int64"), np.array([False, True])),
],
)
def test_maybe_convert_objects_nullable_integer(self, exp):
# GH27335
arr = np.array([2, np.NaN], dtype=object)
result = lib.maybe_convert_objects(arr, convert_to_nullable_integer=True)
tm.assert_extension_array_equal(result, exp)
@pytest.mark.parametrize(
"convert_to_masked_nullable, exp",
[
(True, IntegerArray(np.array([2, 0], dtype="i8"), np.array([False, True]))),
(False, np.array([2, np.nan], dtype="float64")),
],
)
def test_maybe_convert_numeric_nullable_integer(
self, convert_to_masked_nullable, exp
):
# GH 40687
arr = np.array([2, np.NaN], dtype=object)
result = lib.maybe_convert_numeric(
arr, set(), convert_to_masked_nullable=convert_to_masked_nullable
)
if convert_to_masked_nullable:
result = IntegerArray(*result)
tm.assert_extension_array_equal(result, exp)
else:
result = result[0]
tm.assert_numpy_array_equal(result, exp)
@pytest.mark.parametrize(
"convert_to_masked_nullable, exp",
[
(
True,
FloatingArray(
np.array([2.0, 0.0], dtype="float64"), np.array([False, True])
),
),
(False, np.array([2.0, np.nan], dtype="float64")),
],
)
def test_maybe_convert_numeric_floating_array(
self, convert_to_masked_nullable, exp
):
# GH 40687
arr = np.array([2.0, np.nan], dtype=object)
result = lib.maybe_convert_numeric(
arr, set(), convert_to_masked_nullable=convert_to_masked_nullable
)
if convert_to_masked_nullable:
tm.assert_extension_array_equal(FloatingArray(*result), exp)
else:
result = result[0]
tm.assert_numpy_array_equal(result, exp)
def test_maybe_convert_objects_bool_nan(self):
# GH32146
ind = Index([True, False, np.nan], dtype=object)
exp = np.array([True, False, np.nan], dtype=object)
out = lib.maybe_convert_objects(ind.values, safe=1)
tm.assert_numpy_array_equal(out, exp)
    @pytest.mark.parametrize(
        "data0",
        [
            True,
            1,
            1.0,
            1.0 + 1.0j,
            np.int8(1),
            np.int16(1),
            np.int32(1),
            np.int64(1),
            np.float16(1),
            np.float32(1),
            np.float64(1),
            np.complex64(1),
            np.complex128(1),
        ],
    )
    @pytest.mark.parametrize(
        "data1",
        [
            True,
            1,
            1.0,
            1.0 + 1.0j,
            np.int8(1),
            np.int16(1),
            np.int32(1),
            np.int64(1),
            np.float16(1),
            np.float32(1),
            np.float64(1),
            np.complex64(1),
            np.complex128(1),
        ],
    )
    def test_maybe_convert_objects_itemsize(self, data0, data1):
        # GH 40908
        # maybe_convert_objects should keep the widest itemsize when mixing
        # numpy scalars, and fall back to the platform defaults (8 bytes, or
        # 16 for complex) when Python builtin scalars are involved.
        data = [data0, data1]
        arr = np.array(data, dtype="object")
        # NOTE(review): np.find_common_type is deprecated as of NumPy 1.25;
        # this will eventually need np.result_type/np.promote_types — confirm
        # against the supported NumPy version range.
        common_kind = np.find_common_type(
            [type(data0), type(data1)], scalar_types=[]
        ).kind
        # "python" marks builtin scalars (bool/int/float/complex), which have
        # no .dtype attribute.
        kind0 = "python" if not hasattr(data0, "dtype") else data0.dtype.kind
        kind1 = "python" if not hasattr(data1, "dtype") else data1.dtype.kind
        if kind0 != "python" and kind1 != "python":
            # Both numpy scalars: widest itemsize of the two wins.
            kind = common_kind
            itemsize = max(data0.dtype.itemsize, data1.dtype.itemsize)
        elif is_bool(data0) or is_bool(data1):
            # Bool only stays bool when paired with another bool.
            kind = "bool" if (is_bool(data0) and is_bool(data1)) else "object"
            itemsize = ""
        elif is_complex(data0) or is_complex(data1):
            kind = common_kind
            itemsize = 16
        else:
            kind = common_kind
            itemsize = 8
        expected = np.array(data, dtype=f"{kind}{itemsize}")
        result = lib.maybe_convert_objects(arr)
        tm.assert_numpy_array_equal(result, expected)
    def test_mixed_dtypes_remain_object_array(self):
        # GH14956
        # A tz-aware datetime mixed with an int must not be coerced; the
        # object array should come back unchanged.
        arr = np.array([datetime(2015, 1, 1, tzinfo=pytz.utc), 1], dtype=object)
        result = lib.maybe_convert_objects(arr, convert_datetime=True)
        tm.assert_numpy_array_equal(result, arr)
@pytest.mark.parametrize(
"idx",
[
pd.IntervalIndex.from_breaks(range(5), closed="both"),
pd.period_range("2016-01-01", periods=3, freq="D"),
],
)
def test_maybe_convert_objects_ea(self, idx):
result = lib.maybe_convert_objects(
np.array(idx, dtype=object),
convert_period=True,
convert_interval=True,
)
tm.assert_extension_array_equal(result, idx._data)
class TestTypeInference:
# Dummy class used for testing with Python objects
class Dummy:
pass
def test_inferred_dtype_fixture(self, any_skipna_inferred_dtype):
# see pandas/conftest.py
inferred_dtype, values = any_skipna_inferred_dtype
# make sure the inferred dtype of the fixture is as requested
assert inferred_dtype == lib.infer_dtype(values, skipna=True)
@pytest.mark.parametrize("skipna", [True, False])
def test_length_zero(self, skipna):
result = lib.infer_dtype(np.array([], dtype="i4"), skipna=skipna)
assert result == "integer"
result = lib.infer_dtype([], skipna=skipna)
assert result == "empty"
# GH 18004
arr = np.array([np.array([], dtype=object), np.array([], dtype=object)])
result = lib.infer_dtype(arr, skipna=skipna)
assert result == "empty"
def test_integers(self):
arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype="O")
result = lib.infer_dtype(arr, skipna=True)
assert result == "integer"
arr = np.array([1, 2, 3, np.int64(4), np.int32(5), "foo"], dtype="O")
result = lib.infer_dtype(arr, skipna=True)
assert result == "mixed-integer"
arr = np.array([1, 2, 3, 4, 5], dtype="i4")
result = lib.infer_dtype(arr, skipna=True)
assert result == "integer"
@pytest.mark.parametrize(
"arr, skipna",
[
(np.array([1, 2, np.nan, np.nan, 3], dtype="O"), False),
(np.array([1, 2, np.nan, np.nan, 3], dtype="O"), True),
(np.array([1, 2, 3, np.int64(4), np.int32(5), np.nan], dtype="O"), False),
(np.array([1, 2, 3, np.int64(4), np.int32(5), np.nan], dtype="O"), True),
],
)
def test_integer_na(self, arr, skipna):
# GH 27392
result = lib.infer_dtype(arr, skipna=skipna)
expected = "integer" if skipna else "integer-na"
assert result == expected
def test_infer_dtype_skipna_default(self):
# infer_dtype `skipna` default deprecated in GH#24050,
# changed to True in GH#29876
arr = np.array([1, 2, 3, np.nan], dtype=object)
result = lib.infer_dtype(arr)
assert result == "integer"
def test_bools(self):
arr = np.array([True, False, True, True, True], dtype="O")
result = lib.infer_dtype(arr, skipna=True)
assert result == "boolean"
arr = np.array([np.bool_(True), np.bool_(False)], dtype="O")
result = lib.infer_dtype(arr, skipna=True)
assert result == "boolean"
arr = np.array([True, False, True, "foo"], dtype="O")
result = lib.infer_dtype(arr, skipna=True)
assert result == "mixed"
arr = np.array([True, False, True], dtype=bool)
result = lib.infer_dtype(arr, skipna=True)
assert result == "boolean"
arr = np.array([True, np.nan, False], dtype="O")
result = lib.infer_dtype(arr, skipna=True)
assert result == "boolean"
result = lib.infer_dtype(arr, skipna=False)
assert result == "mixed"
def test_floats(self):
arr = np.array([1.0, 2.0, 3.0, np.float64(4), np.float32(5)], dtype="O")
result = lib.infer_dtype(arr, skipna=True)
assert result == "floating"
arr = np.array([1, 2, 3, np.float64(4), np.float32(5), "foo"], dtype="O")
result = lib.infer_dtype(arr, skipna=True)
assert result == "mixed-integer"
arr = np.array([1, 2, 3, 4, 5], dtype="f4")
result = lib.infer_dtype(arr, skipna=True)
assert result == "floating"
arr = np.array([1, 2, 3, 4, 5], dtype="f8")
result = lib.infer_dtype(arr, skipna=True)
assert result == "floating"
def test_decimals(self):
# GH15690
arr = np.array([Decimal(1), Decimal(2), Decimal(3)])
result = lib.infer_dtype(arr, skipna=True)
assert result == "decimal"
arr = np.array([1.0, 2.0, Decimal(3)])
result = lib.infer_dtype(arr, skipna=True)
assert result == "mixed"
result = lib.infer_dtype(arr[::-1], skipna=True)
assert result == "mixed"
arr = np.array([Decimal(1), Decimal("NaN"), Decimal(3)])
result = lib.infer_dtype(arr, skipna=True)
assert result == "decimal"
arr = np.array([Decimal(1), np.nan, Decimal(3)], dtype="O")
result = lib.infer_dtype(arr, skipna=True)
assert result == "decimal"
# complex is compatible with nan, so skipna has no effect
@pytest.mark.parametrize("skipna", [True, False])
def test_complex(self, skipna):
# gets cast to complex on array construction
arr = np.array([1.0, 2.0, 1 + 1j])
result = lib.infer_dtype(arr, skipna=skipna)
assert result == "complex"
arr = np.array([1.0, 2.0, 1 + 1j], dtype="O")
result = lib.infer_dtype(arr, skipna=skipna)
assert result == "mixed"
result = lib.infer_dtype(arr[::-1], skipna=skipna)
assert result == "mixed"
# gets cast to complex on array construction
arr = np.array([1, np.nan, 1 + 1j])
result = lib.infer_dtype(arr, skipna=skipna)
assert result == "complex"
arr = np.array([1.0, np.nan, 1 + 1j], dtype="O")
result = lib.infer_dtype(arr, skipna=skipna)
assert result == "mixed"
# complex with nans stays complex
arr = np.array([1 + 1j, np.nan, 3 + 3j], dtype="O")
result = lib.infer_dtype(arr, skipna=skipna)
assert result == "complex"
# test smaller complex dtype; will pass through _try_infer_map fastpath
arr = np.array([1 + 1j, np.nan, 3 + 3j], dtype=np.complex64)
result = lib.infer_dtype(arr, skipna=skipna)
assert result == "complex"
    def test_string(self):
        # Placeholder: plain string inference is exercised elsewhere (e.g.
        # test_unicode and the any_skipna_inferred_dtype fixture).
        pass
def test_unicode(self):
arr = ["a", np.nan, "c"]
result = lib.infer_dtype(arr, skipna=False)
# This currently returns "mixed", but it's not clear that's optimal.
# This could also return "string" or "mixed-string"
assert result == "mixed"
arr = ["a", np.nan, "c"]
result = lib.infer_dtype(arr, skipna=True)
assert result == "string"
arr = ["a", "c"]
result = lib.infer_dtype(arr, skipna=False)
assert result == "string"
@pytest.mark.parametrize(
"dtype, missing, skipna, expected",
[
(float, np.nan, False, "floating"),
(float, np.nan, True, "floating"),
(object, np.nan, False, "floating"),
(object, np.nan, True, "empty"),
(object, None, False, "mixed"),
(object, None, True, "empty"),
],
)
@pytest.mark.parametrize("box", [Series, np.array])
def test_object_empty(self, box, missing, dtype, skipna, expected):
# GH 23421
arr = box([missing, missing], dtype=dtype)
result = lib.infer_dtype(arr, skipna=skipna)
assert result == expected
    def test_datetime(self):
        # An Index built from datetime.datetime objects infers "datetime64".
        dates = [datetime(2012, 1, x) for x in range(1, 20)]
        index = Index(dates)
        assert index.inferred_type == "datetime64"
def test_infer_dtype_datetime64(self):
arr = np.array(
[np.datetime64("2011-01-01"), np.datetime64("2011-01-01")], dtype=object
)
assert lib.infer_dtype(arr, skipna=True) == "datetime64"
@pytest.mark.parametrize("na_value", [pd.NaT, np.nan])
def test_infer_dtype_datetime64_with_na(self, na_value):
# starts with nan
arr = np.array([na_value, np.datetime64("2011-01-02")])
assert lib.infer_dtype(arr, skipna=True) == "datetime64"
arr = np.array([na_value, np.datetime64("2011-01-02"), na_value])
assert lib.infer_dtype(arr, skipna=True) == "datetime64"
@pytest.mark.parametrize(
"arr",
[
np.array(
[np.timedelta64("nat"), np.datetime64("2011-01-02")], dtype=object
),
np.array(
[np.datetime64("2011-01-02"), np.timedelta64("nat")], dtype=object
),
np.array([np.datetime64("2011-01-01"), Timestamp("2011-01-02")]),
np.array([Timestamp("2011-01-02"), np.datetime64("2011-01-01")]),
np.array([np.nan, Timestamp("2011-01-02"), 1.1]),
np.array([np.nan, "2011-01-01", Timestamp("2011-01-02")], dtype=object),
np.array([np.datetime64("nat"), np.timedelta64(1, "D")], dtype=object),
np.array([np.timedelta64(1, "D"), np.datetime64("nat")], dtype=object),
],
)
def test_infer_datetimelike_dtype_mixed(self, arr):
assert lib.infer_dtype(arr, skipna=False) == "mixed"
    def test_infer_dtype_mixed_integer(self):
        # A Timestamp alongside a true integer infers as "mixed-integer".
        arr = np.array([np.nan, Timestamp("2011-01-02"), 1])
        assert lib.infer_dtype(arr, skipna=True) == "mixed-integer"
@pytest.mark.parametrize(
"arr",
[
np.array([Timestamp("2011-01-01"), Timestamp("2011-01-02")]),
np.array([datetime(2011, 1, 1), datetime(2012, 2, 1)]),
np.array([datetime(2011, 1, 1), Timestamp("2011-01-02")]),
],
)
def test_infer_dtype_datetime(self, arr):
assert lib.infer_dtype(arr, skipna=True) == "datetime"
@pytest.mark.parametrize("na_value", [pd.NaT, np.nan])
@pytest.mark.parametrize(
"time_stamp", [Timestamp("2011-01-01"), datetime(2011, 1, 1)]
)
def test_infer_dtype_datetime_with_na(self, na_value, time_stamp):
# starts with nan
arr = np.array([na_value, time_stamp])
assert lib.infer_dtype(arr, skipna=True) == "datetime"
arr = np.array([na_value, time_stamp, na_value])
assert lib.infer_dtype(arr, skipna=True) == "datetime"
@pytest.mark.parametrize(
    "arr",
    [
        np.array([Timedelta("1 days"), Timedelta("2 days")]),
        np.array([np.timedelta64(1, "D"), np.timedelta64(2, "D")], dtype=object),
        np.array([timedelta(1), timedelta(2)]),
    ],
)
def test_infer_dtype_timedelta(self, arr):
    # Timedelta / np.timedelta64 / stdlib timedelta all infer "timedelta"
    assert lib.infer_dtype(arr, skipna=True) == "timedelta"
@pytest.mark.parametrize("na_value", [pd.NaT, np.nan])
@pytest.mark.parametrize(
    "delta", [Timedelta("1 days"), np.timedelta64(1, "D"), timedelta(1)]
)
def test_infer_dtype_timedelta_with_na(self, na_value, delta):
    # NaN/NaT interleaved with timedeltas is still "timedelta" under skipna
    # starts with nan
    arr = np.array([na_value, delta])
    assert lib.infer_dtype(arr, skipna=True) == "timedelta"

    arr = np.array([na_value, delta, na_value])
    assert lib.infer_dtype(arr, skipna=True) == "timedelta"
def test_infer_dtype_period(self):
    # GH 13664
    # same-frequency Periods infer "period"
    arr = np.array([Period("2011-01", freq="D"), Period("2011-02", freq="D")])
    assert lib.infer_dtype(arr, skipna=True) == "period"

    # non-homogeneous freqs -> mixed
    arr = np.array([Period("2011-01", freq="D"), Period("2011-02", freq="M")])
    assert lib.infer_dtype(arr, skipna=True) == "mixed"
@pytest.mark.parametrize("klass", [pd.array, Series, Index])
@pytest.mark.parametrize("skipna", [True, False])
def test_infer_dtype_period_array(self, klass, skipna):
    # https://github.com/pandas-dev/pandas/issues/23553
    # Period data through array/Series/Index wrappers still infers "period"
    values = klass(
        [
            Period("2011-01-01", freq="D"),
            Period("2011-01-02", freq="D"),
            pd.NaT,
        ]
    )
    assert lib.infer_dtype(values, skipna=skipna) == "period"

    # periods but mixed freq
    values = klass(
        [
            Period("2011-01-01", freq="D"),
            Period("2011-01-02", freq="M"),
            pd.NaT,
        ]
    )
    # with pd.array this becomes PandasArray which ends up as "unknown-array"
    exp = "unknown-array" if klass is pd.array else "mixed"
    assert lib.infer_dtype(values, skipna=skipna) == exp
def test_infer_dtype_period_mixed(self):
    # a Period next to an NaT-valued datetime64 is "mixed", in either order
    arr = np.array(
        [Period("2011-01", freq="M"), np.datetime64("nat")], dtype=object
    )
    assert lib.infer_dtype(arr, skipna=False) == "mixed"

    arr = np.array(
        [np.datetime64("nat"), Period("2011-01", freq="M")], dtype=object
    )
    assert lib.infer_dtype(arr, skipna=False) == "mixed"
@pytest.mark.parametrize("na_value", [pd.NaT, np.nan])
def test_infer_dtype_period_with_na(self, na_value):
    # NaN/NaT interleaved with Periods is still "period" under skipna
    # starts with nan
    arr = np.array([na_value, Period("2011-01", freq="D")])
    assert lib.infer_dtype(arr, skipna=True) == "period"

    arr = np.array([na_value, Period("2011-01", freq="D"), na_value])
    assert lib.infer_dtype(arr, skipna=True) == "period"
@pytest.mark.parametrize(
    "data",
    [
        [datetime(2017, 6, 12, 19, 30), datetime(2017, 3, 11, 1, 15)],
        [Timestamp("20170612"), Timestamp("20170311")],
        [
            Timestamp("20170612", tz="US/Eastern"),
            Timestamp("20170311", tz="US/Eastern"),
        ],
        [date(2017, 6, 12), Timestamp("20170311", tz="US/Eastern")],
        [np.datetime64("2017-06-12"), np.datetime64("2017-03-11")],
        [np.datetime64("2017-06-12"), datetime(2017, 3, 11, 1, 15)],
    ],
)
def test_infer_datetimelike_array_datetime(self, data):
    # datetime-flavoured mixtures classify as ("datetime", no-strings)
    assert lib.infer_datetimelike_array(data) == ("datetime", False)
@pytest.mark.parametrize(
    "data",
    [
        [timedelta(2017, 6, 12), timedelta(2017, 3, 11)],
        [timedelta(2017, 6, 12), date(2017, 3, 11)],
        [np.timedelta64(2017, "D"), np.timedelta64(6, "s")],
        [np.timedelta64(2017, "D"), timedelta(2017, 3, 11)],
    ],
)
def test_infer_datetimelike_array_timedelta(self, data):
    # timedelta-flavoured mixtures classify as ("timedelta", no-strings)
    assert lib.infer_datetimelike_array(data) == ("timedelta", False)
def test_infer_datetimelike_array_date(self):
    # pure stdlib dates classify as ("date", no-strings)
    arr = [date(2017, 6, 12), date(2017, 3, 11)]
    assert lib.infer_datetimelike_array(arr) == ("date", False)
@pytest.mark.parametrize(
    "data",
    [
        ["2017-06-12", "2017-03-11"],
        [20170612, 20170311],
        [20170612.5, 20170311.8],
        [Dummy(), Dummy()],
        [Timestamp("20170612"), Timestamp("20170311", tz="US/Eastern")],
        [Timestamp("20170612"), 20170311],
        [timedelta(2017, 6, 12), Timestamp("20170311", tz="US/Eastern")],
    ],
)
def test_infer_datetimelike_array_mixed(self, data):
    # strings, numbers, arbitrary objects (Dummy is defined elsewhere in
    # this module) and tz-mismatched Timestamps all come back "mixed"
    assert lib.infer_datetimelike_array(data)[0] == "mixed"
@pytest.mark.parametrize(
    "first, expected",
    [
        [[None], "mixed"],
        [[np.nan], "mixed"],
        [[pd.NaT], "nat"],
        [[datetime(2017, 6, 12, 19, 30), pd.NaT], "datetime"],
        [[np.datetime64("2017-06-12"), pd.NaT], "datetime"],
        [[date(2017, 6, 12), pd.NaT], "date"],
        [[timedelta(2017, 6, 12), pd.NaT], "timedelta"],
        [[np.timedelta64(2017, "D"), pd.NaT], "timedelta"],
    ],
)
@pytest.mark.parametrize("second", [None, np.nan])
def test_infer_datetimelike_array_nan_nat_like(self, first, second, expected):
    # Appending None/NaN must not change the inferred flavour.
    # Build a new list instead of mutating `first` in place: the lists in
    # the parametrize decorator are created once at collection time, so an
    # in-place append leaks into the next `second` parametrization run
    # (test pollution).
    values = first + [second]
    assert lib.infer_datetimelike_array(values) == (expected, False)
def test_infer_dtype_all_nan_nat_like(self):
    # inference over arrays consisting only of NaN/None/NaT variants
    arr = np.array([np.nan, np.nan])
    assert lib.infer_dtype(arr, skipna=True) == "floating"

    # nan and None mix are result in mixed
    arr = np.array([np.nan, np.nan, None])
    assert lib.infer_dtype(arr, skipna=True) == "empty"
    assert lib.infer_dtype(arr, skipna=False) == "mixed"

    arr = np.array([None, np.nan, np.nan])
    assert lib.infer_dtype(arr, skipna=True) == "empty"
    assert lib.infer_dtype(arr, skipna=False) == "mixed"

    # pd.NaT
    # NaT alone (with or without surrounding NaN/None) infers "datetime"
    arr = np.array([pd.NaT])
    assert lib.infer_dtype(arr, skipna=False) == "datetime"

    arr = np.array([pd.NaT, np.nan])
    assert lib.infer_dtype(arr, skipna=False) == "datetime"

    arr = np.array([np.nan, pd.NaT])
    assert lib.infer_dtype(arr, skipna=False) == "datetime"

    arr = np.array([np.nan, pd.NaT, np.nan])
    assert lib.infer_dtype(arr, skipna=False) == "datetime"

    arr = np.array([None, pd.NaT, None])
    assert lib.infer_dtype(arr, skipna=False) == "datetime"

    # np.datetime64(nat)
    arr = np.array([np.datetime64("nat")])
    assert lib.infer_dtype(arr, skipna=False) == "datetime64"

    for n in [np.nan, pd.NaT, None]:
        arr = np.array([n, np.datetime64("nat"), n])
        assert lib.infer_dtype(arr, skipna=False) == "datetime64"

        arr = np.array([pd.NaT, n, np.datetime64("nat"), n])
        assert lib.infer_dtype(arr, skipna=False) == "datetime64"

    arr = np.array([np.timedelta64("nat")], dtype=object)
    assert lib.infer_dtype(arr, skipna=False) == "timedelta"

    for n in [np.nan, pd.NaT, None]:
        arr = np.array([n, np.timedelta64("nat"), n])
        assert lib.infer_dtype(arr, skipna=False) == "timedelta"

        arr = np.array([pd.NaT, n, np.timedelta64("nat"), n])
        assert lib.infer_dtype(arr, skipna=False) == "timedelta"

    # datetime / timedelta mixed
    arr = np.array([pd.NaT, np.datetime64("nat"), np.timedelta64("nat"), np.nan])
    assert lib.infer_dtype(arr, skipna=False) == "mixed"

    arr = np.array([np.timedelta64("nat"), np.datetime64("nat")], dtype=object)
    assert lib.infer_dtype(arr, skipna=False) == "mixed"
def test_is_datetimelike_array_all_nan_nat_like(self):
    # the specialised array predicates on NaN/NaT-only content:
    # a datetime64-NaT makes the array datetime-like, a timedelta64-NaT
    # makes it timedelta-like, and mixing both satisfies neither
    arr = np.array([np.nan, pd.NaT, np.datetime64("nat")])
    assert lib.is_datetime_array(arr)
    assert lib.is_datetime64_array(arr)
    assert not lib.is_timedelta_or_timedelta64_array(arr)

    arr = np.array([np.nan, pd.NaT, np.timedelta64("nat")])
    assert not lib.is_datetime_array(arr)
    assert not lib.is_datetime64_array(arr)
    assert lib.is_timedelta_or_timedelta64_array(arr)

    arr = np.array([np.nan, pd.NaT, np.datetime64("nat"), np.timedelta64("nat")])
    assert not lib.is_datetime_array(arr)
    assert not lib.is_datetime64_array(arr)
    assert not lib.is_timedelta_or_timedelta64_array(arr)

    # NaN/NaT only: ambiguous, so every flavour-specific predicate accepts
    arr = np.array([np.nan, pd.NaT])
    assert lib.is_datetime_array(arr)
    assert lib.is_datetime64_array(arr)
    assert lib.is_timedelta_or_timedelta64_array(arr)

    # plain NaN with no NaT gives no datetimelike evidence at all
    arr = np.array([np.nan, np.nan], dtype=object)
    assert not lib.is_datetime_array(arr)
    assert not lib.is_datetime64_array(arr)
    assert not lib.is_timedelta_or_timedelta64_array(arr)

    # single-timezone detection: same tz passes, differing tzs fail
    assert lib.is_datetime_with_singletz_array(
        np.array(
            [
                Timestamp("20130101", tz="US/Eastern"),
                Timestamp("20130102", tz="US/Eastern"),
            ],
            dtype=object,
        )
    )
    assert not lib.is_datetime_with_singletz_array(
        np.array(
            [
                Timestamp("20130101", tz="US/Eastern"),
                Timestamp("20130102", tz="CET"),
            ],
            dtype=object,
        )
    )
@pytest.mark.parametrize(
    "func",
    [
        "is_datetime_array",
        "is_datetime64_array",
        "is_bool_array",
        "is_timedelta_or_timedelta64_array",
        "is_date_array",
        "is_time_array",
        "is_interval_array",
    ],
)
def test_other_dtypes_for_array(self, func):
    # every specialised "is_*_array" predicate rejects plain string and
    # integer arrays, both 1-D and reshaped to 2-D
    func = getattr(lib, func)
    arr = np.array(["foo", "bar"])
    assert not func(arr)
    assert not func(arr.reshape(2, 1))

    arr = np.array([1, 2])
    assert not func(arr)
    assert not func(arr.reshape(2, 1))
def test_date(self):
    # an Index of stdlib dates reports inferred_type "date"; a trailing
    # NaN makes it "mixed" unless skipna is requested
    dates = [date(2012, 1, day) for day in range(1, 20)]
    index = Index(dates)
    assert index.inferred_type == "date"

    dates = [date(2012, 1, day) for day in range(1, 20)] + [np.nan]
    result = lib.infer_dtype(dates, skipna=False)
    assert result == "mixed"

    result = lib.infer_dtype(dates, skipna=True)
    assert result == "date"
@pytest.mark.parametrize(
    "values",
    [
        [date(2020, 1, 1), Timestamp("2020-01-01")],
        [Timestamp("2020-01-01"), date(2020, 1, 1)],
        [date(2020, 1, 1), pd.NaT],
        [pd.NaT, date(2020, 1, 1)],
    ],
)
@pytest.mark.parametrize("skipna", [True, False])
def test_infer_dtype_date_order_invariant(self, values, skipna):
    # https://github.com/pandas-dev/pandas/issues/33741
    # the inferred dtype must not depend on element order
    result = lib.infer_dtype(values, skipna=skipna)
    assert result == "date"
def test_is_numeric_array(self):
assert lib.is_float_array(np.array([1, 2.0]))
assert lib.is_float_array(np.array([1, 2.0, np.nan]))
assert not lib.is_float_array(np.array([1, 2]))
assert lib.is_integer_array(np.array([1, 2]))
assert not lib.is_integer_array(np.array([1, 2.0]))
def test_is_string_array(self):
    # only pd.NA counts as a permissible missing value for string arrays
    assert lib.is_string_array(np.array(["foo", "bar"]))
    assert not lib.is_string_array(
        np.array(["foo", "bar", pd.NA], dtype=object), skipna=False
    )
    assert lib.is_string_array(
        np.array(["foo", "bar", pd.NA], dtype=object), skipna=True
    )

    # NaN is not valid for string array, just NA
    assert not lib.is_string_array(
        np.array(["foo", "bar", np.nan], dtype=object), skipna=True
    )

    assert not lib.is_string_array(np.array([1, 2]))
def test_to_object_array_tuples(self):
r = (5, 6)
values = [r]
lib.to_object_array_tuples(values)
# make sure record array works
record = namedtuple("record", "x y")
r = record(5, 6)
values = [r]
lib.to_object_array_tuples(values)
def test_object(self):
    # GH 7431
    # cannot infer more than this as only a single element
    arr = np.array([None], dtype="O")
    result = lib.infer_dtype(arr, skipna=False)
    assert result == "mixed"
    result = lib.infer_dtype(arr, skipna=True)
    assert result == "empty"
def test_to_object_array_width(self):
    # see gh-13320
    # min_width pads short rows with None; a min_width smaller than the
    # actual row length is a no-op
    rows = [[1, 2, 3], [4, 5, 6]]

    expected = np.array(rows, dtype=object)
    out = lib.to_object_array(rows)
    tm.assert_numpy_array_equal(out, expected)

    expected = np.array(rows, dtype=object)
    out = lib.to_object_array(rows, min_width=1)
    tm.assert_numpy_array_equal(out, expected)

    expected = np.array(
        [[1, 2, 3, None, None], [4, 5, 6, None, None]], dtype=object
    )
    out = lib.to_object_array(rows, min_width=5)
    tm.assert_numpy_array_equal(out, expected)
def test_is_period(self):
    # lib.is_period accepts only Period scalars, not PeriodIndex or
    # other scalar-like values
    assert lib.is_period(Period("2011-01", freq="M"))
    assert not lib.is_period(PeriodIndex(["2011-01"], freq="M"))
    assert not lib.is_period(Timestamp("2011-01"))
    assert not lib.is_period(1)
    assert not lib.is_period(np.nan)
def test_categorical(self):
    # GH 8974
    # Categorical data infers "categorical" both directly and via Series,
    # regardless of ordering/custom categories
    arr = Categorical(list("abc"))
    result = lib.infer_dtype(arr, skipna=True)
    assert result == "categorical"

    result = lib.infer_dtype(Series(arr), skipna=True)
    assert result == "categorical"

    arr = Categorical(list("abc"), categories=["cegfab"], ordered=True)
    result = lib.infer_dtype(arr, skipna=True)
    assert result == "categorical"

    result = lib.infer_dtype(Series(arr), skipna=True)
    assert result == "categorical"
@pytest.mark.parametrize("asobject", [True, False])
def test_interval(self, asobject):
    # IntervalIndex infers "interval" whether inspected as its native
    # dtype or cast to object, and via its array or a Series wrapper
    idx = pd.IntervalIndex.from_breaks(range(5), closed="both")
    if asobject:
        idx = idx.astype(object)

    inferred = lib.infer_dtype(idx, skipna=False)
    assert inferred == "interval"

    inferred = lib.infer_dtype(idx._data, skipna=False)
    assert inferred == "interval"

    inferred = lib.infer_dtype(Series(idx, dtype=idx.dtype), skipna=False)
    assert inferred == "interval"
@pytest.mark.parametrize("value", [Timestamp(0), Timedelta(0), 0, 0.0])
def test_interval_mismatched_closed(self, value):
    first = Interval(value, value, closed="left")
    second = Interval(value, value, closed="right")

    # if closed match, we should infer "interval"
    arr = np.array([first, first], dtype=object)
    assert lib.infer_dtype(arr, skipna=False) == "interval"

    # if closed dont match, we should _not_ get "interval"
    arr2 = np.array([first, second], dtype=object)
    assert lib.infer_dtype(arr2, skipna=False) == "mixed"
def test_interval_mismatched_subtype(self):
    # Intervals whose endpoint types disagree (int vs Timestamp vs
    # Timedelta) are "mixed"; only numeric subtypes interoperate
    first = Interval(0, 1, closed="left")
    second = Interval(Timestamp(0), Timestamp(1), closed="left")
    third = Interval(Timedelta(0), Timedelta(1), closed="left")

    arr = np.array([first, second])
    assert lib.infer_dtype(arr, skipna=False) == "mixed"

    arr = np.array([second, third])
    assert lib.infer_dtype(arr, skipna=False) == "mixed"

    arr = np.array([first, third])
    assert lib.infer_dtype(arr, skipna=False) == "mixed"

    # float vs int subdtype are compatible
    flt_interval = Interval(1.5, 2.5, closed="left")
    arr = np.array([first, flt_interval], dtype=object)
    assert lib.infer_dtype(arr, skipna=False) == "interval"
@pytest.mark.parametrize("klass", [pd.array, Series])
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("data", [["a", "b", "c"], ["a", "b", pd.NA]])
def test_string_dtype(self, data, skipna, klass, nullable_string_dtype):
    # StringArray
    # nullable_string_dtype is a fixture; NA presence and skipna choice
    # must not affect the "string" verdict
    val = klass(data, dtype=nullable_string_dtype)
    inferred = lib.infer_dtype(val, skipna=skipna)
    assert inferred == "string"
@pytest.mark.parametrize("klass", [pd.array, Series])
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("data", [[True, False, True], [True, False, pd.NA]])
def test_boolean_dtype(self, data, skipna, klass):
    # BooleanArray
    # NA presence and skipna choice must not affect the "boolean" verdict
    val = klass(data, dtype="boolean")
    inferred = lib.infer_dtype(val, skipna=skipna)
    assert inferred == "boolean"
class TestNumberScalar:
    """Scalar/dtype predicate checks: is_number, is_bool, is_integer,
    is_float, and the datetime64/timedelta64 dtype predicates."""

    def test_is_number(self):
        # Python and numpy numeric scalars (NaN included) are numbers;
        # datetimelike scalars, bools-as-np.bool_ and None are not
        assert is_number(True)
        assert is_number(1)
        assert is_number(1.1)
        assert is_number(1 + 3j)

        assert is_number(np.int64(1))
        assert is_number(np.float64(1.1))
        assert is_number(np.complex128(1 + 3j))
        assert is_number(np.nan)

        assert not is_number(None)
        assert not is_number("x")
        assert not is_number(datetime(2011, 1, 1))
        assert not is_number(np.datetime64("2011-01-01"))
        assert not is_number(Timestamp("2011-01-01"))
        assert not is_number(Timestamp("2011-01-01", tz="US/Eastern"))
        assert not is_number(timedelta(1000))
        assert not is_number(Timedelta("1 days"))

        # questionable
        assert not is_number(np.bool_(False))
        assert is_number(np.timedelta64(1, "D"))

    def test_is_bool(self):
        # only Python bool and np.bool_ qualify
        assert is_bool(True)
        assert is_bool(False)
        assert is_bool(np.bool_(False))

        assert not is_bool(1)
        assert not is_bool(1.1)
        assert not is_bool(1 + 3j)
        assert not is_bool(np.int64(1))
        assert not is_bool(np.float64(1.1))
        assert not is_bool(np.complex128(1 + 3j))
        assert not is_bool(np.nan)
        assert not is_bool(None)
        assert not is_bool("x")
        assert not is_bool(datetime(2011, 1, 1))
        assert not is_bool(np.datetime64("2011-01-01"))
        assert not is_bool(Timestamp("2011-01-01"))
        assert not is_bool(Timestamp("2011-01-01", tz="US/Eastern"))
        assert not is_bool(timedelta(1000))
        assert not is_bool(np.timedelta64(1, "D"))
        assert not is_bool(Timedelta("1 days"))

    def test_is_integer(self):
        # ints but NOT bools (bool is an int subclass in Python)
        assert is_integer(1)
        assert is_integer(np.int64(1))

        assert not is_integer(True)
        assert not is_integer(1.1)
        assert not is_integer(1 + 3j)
        assert not is_integer(False)
        assert not is_integer(np.bool_(False))
        assert not is_integer(np.float64(1.1))
        assert not is_integer(np.complex128(1 + 3j))
        assert not is_integer(np.nan)
        assert not is_integer(None)
        assert not is_integer("x")
        assert not is_integer(datetime(2011, 1, 1))
        assert not is_integer(np.datetime64("2011-01-01"))
        assert not is_integer(Timestamp("2011-01-01"))
        assert not is_integer(Timestamp("2011-01-01", tz="US/Eastern"))
        assert not is_integer(timedelta(1000))
        assert not is_integer(Timedelta("1 days"))
        assert not is_integer(np.timedelta64(1, "D"))

    def test_is_float(self):
        # floats including NaN; ints, complex and datetimelikes excluded
        assert is_float(1.1)
        assert is_float(np.float64(1.1))
        assert is_float(np.nan)

        assert not is_float(True)
        assert not is_float(1)
        assert not is_float(1 + 3j)
        assert not is_float(False)
        assert not is_float(np.bool_(False))
        assert not is_float(np.int64(1))
        assert not is_float(np.complex128(1 + 3j))
        assert not is_float(None)
        assert not is_float("x")
        assert not is_float(datetime(2011, 1, 1))
        assert not is_float(np.datetime64("2011-01-01"))
        assert not is_float(Timestamp("2011-01-01"))
        assert not is_float(Timestamp("2011-01-01", tz="US/Eastern"))
        assert not is_float(timedelta(1000))
        assert not is_float(np.timedelta64(1, "D"))
        assert not is_float(Timedelta("1 days"))

    def test_is_datetime_dtypes(self):
        # string dtype specs and actual DatetimeIndex objects, naive and
        # tz-aware, against the four datetime64 dtype predicates
        ts = pd.date_range("20130101", periods=3)
        tsa = pd.date_range("20130101", periods=3, tz="US/Eastern")

        assert is_datetime64_dtype("datetime64")
        assert is_datetime64_dtype("datetime64[ns]")
        assert is_datetime64_dtype(ts)
        assert not is_datetime64_dtype(tsa)

        assert not is_datetime64_ns_dtype("datetime64")
        assert is_datetime64_ns_dtype("datetime64[ns]")
        assert is_datetime64_ns_dtype(ts)
        assert is_datetime64_ns_dtype(tsa)

        assert is_datetime64_any_dtype("datetime64")
        assert is_datetime64_any_dtype("datetime64[ns]")
        assert is_datetime64_any_dtype(ts)
        assert is_datetime64_any_dtype(tsa)

        assert not is_datetime64tz_dtype("datetime64")
        assert not is_datetime64tz_dtype("datetime64[ns]")
        assert not is_datetime64tz_dtype(ts)
        assert is_datetime64tz_dtype(tsa)

        for tz in ["US/Eastern", "UTC"]:
            dtype = f"datetime64[ns, {tz}]"
            assert not is_datetime64_dtype(dtype)
            assert is_datetime64tz_dtype(dtype)
            assert is_datetime64_ns_dtype(dtype)
            assert is_datetime64_any_dtype(dtype)

    def test_is_timedelta(self):
        assert is_timedelta64_dtype("timedelta64")
        assert is_timedelta64_dtype("timedelta64[ns]")
        assert not is_timedelta64_ns_dtype("timedelta64")
        assert is_timedelta64_ns_dtype("timedelta64[ns]")

        tdi = TimedeltaIndex([1e14, 2e14], dtype="timedelta64[ns]")
        assert is_timedelta64_dtype(tdi)
        assert is_timedelta64_ns_dtype(tdi)
        assert is_timedelta64_ns_dtype(tdi.astype("timedelta64[ns]"))

        # Conversion to Int64Index:
        assert not is_timedelta64_ns_dtype(tdi.astype("timedelta64"))
        assert not is_timedelta64_ns_dtype(tdi.astype("timedelta64[h]"))
class TestIsScalar:
def test_is_scalar_builtin_scalars(self):
assert is_scalar(None)
assert is_scalar(True)
assert is_scalar(False)
assert is_scalar(Fraction())
assert is_scalar(0.0)
assert is_scalar(1)
assert is_scalar(complex(2))
assert is_scalar(float("NaN"))
assert is_scalar(np.nan)
assert is_scalar("foobar")
assert is_scalar(b"foobar")
assert is_scalar(datetime(2014, 1, 1))
assert is_scalar(date(2014, 1, 1))
assert is_scalar(time(12, 0))
assert is_scalar(timedelta(hours=1))
assert is_scalar(pd.NaT)
assert is_scalar(pd.NA)
def test_is_scalar_builtin_nonscalars(self):
assert not is_scalar({})
assert not is_scalar([])
assert not is_scalar([1])
assert not is_scalar(())
assert not is_scalar((1,))
assert not is_scalar(slice(None))
assert not is_scalar(Ellipsis)
def test_is_scalar_numpy_array_scalars(self):
assert is_scalar(np.int64(1))
assert is_scalar(np.float64(1.0))
assert is_scalar(np.int32(1))
assert is_scalar(np.complex64(2))
assert is_scalar(np.object_("foobar"))
assert is_scalar(np.str_("foobar"))
assert is_scalar(np.unicode_("foobar"))
assert is_scalar(np.bytes_(b"foobar"))
assert is_scalar(np.datetime64("2014-01-01"))
assert is_scalar(np.timedelta64(1, "h"))
def test_is_scalar_numpy_zerodim_arrays(self):
for zerodim in [
np.array(1),
np.array("foobar"),
np.array(np.datetime64("2014-01-01")),
np.array(np.timedelta64(1, "h")),
np.array(np.datetime64("NaT")),
]:
assert not is_scalar(zerodim)
assert is_scalar(lib.item_from_zerodim(zerodim))
@pytest.mark.filterwarnings("ignore::PendingDeprecationWarning")
def test_is_scalar_numpy_arrays(self):
for a in [
np.array([]),
np.array([[]]),
np.matrix("1; 2"),
]:
assert not is_scalar(a)
assert not is_scalar(MockNumpyLikeArray(a))
def test_is_scalar_pandas_scalars(self):
assert is_scalar(Timestamp("2014-01-01"))
assert is_scalar(Timedelta(hours=1))
assert is_scalar(Period("2014-01-01"))
assert is_scalar(Interval(left=0, right=1))
assert is_scalar(DateOffset(days=1))
assert is_scalar(pd.offsets.Minute(3))
def test_is_scalar_pandas_containers(self):
assert not is_scalar(Series(dtype=object))
assert not is_scalar(Series([1]))
assert not is_scalar(DataFrame())
assert not is_scalar(DataFrame([[1]]))
assert not is_scalar(Index([]))
assert not is_scalar(Index([1]))
assert not is_scalar(Categorical([]))
assert not is_scalar(DatetimeIndex([])._data)
assert not is_scalar(TimedeltaIndex([])._data)
assert not is_scalar(DatetimeIndex([])._data.to_period("D"))
assert not is_scalar(pd.array([1, 2, 3]))
def test_is_scalar_number(self):
# Number() is not recognied by PyNumber_Check, so by extension
# is not recognized by is_scalar, but instances of non-abstract
# subclasses are.
class Numeric(Number):
def __init__(self, value):
self.value = value
def __int__(self):
return self.value
num = Numeric(1)
assert is_scalar(num)
def test_datetimeindex_from_empty_datetime64_array():
    # an empty datetime64 array of any resolution yields an empty index
    for unit in ("ms", "us", "ns"):
        empty = np.array([], dtype=f"datetime64[{unit}]")
        assert len(DatetimeIndex(empty)) == 0
def test_nan_to_nat_conversions():
    # Assigning NaN into a datetime64 column must coerce it to NaT,
    # both via frame-level iloc and via Series slice assignment.
    frame = DataFrame(
        {"A": np.asarray(range(10), dtype="float64"), "B": Timestamp("20010101")}
    )
    frame.iloc[3:6, :] = np.nan
    assert frame.loc[4, "B"] is pd.NaT

    col = frame["B"].copy()
    col[8:9] = np.nan
    assert col[8] is pd.NaT
@td.skip_if_no_scipy
@pytest.mark.filterwarnings("ignore::PendingDeprecationWarning")
def test_is_scipy_sparse(spmatrix):
    # `spmatrix` is a pytest fixture supplying scipy sparse matrix classes;
    # only genuine scipy sparse objects are recognised, not ndarrays
    assert is_scipy_sparse(spmatrix([[0, 1]]))
    assert not is_scipy_sparse(np.array([1]))
def test_ensure_int32():
    # both int32 and int64 inputs come back with dtype int32
    for src_dtype in (np.int32, np.int64):
        converted = ensure_int32(np.arange(10, dtype=src_dtype))
        assert converted.dtype == np.int32
| 34.285934 | 88 | 0.597458 |
7953d6718675c6416d04d20c4ccca1176e1469c7 | 9,488 | py | Python | tds/commands/application.py | minddrive/tds | 573836434c76603fdd3dd9e07545b48f86e5f70f | [
"Apache-2.0"
] | 1 | 2020-01-02T13:44:23.000Z | 2020-01-02T13:44:23.000Z | tds/commands/application.py | minddrive/tds | 573836434c76603fdd3dd9e07545b48f86e5f70f | [
"Apache-2.0"
] | 1 | 2016-08-16T18:35:51.000Z | 2016-08-16T18:35:51.000Z | tds/commands/application.py | minddrive/tds | 573836434c76603fdd3dd9e07545b48f86e5f70f | [
"Apache-2.0"
] | 1 | 2016-08-02T06:06:35.000Z | 2016-08-02T06:06:35.000Z | # Copyright 2016 Ifwe Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands to manage the TDS applications."""
import logging
import tagopsdb
import tagopsdb.deploy.repo
import tds.exceptions
import tds.model
from .base import BaseController, validate
log = logging.getLogger('tds')
class ApplicationController(BaseController):
    """Commands to manage TDS applications.

    Each public method implements one CLI subcommand and returns a dict
    with a 'result' key (or raises a tds.exceptions error) for the view
    layer to render.
    """

    # minimum user access level required for each subcommand
    access_levels = dict(
        add='admin',
        add_apptype='admin',
        delete='admin',
        delete_apptype='admin',
        list='environment',
        update='environment',
    )

    @staticmethod
    def add(application, job_name, deploy_type, arch, build_type, build_host,
            **_kwds):
        """Add an application.

        Fails if an application with the same name already exists, or if
        the architecture / build type do not pass model validation.
        """
        app_name = application
        application = tds.model.Application.get(name=app_name)

        if application is not None:
            raise tds.exceptions.AlreadyExistsError(
                "Application already exists: %s", application.name
            )

        # raise early on invalid arch/build_type before touching the DB
        tds.model.Application.verify_arch(arch)
        tds.model.Application.verify_build_type(build_type)

        log.debug('Creating application %r', app_name)

        # NOTE: create() does not give a proper exception/traceback
        # if there's missing keyword information
        return dict(result=tds.model.Application.create(
            name=app_name,
            deploy_type=deploy_type,
            validation_type='matching',
            path=job_name,
            arch=arch,
            build_type=build_type,
            build_host=build_host
        ))

    @validate('application')
    @validate('project')
    def add_apptype(self, project, application, apptypes, **_params):
        """Add a specific application type (or types) to the given project
        and application pair
        """
        log.debug('Adding application type(s) for a given '
                  'project/application pair')

        # Validate apptypes (targets) first and get target objects
        # so nothing is written unless every apptype checks out
        targets = []
        for apptype in apptypes:
            target = tds.model.AppTarget.get(name=apptype)
            if target is None:
                raise tds.exceptions.NotFoundError('Deploy target', [apptype])
            elif tagopsdb.model.ProjectPackage.get(
                project_id=project.id,
                app_id=target.id,
                pkg_def_id=application.id
            ) is not None:
                raise tds.exceptions.InvalidOperationError(
                    'Apptype "%s" is already a part of the project "%s" and '
                    'application "%s" pair',
                    apptype, project.name, application.name
                )
            else:
                targets.append(target)

        tagopsdb.deploy.repo.add_project_package_mapping(
            project, application, targets
        )
        tagopsdb.Session.commit()
        log.debug('Committed database changes')

        return dict(
            result=dict(
                application=application.name,
                project=project.name,
                targets=apptypes,
            )
        )

    @validate('application')
    def delete(self, application, **_kwds):
        """Remove a given application.

        Refuses to delete while any deploy targets are still associated.
        """
        if len(application.targets):
            raise tds.exceptions.AssociatedTargetsError(
                'Application "%s" has associated targets: %s',
                application.name,
                ', '.join([x.name for x in application.targets])
            )

        log.debug('Removing application %r', application.name)
        application.delete()

        return dict(result=application)

    @validate('targets')
    @validate('application')
    @validate('project')
    def delete_apptype(self, application, project, apptypes, **_params):
        """Delete a specific application type (or types) from the given
        project and application pair

        Refuses while the apptype still has any non-failed deployment on
        an operational host.
        """
        # NOTE(review): assert is stripped under `python -O`; explicit
        # validation would be more robust -- confirm intent
        assert len(apptypes) == 1, "too many apptypes: %r" % apptypes
        apptype = apptypes[0]

        # Check for active host deployments
        host_deps = tds.model.HostDeployment.all(
            order_by='realized', desc=True
        )
        for host_dep in host_deps:
            if host_dep.application != application:
                continue
            if host_dep.app_target != apptype:
                continue
            if host_dep.status == 'failed':
                continue
            if host_dep.host_state == 'operational':
                raise tds.exceptions.InvalidOperationError(
                    'Apptype "%s" still has active deployments',
                    apptype.name
                )

        # Check for active tier deployments
        app_deps = tds.model.AppDeployment.find(
            target=apptype, order_by='realized', desc=True
        )
        app_target = tds.model.AppTarget.get(name=apptype.name)
        for app_dep in app_deps:
            if app_dep.application != application:
                continue
            if app_dep.status == 'invalidated':
                continue
            for app_host in app_target.hosts:
                if app_host.state == 'operational':
                    raise tds.exceptions.InvalidOperationError(
                        'Apptype "%s" still has active deployments',
                        apptype.name
                    )

        apptype.remove_application(application, project=project)
        return dict(
            result=dict(
                application=application,
                project=project,
                target=apptype,
            )
        )

    @validate('application')
    def list(self, applications=(), **_kwds):
        """Show information for requested applications
        (or all applications).
        """
        if len(applications) == 0:
            applications = tds.model.Application.all()

        return dict(result=applications)

    @staticmethod
    def _parse_properties(properties, valid_attrs=None, mappings=None):
        """
        Parse properties for the update function.
        properties should be a string of the following form:
        'attr1=val1 attr2=val2 ...'
        valid_attrs should be an iterable. If valid_attrs is None, no checks
        will be performed on the validity of an attr.
        mappings should be a dictionary that maps attrs to their appropriate
        names for use. If no mappings are passed, no mappings are made.
        Note: attrs are checked against valid_attrs, not mappings of attrs.
        """
        parsed = dict()
        for declaration in properties:
            try:
                attr, val = declaration.split('=', 1)
            except ValueError:
                raise tds.exceptions.InvalidInputError(
                    ("Invalid properties: %s. Split on '=' once for the "
                     "declaration %s returned 1 argument, expected 2"),
                    properties,
                    declaration,
                )
            # translate CLI attr names to model attribute names
            if mappings and attr in mappings:
                mapped_attr = mappings[attr]
            else:
                mapped_attr = attr
            if valid_attrs and attr not in valid_attrs:
                raise tds.exceptions.InvalidInputError(
                    "Invalid attribute: %s. Valid attributes are: %r",
                    attr, valid_attrs
                )
            elif mapped_attr in parsed:
                raise tds.exceptions.InvalidInputError(
                    "Attribute appeared more than once: %s", attr
                )
            parsed[mapped_attr] = val
        return parsed

    @validate('application')
    def update(self, application, properties, **kwargs):
        """
        Update an existing application's properties.
        properties should be a string of the following form:
        'attr1=val1 attr2=val2 ...'

        Admins may change every attribute; other users only the Jenkins
        job name and repository. Raises if nothing would change.
        """
        if kwargs['user_level'] == "admin":
            valid_attrs = (
                'name', 'deploy_type', 'arch', 'build_type', 'build_host',
                'job_name', 'repository'
            )
        else:
            valid_attrs = ('job_name', 'repository')
        mappings = dict(job_name="path")
        properties = self._parse_properties(properties, valid_attrs, mappings)
        updated = False
        for attr in properties:
            if getattr(application, attr) != properties[attr]:
                # run the model's verify_<attr> hook, if one exists
                meth = getattr(tds.model.Application, 'verify_%s' % attr,
                               None)
                if meth is not None:
                    meth(properties[attr])
                setattr(application, attr, properties[attr])
                updated = True
        if updated:
            tagopsdb.Session.commit()
            return dict(result=application)
        else:
            raise tds.exceptions.InvalidOperationError(
                ("Update values match current values for application %s. "
                 "Nothing to do."),
                application.name
            )
| 34.129496 | 78 | 0.574304 |
7953d6cc35a9f40a4fb409038ff1830710cfd387 | 3,424 | py | Python | code/data_process.py | IBM/answer-type-prediction | 5818237ed9f0f5de6f8b8de8f16ef7efabbf414b | [
"Apache-2.0"
] | null | null | null | code/data_process.py | IBM/answer-type-prediction | 5818237ed9f0f5de6f8b8de8f16ef7efabbf414b | [
"Apache-2.0"
] | null | null | null | code/data_process.py | IBM/answer-type-prediction | 5818237ed9f0f5de6f8b8de8f16ef7efabbf414b | [
"Apache-2.0"
] | null | null | null | import torch
from torch.utils.data import TensorDataset
import utils
def get_context_representation(
text,
tokenizer,
max_seq_length,
):
context_tokens = tokenizer.tokenize(text)
context_ids = tokenizer.convert_tokens_to_ids(context_tokens)
context_ids = context_ids[:(max_seq_length-4)]
context_ids = tokenizer.convert_tokens_to_ids(["[CLS]", "[unused1]"]) + context_ids + tokenizer.convert_tokens_to_ids(["[SEP]"])
padding = [0] * (max_seq_length - len(context_ids))
context_ids += padding
assert len(context_ids) == max_seq_length
return context_ids
def prepare_type_labels(
    data_in,
    num_types,
    name_to_type_id
):
    """Build a multi-hot label vector of length *num_types*.

    For "resource" samples, positions of the types named in
    data_in["type"] are set to 1.0; all other samples yield all zeros.
    """
    out_type_labels = [0.0] * num_types
    if data_in["category"] == "resource":
        positive_types_names = data_in["type"]
        # isinstance instead of exact type match: also accepts list
        # subclasses, same guarantee otherwise
        assert isinstance(positive_types_names, list)
        for index in (name_to_type_id[name] for name in positive_types_names):
            out_type_labels[index] = 1.0
    return out_type_labels
def prepare_answer_category_label(data_in, answer_category_to_id):
    """Map one sample to its answer-category id.

    "resource" samples map to the generic resource id; any other sample
    must carry exactly one type name, which is looked up directly.
    """
    if data_in["category"] == "resource":
        return answer_category_to_id["resource"]

    type_list = data_in["type"]
    assert len(type_list) == 1
    assert type(type_list) == list
    return answer_category_to_id[type_list[0]]
def process_data_1(data, type_id_to_name_file, tokenizer, num_types, max_context_len=64, logger=None):
    """Convert raw samples into a TensorDataset of
    (context_ids, type_labels, answer_category_labels, is_type_resource).
    """
    type_id_to_name = utils.read_json(type_id_to_name_file)
    # JSON object keys arrive as strings; normalise them to int type ids
    if type(list(type_id_to_name.keys())[0]) is not int:
        type_id_to_name = {int(key): value for key, value in type_id_to_name.items()}
    name_to_type_id = utils.exchange_keys_and_values(type_id_to_name)

    all_context_ids = []
    all_type_labels = []
    all_category_labels = []
    all_is_resource = []
    for sample in data:
        all_context_ids.append(
            get_context_representation(sample["question"], tokenizer, max_context_len)
        )
        all_type_labels.append(prepare_type_labels(sample, num_types, name_to_type_id))
        all_category_labels.append(
            prepare_answer_category_label(sample, utils.answer_category_to_id)
        )
        all_is_resource.append(1.0 if sample["category"] == "resource" else 0.0)

    return TensorDataset(
        torch.tensor(all_context_ids),
        torch.tensor(all_type_labels),
        torch.tensor(all_category_labels),
        torch.tensor(all_is_resource),
    )
def process_typed_data_factory(model_number, params, samples, tokenizer, logger):
    """Dispatch sample preprocessing to the routine matching *model_number*.

    Only model number 1 (process_data_1) is currently supported.
    """
    if model_number in [1]:
        dataset = process_data_1(
            data=samples,
            type_id_to_name_file=params["type_id_to_name_file"],
            tokenizer=tokenizer,
            num_types=params["num_types"],
            max_context_len=params["max_context_length"],
            logger=logger,
        )
    else:
        assert False, "Unsupported value passed for model_number"
    return dataset
| 34.24 | 132 | 0.710864 |
7953d72c3698648f598a58f2c9f5a8f0542f0017 | 25,500 | py | Python | tasks-deploy/patched-python/check.py | HackerDom/qctf-starter-2018 | f4eef0fd41d777661b9fbcc61dcee9709d9f6268 | [
"MIT"
] | 8 | 2018-03-15T12:07:11.000Z | 2020-12-01T15:02:46.000Z | tasks-deploy/patched-python/check.py | HackerDom/qctf-starter-2018 | f4eef0fd41d777661b9fbcc61dcee9709d9f6268 | [
"MIT"
] | 17 | 2020-01-28T22:17:42.000Z | 2022-03-11T23:18:09.000Z | tasks-deploy/patched-python/check.py | HackerDom/qctf-starter-2018 | f4eef0fd41d777661b9fbcc61dcee9709d9f6268 | [
"MIT"
] | 2 | 2018-11-26T18:54:27.000Z | 2018-12-05T17:37:32.000Z | flags = ['QCTF{1379bae3fba0b9b5279f09d3f6191b98}', 'QCTF{789d8f8ccbbabc0931c867d14e550a77}', 'QCTF{cfec60d2c289a4d100a9c375697a3498}', 'QCTF{caf1209affb7e796868211e18021f434}', 'QCTF{d44770ff79bcdb4abfabbc1b4eb5c8df}', 'QCTF{d385a633a8b527f95d2efeb49fabfdc7}', 'QCTF{4e1e9a31a860c496f037f19a45805ccc}', 'QCTF{b12cc106e92e3c80520ec04d6ddef9c6}', 'QCTF{3a05c44286d192e4a770e629db161225}', 'QCTF{eb0fed4394d9422bf57a0e130abb1a26}', 'QCTF{2a410f90ae7aca4c26643e2c5b9d81b7}', 'QCTF{b0a9ee2fb03ca334ba46315f85aea05d}', 'QCTF{b94ab6080e9589318a9c925168d9d8af}', 'QCTF{ae13757d1b6585d51867ac03f24e2d54}', 'QCTF{1dda30a5dc9492489d12e418c6213e5c}', 'QCTF{9f905fc38bd09873105afee72500291a}', 'QCTF{ed5f31cdba2ed52a22e8fa74337efb70}', 'QCTF{c6fa7ef00ef2d8f3a9942243bd51e012}', 'QCTF{8ec1cb4f3e434140b00a19d54ad0ea25}', 'QCTF{c150983226fcab140ff9dfd2d6ce0df}', 'QCTF{804b6b6f291bd8cdceac5217da9cd3ad}', 'QCTF{55af044485f9f52723bb26b9d5970c9b}', 'QCTF{6c5113df5ede6784e5c196245356a707}', 'QCTF{6ee0ae8ad106e4091d64bbfd822a9b50}', 'QCTF{9ac6d589afe45f0c4b201a97edc5cfe2}', 'QCTF{89a83845db2ec7320069724d33c40cbf}', 'QCTF{832c1d408f34793018b83fb60d06022e}', 'QCTF{dea1b09fc35a42584de9f26d49f1620e}', 'QCTF{39c86092142f08fab117d6e20b7d10f5}', 'QCTF{4f32723d7b772f77ecdd3efe3484a41d}', 'QCTF{343bc7393768d1797ddf99b7e1ad4c2f}', 'QCTF{b5be744913b6a85c07ecafa30ce93680}', 'QCTF{42dc65474ba400f209d105e42f8a2b67}', 'QCTF{8441f950ea6299eee3a47df6b976d869}', 'QCTF{420b0cc623e0be50d7613c68887dea4b}', 'QCTF{f215de868e57f769698b6e5551f64b03}', 'QCTF{2b8554ca272cd010e8e412b97e4571a9}', 'QCTF{15aeb63ba7f639418a6ff2ce72b37d6d}', 'QCTF{4cac40235c985a630acb306451170cd7}', 'QCTF{97ecb939ae3b1277cef4423bc2826bd0}', 'QCTF{313fb4fa56d97f8511d3f5d44db27097}', 'QCTF{e49cd665afeca264d14f9027d3b57469}', 'QCTF{484e9fe9e4088599f0620b5d442147c0}', 'QCTF{ab62fc0259ca746bb4f6cb58fb84afe}', 'QCTF{e915d72753f1dcbebbc121039ab08ef0}', 
'QCTF{cc70fab53f015e01202475ae25369d09}', 'QCTF{7c379912a45b100953c7604fe49654cb}', 'QCTF{9b9b628a3ec5c4eaad4fcd25c941e49}', 'QCTF{dc0d5df5121b8f06d269f0132322fd83}', 'QCTF{72c18315c18e71acd22a5ea2d3a041c0}', 'QCTF{aa8349c236403a0dbc65d0709c3da5f7}', 'QCTF{f17a8bddc975fd98db3c39b33fcd4f9a}', 'QCTF{a41d44ccd8203742d4b5725a9e174b19}', 'QCTF{42ff1eeba042c0f245689ce80de4073e}', 'QCTF{92f29e3eab7e8ee12292291d359ca9f8}', 'QCTF{214eaac8cdd54ead08ca455cdc4cff13}', 'QCTF{e9a0b022f782f4a4a913f1f011754537}', 'QCTF{96afcdf02bc7cc4db66937b8460840a2}', 'QCTF{ee261290a4e5a2d98afa839b0576b8cf}', 'QCTF{664d424cf00d3b1ed21ad97b544f69f7}', 'QCTF{5807225a8c9bb8a56843d7a33d682af2}', 'QCTF{b440e37114511fd8e545a80e5644c824}', 'QCTF{585a503c160280d32fce6aa304bbdebf}', 'QCTF{b2f3a38d63bc0e9c1bfcf3fa05e5df5a}', 'QCTF{f12ef76d12e703762c9ffbf04d85ede7}', 'QCTF{d88bdfe974b7c8192158032367ae0b97}', 'QCTF{74a80a5c68e42a400c07947a86b54b99}', 'QCTF{62a4364b48e01afd1ba676e76fecbb3e}', 'QCTF{f2387bda2c8fc5a43418c89a012585a0}', 'QCTF{cfa3b0b468ee3545f1fc73f4a5f1559}', 'QCTF{e0c9bfa03ab0b9d84585348323646e9d}', 'QCTF{18829379b33fff49bb88e9383f30f8ee}', 'QCTF{d8be27ffffa3bc8e72ac7d0fc4400c0b}', 'QCTF{f612f1f2e4c50fb70b1d59a4ac63ea13}', 'QCTF{6f31badcbf9ca9e35e4551a7da20ed3}', 'QCTF{f32566c610601dcd253606a9b615bc0d}', 'QCTF{3e0c5b219c41744ebae3cdb8037adbaa}', 'QCTF{5e0622b68ee58e32f5ce42e36dd37d7a}', 'QCTF{5339111ee215f3d0e618c3b5cb0ef286}', 'QCTF{afde3be4899f1eba4d25fb87e95e8495}', 'QCTF{117de3940811917db7d017f6b33d54fb}', 'QCTF{4bb5a526dab06829b7da9a324b10ce2b}', 'QCTF{3cb8e1eb99704138b63fe034f850555b}', 'QCTF{e579160e5492c8383abe2486099ecaab}', 'QCTF{fa789dbed7eed95af35ebc9c8b44d03c}', 'QCTF{b1c123a472a1f2b251861e9be9bfb0e8}', 'QCTF{2c4769e097c31dd0718710aefe30146c}', 'QCTF{663a6bde4b2e8c877d52f33b0cead950}', 'QCTF{fbadde4754f63986133cb9bb3f7afd11}', 'QCTF{9b35838d2b09871337d26235f2766a7e}', 'QCTF{bc4298867318c836d4246723d1d11dbf}', 'QCTF{97ee7dc845bfdeac19417d7c4a2d5970}', 
'QCTF{5c238531f2ea075a3800268c2d5e4ccc}', 'QCTF{85748fcd99cb620b6422d9f2a0d4cbac}', 'QCTF{b29457a850955846d4b8748ae1b0ab89}', 'QCTF{8fa6860d23e09702c11df071818b1276}', 'QCTF{712a1221bbe73730db6132c65d9b935f}', 'QCTF{4268bacb464fadcf6562e1c86c2f9901}', 'QCTF{c1b87351a069b966b209610dd6990788}', 'QCTF{25b6f2a0a904c2dbabeda480916840b0}', 'QCTF{bec8fd1fce853aa587f48378bf9a63c7}', 'QCTF{d776c9ff869b23770970ac8857f3533a}', 'QCTF{df6112ff87b450d1bf442b41705ef7c0}', 'QCTF{5d932c352b8e0bc2e136a8669188be3b}', 'QCTF{fca1ee0fe6efb4ce1390bf43071080ba}', 'QCTF{bdee061e9866e6053864c67876b773}', 'QCTF{caeea6ac1c65ca506bc1983f731311ad}', 'QCTF{b97886a4c029155975d00ceb4d5246db}', 'QCTF{fe0850b4f9c947b8604c33aa97d1413a}', 'QCTF{3ef4658391108b1573d9815553270efa}', 'QCTF{4bc1a35f46c485f51a634f18d7377463}', 'QCTF{a36fa0997c0fd9400b724d892ae8c0f5}', 'QCTF{2bc35cca07f876e5593e6312928b0719}', 'QCTF{947dc21cc9cbe8624edee2736b828bc2}', 'QCTF{1645854d027267c5d81bd67d1ed868f8}', 'QCTF{82785302769f3f7efe43c5524974c831}', 'QCTF{e15cf494c5dfa4424dd0cc921fb9932c}', 'QCTF{d7013b2744782789c88c80551260172e}', 'QCTF{a36383ec002b2a7d19d2610da56be44c}', 'QCTF{2f69026a957a8b1876f1b0ec1daea006}', 'QCTF{34d6efb57cfbffe1962d89ee6e6e259f}', 'QCTF{d78f4e925d79e53128ccfee8403d9db9}', 'QCTF{7028bfdfab98e88a6c1a9850a3f0eae6}', 'QCTF{fb1aa2ba9d237c2cf3834b497781a637}', 'QCTF{b6c44edb4af04612fecee6520c27919f}', 'QCTF{6ce3c300ad0d8f5bc1a81564dbdd6954}', 'QCTF{8ea0fd0ddf84142ae384c4d7f7691462}', 'QCTF{4a8a9e1aa7eff99841df55d679164807}', 'QCTF{b1367a294cc81d30c3845411eb98ae98}', 'QCTF{76d88528eddc22b12ab793984994a93f}', 'QCTF{919a56d918125d62473f8e02aa8dd3da}', 'QCTF{c7f1cf8b989257a92254f1ddcde428e5}', 'QCTF{e3ad6ab4411bb518f6ce7ed34c60e5c8}', 'QCTF{c843239ce6387647c6b087bc9d05bbaa}', 'QCTF{f841853b06c18933e5178204b4838381}', 'QCTF{f336ff63cbed30d9935b3c7e82e01499}', 'QCTF{25e4a1301e286aa02f118352661e14ea}', 'QCTF{a07d55eb0fd5eac7ec6eec8d55da8f30}', 'QCTF{c91b8dbbc074ff5b734d1f814a62f409}', 
'QCTF{167eca1935ae8e2694d87793562151ee}', 'QCTF{eb3b4eaed0c7d1e777dedc65cc1c91c8}', 'QCTF{3300fa6c024b1607ac0236dbfd6fcb99}', 'QCTF{382bb0afc680add19872cd3f81425e39}', 'QCTF{a9d025582f9b370d5ce7f51e6b4c4ef8}', 'QCTF{55edf4775b07cfc191e413e0e5199b53}', 'QCTF{7cdc3d560acaf2ff3b4fc4ac3fbf9de}', 'QCTF{6e9e8517911e07ae71397e2c17a09e46}', 'QCTF{6f1bc6f1a2e2c7ec6c924cb2e64382c8}', 'QCTF{f1ba15d99e3f09f107c471f6c61f652d}', 'QCTF{bf07e91e851a058912c51c1171ac849b}', 'QCTF{e1ac3f7ad2f21616fb7d4789356becfd}', 'QCTF{39f5fbc93a5c1094232fa45539972f31}', 'QCTF{65aa7630b9e6aa8f1333cdbefb573c23}', 'QCTF{87e05250a43e8bc5841c157965009ab4}', 'QCTF{a23ccd02161c7f07b0bc9088039fa5e5}', 'QCTF{ce342cb2388ca9fb2a8ec8d1ffb6f5e}', 'QCTF{86868ff22aa1c7576f4027b9e85f62c1}', 'QCTF{5cf72746ec4ef217cc067ffd97375c17}', 'QCTF{a6a82d6ce541f56dc4da9bfde5bcb036}', 'QCTF{d6c7398feda3da2fa6c12e08d9ab8e0c}', 'QCTF{7dab9f502c6e283362676cce262f3b79}', 'QCTF{33a5f881cc04a55e0ae92f6c9993972d}', 'QCTF{88b4c1c74223de4ba03b018a464f160c}', 'QCTF{baf12d3e799023cc72b9889e11cfe28a}', 'QCTF{679f7ed9d359257be5f946eeba9a4be8}', 'QCTF{919c8360421484ed72b3f38702a34009}', 'QCTF{7d0485d328b957fd2d88c23349ef8085}', 'QCTF{95fe7e6cf4f885fb347e09531b13d326}', 'QCTF{9a26eb3e6ed01c91802dd6ca9306ee4b}', 'QCTF{3f762d3748dce734ddee49a23c12a595}', 'QCTF{f9bc8f5b1a34bcc0286a6d77c090a50d}', 'QCTF{2190c429bfdc719bf32297c24c25a5e3}', 'QCTF{2ecfa53611eae9cffada24578ada6795}', 'QCTF{8d2a286414fece5a3d37ca8886b1f1c}', 'QCTF{aa4d65b75a433621957feaa053144090}', 'QCTF{7f45b565ae2bdf8229cae7b7ea58b584}', 'QCTF{8172528ac20c025740ffb33d039e110e}', 'QCTF{472e4063dacb82c103e27d393df82133}', 'QCTF{a6f53ab8db260a46449faec3482161af}', 'QCTF{ae8423a4c7f183a93a92493271c10ff7}', 'QCTF{2b9c58041182476bbfdfdead84c64d97}', 'QCTF{298ecf4278fa7f9c685b4bc76f3617c}', 'QCTF{4ecf3425375ff40a745d3806920d8d0b}', 'QCTF{736744f713ed6b29302a02149e31b4cd}', 'QCTF{57899bad603157e980bd2c0899a582f9}', 'QCTF{de1608dbe9a4c54f910e1c0011409b7a}', 
'QCTF{b0ea459c46514425b52c64b3b801298d}', 'QCTF{dc30c07d62f3453e1f488237bdb99029}', 'QCTF{e1999fd7da7c80180b0a6723cebaa105}', 'QCTF{dc87b8a58ed7f1060bcdb99c9a3e4456}', 'QCTF{7e0aaf1a4d16da4f4e9febb8c4776a23}', 'QCTF{290b68cc2fe20fcd6f9ff7fb3d7d857d}', 'QCTF{abaf115c997ae021837fba63af7538e}', 'QCTF{3f037d1b71aa64fc6baf7dcc0d1c3023}', 'QCTF{e4a2a1522c8fe94a8eb085e000eb9a79}', 'QCTF{1b38ac9a4922d30a68e0c0ca1a5c1eb8}', 'QCTF{ed183fd02d93ad9bf447a675e5ca50f3}', 'QCTF{92f0bac02ec3c460650ee00a3e44014}', 'QCTF{9cfa0b74b9c1cc630590bf6b27723173}', 'QCTF{f3738ea070ae84e91368938fd27bba5b}', 'QCTF{e4ebc1065d589f28de8c7a8b42d155e0}', 'QCTF{8235316253f972af51c131ec34ea5343}', 'QCTF{6a4f27a8f9591d2edec1a8d6e91eb701}', 'QCTF{2f802c7f75a7e36730a4ca06b5908323}', 'QCTF{6a201f25e386610af2985c4a9bfd12f1}', 'QCTF{960aa1789b260a437ae5acc194a6739d}', 'QCTF{90297d44453155abd0db30fd57ead275}', 'QCTF{dbdf5950aea4f788f281eb4330ed569c}', 'QCTF{5007208f085331760617bb3706195b31}', 'QCTF{889b4ca28ef31c2f973f9e8355a6bd1d}', 'QCTF{b4108543933ca4020513d471f11eb446}', 'QCTF{83684e8bfa571aeb0117090ddd6f387c}', 'QCTF{7e02e550b3f6ff303ecdafd7715be555}', 'QCTF{774dbe54c2f11c988ec0397911e18969}', 'QCTF{ccf2896b3eda8a313ee0af42e20e725d}', 'QCTF{b9fdcff0c8a701a30aa62830e9cfcb2}', 'QCTF{601f8349b1e01c204f02ae74f947e60a}', 'QCTF{a5287b31e3fde46bd096d8e9ed1f8044}', 'QCTF{e1434f496975f2a6b7a49daac1f817af}', 'QCTF{98069eb055a9b4cb89f00b180449b0ff}', 'QCTF{6208087ba1738eafafb2492a91731cc1}', 'QCTF{2ebc7b331b8def5d19efabc9675f57c0}', 'QCTF{55744242329a2cb592e1da05eeb7d980}', 'QCTF{3211fbd91f5eec5bf5ead4841bb70a3b}', 'QCTF{24d9d316c51d7eac668c8149d105a48f}', 'QCTF{c0a5ec884d6e1197101bf3d1b6c95a8}', 'QCTF{877926ab3ad4fde2c7ef9c595907891c}', 'QCTF{22e453f921ce4afad6aa0d173199a1f5}', 'QCTF{b7c01aff1fc3f7a6dbf7812d35c7a240}', 'QCTF{94006b92664321118189399c747228f1}', 'QCTF{ddfec89ba6bffa0b2a6724d3e727a55c}', 'QCTF{553ad94b4597858fb393f8b6af4a9a7}', 'QCTF{2494f551f07b2f51ce5845a64d5938d6}', 
'QCTF{2731f9f7e9fe2b40b0db1ce8c543667e}', 'QCTF{b5edd7dcf55af229aef8cd3d362fa7c}', 'QCTF{36f6fc7989fa4c9053a44e013325d8e3}', 'QCTF{77ef8ad1780610954bbe9eeba2324b10}', 'QCTF{62885e734c337ab239ba2712f7a50823}', 'QCTF{3b32ce33fdf44cbb69823fc58ae2d73e}', 'QCTF{2fa1b14b62f1e7864914959fd36470ef}', 'QCTF{4744bb1db4ce660318678808571106c4}', 'QCTF{33f28b1b208115600b9d209ad193f3c0}', 'QCTF{3204e8fc78c45180db298e85f0481a67}', 'QCTF{289a222a0c51757f45a1b473d0b00c8e}', 'QCTF{31ac449e8c3c8cdc0e3c4ffb666ffb40}', 'QCTF{3224004806c094679f2406dfab3d6f44}', 'QCTF{d2cdb1e0bb7e88de8e5a61f2eb014cee}', 'QCTF{12f60babc319b2efeb92459891b1769f}', 'QCTF{f4558f329b235021611fcbb88e8557ac}', 'QCTF{8abef57edb1d6c34bb42303e8e568cde}', 'QCTF{e949496401445f6428b74fb1ced2593c}', 'QCTF{37f2b4646daf21f83689b4861e70390d}', 'QCTF{d5c2a83a2f048212c3a103ecd15b328}', 'QCTF{d801d70874c7fe2231d7401a0417f027}', 'QCTF{bed3f3f9af977049872edb8002de606c}', 'QCTF{28f7eaf388552beb82ae3d86067f6f7b}', 'QCTF{c816398587c31db5a82ad2b53f1bf07f}', 'QCTF{2d0fe63903f028dd2352e946c0c9d0d2}', 'QCTF{c3225535ff87986e476fecf1dcdea910}', 'QCTF{1df54b8a90bce133bc3765d6774a12fb}', 'QCTF{1d7f1680efe4b3661e83226824e3deb8}', 'QCTF{80c6c26b56310de5da39f995bf8bdf93}', 'QCTF{24e5ef078286828a44601c9261f8a5b0}', 'QCTF{4581572f89ec8db003e9e8fb29b56c38}', 'QCTF{bea9ba501adaf57ba16c0770b24b5171}', 'QCTF{7bcdd61de3cf7f5986d6a64b6f78784f}', 'QCTF{c97d1bdcb3668b219c60cb3f4785b20}', 'QCTF{a5e7427d882a6d71e7f6dac5cd0b835e}', 'QCTF{d6011962d5a9161b01c241eb38a444b1}', 'QCTF{3aad3c18b3d207f461e03b47b8b7923f}', 'QCTF{d05374a582b53919779a62ce5a03c77e}', 'QCTF{fe4e69f5f6cd597cfd896e5d7325fcf6}', 'QCTF{53a87f4889b81e720dbdebd8a1ce988d}', 'QCTF{353c5f695a769754c0dbaf0739f3bf7d}', 'QCTF{d8230e7594b81d0981ac2dab2b65b126}', 'QCTF{85377d9bc9d7842f054be705c68b68c0}', 'QCTF{46e35ab7178a7d0003b5557c24402d0}', 'QCTF{5b67c6885631ac188ff7cd8c9cd4285a}', 'QCTF{58045d3184357ee52ee33485f76807c1}', 'QCTF{bb1cd81aa9f8d7b53985fcff706799ef}', 
'QCTF{61e2e60954d3853a2c2b7007e7d9e0a}', 'QCTF{3a4cab7634c4f091f283fd17b2909233}', 'QCTF{70f086c129a9f78cb16a17756ad59751}', 'QCTF{164a5f806173d135a4731e341e165a86}', 'QCTF{f3f0238ce2e81a8e78d8deb242793dc5}', 'QCTF{9b3b96b1b52db2742b2327442644adb2}', 'QCTF{7234916c9de7639ccd447b8232d1011b}', 'QCTF{d0d6daae7a63023b0ade577b375354c5}', 'QCTF{1cee1139de0c0e2bd9c5612e29d388ed}', 'QCTF{c1549850f4c07978de350e7edfb5791c}', 'QCTF{28d488068729836cef3fe3e4d2630f58}', 'QCTF{db529fbea15de5cfbc7bda6ec63ec734}', 'QCTF{85319658482135bd85b5ec71ea20ee79}', 'QCTF{ef0e44280721f043ce7f3d30edd81f06}', 'QCTF{9a570a6d5fb337e30ca221d7a2e661df}', 'QCTF{cb59a46f8fa4d2155b88eb4fd061f02}', 'QCTF{7d8819a012a1b477d017124054842098}', 'QCTF{5fcfaceb3caf630f0ae1eaecd8c47fb4}', 'QCTF{8f1cfc0248e4766ddfb8e27f1345c346}', 'QCTF{eb0ce593ceb619ca03bf5df62136f4be}', 'QCTF{8b4ea6c6cc375882856934710db044a6}', 'QCTF{3fdd74c9871727c64218c7437cba102f}', 'QCTF{8a0d7f4be57b6c87bf0a4d130b3f379d}', 'QCTF{3f74b8bda912c50699aeabca0ad0fd5e}', 'QCTF{b43555f32cda87384f2e5fef12319a30}', 'QCTF{8a964e78c4cc79c1cd43f86fc9d0b7e3}', 'QCTF{94240721500d4ba4b688342b6c320cfd}', 'QCTF{f54ba10a34a81dc0922157860212dc8f}', 'QCTF{6b3098bfba0708317ad30c1265c0e1c3}', 'QCTF{3720ec08bd558a026824444dd0fd11ba}', 'QCTF{15c5d1dfd6cb9d11b5dc4fbf34049fff}', 'QCTF{b98d553c4f2353c8c0bc8003d0437c5}', 'QCTF{5419f6a61f3b6af6deb56fefc971d663}', 'QCTF{1b455c80c8f73ff4c832fc1745e59133}', 'QCTF{96a7f6f659b2daa433328279096d6f87}', 'QCTF{c76e92a54e720ef59efed90524949b12}', 'QCTF{6f86e4acf84ec4d07f4c17ccd13ba8fe}', 'QCTF{c56b4e6af94213912647749e3254d528}', 'QCTF{af89844300f6f97f53e768c4e1e59ab4}', 'QCTF{fa27222f652450c32cb0e04525ea13f9}', 'QCTF{597f7b788dad1876940eccaae0e202de}', 'QCTF{1a27df8300bda395e576f3bfbabe861d}', 'QCTF{5c9120db3b51a84754d9c1e63f919f0e}', 'QCTF{8d221a0525a6d55d563ac07f9dc19886}', 'QCTF{f45862b76aa2af73fcb4d6ee146ec3f3}', 'QCTF{821e0087a2e84d4a2d1ac975fc94c3ed}', 'QCTF{eef198678a970f9ac10ef1bb6c406d54}', 
'QCTF{7d863bed37e5efc45ac4edcefeec664b}', 'QCTF{6d0d8b66b78b0f3de9e2379270a16478}', 'QCTF{f352d64ae46a2d22e39805f026fefe2e}', 'QCTF{d2ca83696a5c8a8b94f61b7f66a53952}', 'QCTF{f7d914b0b2e6e4a9d87b112f5cb9f04e}', 'QCTF{11d32e9d7509fd8653ae688419ebb611}', 'QCTF{1b707bf6e2b2d3217fc4893568493f5f}', 'QCTF{fe74128fb15258a7b3532d37f99ffb12}', 'QCTF{922352011e42e4310180ed544221bf43}', 'QCTF{aefd7d84930b972964bf7b665c9f147e}', 'QCTF{e39b56771b39e516ae1679257be4427f}', 'QCTF{3f8268bb88209f4ba5b87d5019a42a36}', 'QCTF{d3a037049546ea92d579215019aa0483}', 'QCTF{a4e0dadbaa14704ee76f2589461f8983}', 'QCTF{99da2ebb5c2cb47969fcaf13241df5d1}', 'QCTF{7012e98c1b6e6ef49dc91e96def94dec}', 'QCTF{c25be591d6a115ee1b813602755d45f9}', 'QCTF{67020749f13b10f6f2bf6793e28221d8}', 'QCTF{ef8f225e8e2eb25a030094660e059181}', 'QCTF{31a057bbd27e30445745fbc1660628b7}', 'QCTF{3bd4d64d782ee089a88cdf7c2b4baa07}', 'QCTF{2a5bce433d731957e52e8366d7494b2f}', 'QCTF{451082f73d6e1b9722e4eed3b71cfb4}', 'QCTF{102de2dc62d9b04f539c913ecf9a05cd}', 'QCTF{98902eabbc6bb5326529569000503eb5}', 'QCTF{1e4822b79d3b2771752e2649478a7d02}', 'QCTF{57b6b329c3df0b7eff786520abdbfd66}', 'QCTF{9ad5842b516b8635be0935fee40112be}', 'QCTF{ea03287ec6ab7942a0ff5632f2f6f45d}', 'QCTF{8c878b6e2b574f163acedbd17901d9c1}', 'QCTF{563d0f77b6aed348df15440ebe804cc2}', 'QCTF{5c0178d38b4a1c16f9d2dce3450fd48}', 'QCTF{ceb613e534bd4d5106481ee849968db7}', 'QCTF{b529b6a0fb76abc663ef9124b7edb37b}', 'QCTF{e6a5432bcd7fee431ae24ede18a3f46e}', 'QCTF{b4d888e0789ef8533464a214852c43e8}', 'QCTF{30971526ec11dfe15123415326a1cd1b}', 'QCTF{30d9bc150a2f65f9e83e47edca61d610}', 'QCTF{1b168aeb29e59603c70d8d05cea68fae}', 'QCTF{4130ed7e8b77d40470efa5eeaa19a91b}', 'QCTF{1d8ae553d233e3ff03d701f111ec3b35}', 'QCTF{66c35d14d5480dc84ec6cf06e2598d42}', 'QCTF{ab18b46c4779ebcde775de6ca8e99a1a}', 'QCTF{3f98589b61e70caafc0cedbf99b74ba0}', 'QCTF{6d8f79b7534c1cbdc1bbd8e9fb6cd700}', 'QCTF{85539dc204b23049804d2de24ce8b644}', 'QCTF{27c9559382981d7b06767a4dca9dc32}', 
'QCTF{4f9fa4c1ce537d104ef75c4694a0beb1}', 'QCTF{ee7a88e59c7077c30a3e688531356186}', 'QCTF{df50b11484c208245e03dc4ca85f24c4}', 'QCTF{8a17585fe8c70a5c2c8350493ab284b9}', 'QCTF{59beca4621a9577ded08a478e5fe8c9c}', 'QCTF{57672573e2877d428810a835d271b3df}', 'QCTF{8546aa77f94c436561c840046708042f}', 'QCTF{543d2e10821a2e6916279700b0ce217b}', 'QCTF{60cf778a92aa0bcb7d24e3d2fc177777}', 'QCTF{7cba2fa1f54ad4dc46cbed884f9b091a}', 'QCTF{1e06436d263aa02a2efd2498b3e6c660}', 'QCTF{ef6f339ccb301c8a9541c68893ea290e}', 'QCTF{4955d527db0af5f44030748fb4ff2fbf}', 'QCTF{b769ee03c3468918cadc54f2bf982cce}', 'QCTF{93f4aef0432c1f3f5a87ea81f73bbdd4}', 'QCTF{bb7be2a107b7ca551f512e8977ac37e6}', 'QCTF{e21d3a6840d8205fb9eb98f4c4d82916}', 'QCTF{f1bd9a644b3b2adc873ea2b89fe6f2f5}', 'QCTF{336a4347957a4cc010e8ce641b6bb787}', 'QCTF{a6e68fd474b640658ff3744947ce29ea}', 'QCTF{204a6e6ad56fe3d0380c81456b29171}', 'QCTF{720687d1ffa83c3e551200a738ccdaf7}', 'QCTF{36955914934675315065f8f34f1b437d}', 'QCTF{5a4946cf4509a953d61e1d082f94f8b4}', 'QCTF{24fa31329b7b4b1377bd244b69f91e24}', 'QCTF{e82f041e9ec9e00c9e6a3be8c2f58a35}', 'QCTF{cf50f974acfda15933672dbe13d260c9}', 'QCTF{94bb4817d05d7607eb7732282f30072}', 'QCTF{adf3c57c75c8b14b4341d81658b55a1e}', 'QCTF{e44e0dd3ff7a20a384fcb07155409735}', 'QCTF{5282b6ae1da9bfdd1bfdd2baf1831e0d}', 'QCTF{fe0d5097f4cf2e2c70a9ef74b0bde62c}', 'QCTF{5689a2933c6243a6bf2cacb212f08bc1}', 'QCTF{d22970bcd33f7f386fb8016ec747fb0f}', 'QCTF{4e19bed752ac40fd2ae3ac004382f9ae}', 'QCTF{9eff0315317b7a30bf5b8ca058b6567a}', 'QCTF{8f14061c9f099b85f0fff2be83e888f2}', 'QCTF{4d097e039c069849d050e2f3c4169378}', 'QCTF{dc5b0f76cc38eab2b65873be6ab141a4}', 'QCTF{409ce63c74d83f9ce93b84b2dac99f0e}', 'QCTF{14bbf0885ee51577e923a940610df25b}', 'QCTF{bd9ba3afdb89eac3af48b181288b60e6}', 'QCTF{3a7ccfa9e0f5f4bb3fa6e0d7adddd05d}', 'QCTF{c3683aff68ec95037e8b42b2369dfcc7}', 'QCTF{2b25420579cc68fc3a911f346f3782e8}', 'QCTF{499d5e798d4f48432f63310f67317b96}', 'QCTF{599a4d1e18414ebf66bba587652eac21}', 
'QCTF{c6e73d95a28a9884fbf25b04b411fd3e}', 'QCTF{ff1dc167261043b76244a4e1253da01}', 'QCTF{3d9bef76f9ac25aca1160a474864c90e}', 'QCTF{47481d676b9e0e1cda614d30b091d0d9}', 'QCTF{3b364ccedeaba0f6035c7a9b16e04223}', 'QCTF{d782be90e1db1b4adb189dafa1e61fc3}', 'QCTF{8414dd39ec6370d697b9080786b8dd0c}', 'QCTF{f20c5843bfaf4bf9604ac64529f7b0e}', 'QCTF{59b28531d2e91a580aeae14cdeb8a273}', 'QCTF{19bde6cd19c220d619e11e7c8a6853ff}', 'QCTF{e976d40ddf34653b4ae167ac223caafc}', 'QCTF{f0065e607819919a340fa213ee7a26dc}', 'QCTF{f0dc3428301c8bc15adb0a22c55ccc19}', 'QCTF{70f714846fd03ddacdaa89d5a8a83f3e}', 'QCTF{b58e4132f100642ad71095482fbc612e}', 'QCTF{4078c6840780a0d4dfbc1ea78e74b66c}', 'QCTF{ecdfb31282ca62859c963858d067ac87}', 'QCTF{d6e8ed3cc7619f09b8995c37f3664bdd}', 'QCTF{7eedc188d82303b1271dc513cb58ba2f}', 'QCTF{fe6c6ee730ed333c697976ff2c48a22b}', 'QCTF{bc05b1edbc706745ca91ce7ea9255589}', 'QCTF{e127233ea668b78247d4b1925a005c04}', 'QCTF{92fea22940f1d0ef4560eb395cc5a2f0}', 'QCTF{a89d1bf35f6c3547830de210f8d412ef}', 'QCTF{4bf66a979903fe332b74ec2c06f92885}', 'QCTF{fec0000ecd5267c330e5b59708da6a06}', 'QCTF{16a0ef1f5ded1b7d4d8031d2c11f4ab6}', 'QCTF{1660376cba2d3201c21506636a124cb0}', 'QCTF{9f3bca8d99e5ad2612b4a9030376367e}', 'QCTF{23a0abcc3ee3aef11099766a53e3266e}', 'QCTF{7fc89b1abf47f9f57786377549149c11}', 'QCTF{3029deb05182dd81e73d25cf6a1bdf85}', 'QCTF{f6b0115fa675b7e22d838fa2fa1a6711}', 'QCTF{9afc7f05dee44c4644d8afb3a408ba9d}', 'QCTF{4476185204de1e78d98afb170adbd765}', 'QCTF{6dcaa07bfa64b786077f06f993638f46}', 'QCTF{b81d4c0800576b145974442ccbd755b6}', 'QCTF{928742ef04ca4aa42ed7aaffa9dbd4b5}', 'QCTF{835298d326cd1b01bc36a92d7ca6a7cf}', 'QCTF{7d4396c95f8c1b0d66afbdd9589676e3}', 'QCTF{d0fdff1de01305ba5ea89e33bf679efb}', 'QCTF{6665d6a5e81b0a13b92ad7a3a43e74e9}', 'QCTF{64d1eb67c6a48b14702204d43505985}', 'QCTF{12107145f2999f0d134cfe01a52e4505}', 'QCTF{7e94ca57081d41bc7780e3b6acc8fba9}', 'QCTF{f0e69b571709c27bb646d97182607c32}', 'QCTF{1d4c0bbb9847915120e5e0d154442482}', 
'QCTF{aa5cdbdf8dea13f2fef3d40b25f6e655}', 'QCTF{4bf4c607a177df1189959ace9a0a2d74}', 'QCTF{c7c5b7d1c6979ce7d12cb08d4fdbee7b}', 'QCTF{fce6f32dcd48b38452705b31b54840e4}', 'QCTF{567eb21a8cd42b9947af0712f14a11ef}', 'QCTF{7c128e38b8267d693c18350a9ffc41da}', 'QCTF{7a42778c249f300847a1c2e1c8383e67}', 'QCTF{95fd199efcc21c2ab9865e48d5f68be9}', 'QCTF{d46b2ac0524ddda762e866185b81afe8}', 'QCTF{23a7fe731f3eb1b6123aaf8f71f89fbb}', 'QCTF{6bbaeb3db0a3d393f82f0378a1eb846b}', 'QCTF{aa9010d6035951cf3b52c9821f771d6b}', 'QCTF{6c6c10a9da58263e61e3c0f5df3083f6}', 'QCTF{202467ce72d1eadbbe521716d7fd9994}', 'QCTF{4e7676420a6cedb294cf88068fdd0001}', 'QCTF{7e1896b01d6de404ca72dcf3cbaf4848}', 'QCTF{354ca45736ab427026f27f539b91ccc9}', 'QCTF{a5d62dbf84ed39addf8297821b05ffb0}', 'QCTF{27132ee3afea3acd95bc4ae36f39806c}', 'QCTF{3be061b78b2c2e71fe00b65a21328a0f}', 'QCTF{72534e60fada92ec2148bc8dc38e294a}', 'QCTF{ede61e1dcd58d17d548d0ee442f8b9c}', 'QCTF{437018462480e8f60b5c0e5e2925bcf2}', 'QCTF{9a740493085118e4b72957479b289acb}', 'QCTF{8e673da846f5fde4bd4e67790f6c2d08}', 'QCTF{3056fc1b56ddb29a672d7463458d694a}', 'QCTF{c86423ea34ae46b8ea9b13424d9b7752}', 'QCTF{6964c72d5cf141f6be45bad6868d5c7f}', 'QCTF{3b2f10b121089a3b8728191d0caf7312}', 'QCTF{df77aba990d70d5825c0e8bc77114df4}', 'QCTF{2b190985973cdccfdd6f5464e0f8912b}', 'QCTF{21705eb2c93b7d95d2eff099d56ebc77}', 'QCTF{6f6e6a8b6f56680290c74ee338d202ef}', 'QCTF{9b49f73af1bd70b16d47c837f94d78f6}', 'QCTF{a116467f31ff62d38387f48d3256fbe2}', 'QCTF{f721c78ecd374d4ad8d7f76687884e52}', 'QCTF{bec64dc3accdcfd37a6266f3881b3b75}', 'QCTF{c3e46d8159f9b204ca8130d9a45b5a77}', 'QCTF{10bd70ada8f160729fc98479c3544ce3}', 'QCTF{ef51a151361e78a1c4f9cbe854189155}', 'QCTF{c2bb4d862ef473b1df62f9c489db6918}', 'QCTF{a57414a622533a6dcabcacec10ce6358}', 'QCTF{c81003c41edb222f599d2c1b9c7ccf0e}', 'QCTF{9c641cbaac5a45d8b6db74c046e51a6b}', 'QCTF{30eba65740304dadb819a3b6ab89f692}', 'QCTF{f555e5beb936fe326f8526fbffe76770}', 'QCTF{5e0c964eb2ae8de01c99250414727e0d}', 
'QCTF{7d686a8156522cdbb6b22bb9690571f2}', 'QCTF{b44af2d9101cd78d99f7e89ebaf01f97}', 'QCTF{d3060a8257c3f51ce7aaeb0f8f866a3e}', 'QCTF{b8457280ade363d9b48548b0c3a67e1}', 'QCTF{30d54e198a62e0d97b43e60b91e32272}', 'QCTF{68d2ec7d87dbb6d0cb84ed3c7fdc0233}', 'QCTF{72161971b31405cbb3ce4c0641a49e31}', 'QCTF{a4f3ab7c3402cb60c5c8720588d23e02}', 'QCTF{1c58ce778d5a5dfd15dbbe66572277da}', 'QCTF{db73fe0d69ea1c921283192ebeb62702}', 'QCTF{83c17115c76e49aa5db6de3b29988726}', 'QCTF{73f51a4ed4d2ace525a304db0ce4c25a}', 'QCTF{e1c05a21252278b31600c91eb48a405a}', 'QCTF{7047486e74229a511fa55417cea002b9}', 'QCTF{bf63134c89301f9b0e5825c793c01e7f}', 'QCTF{c8e12dcae23173565b7a24adbafb330c}', 'QCTF{d14d12c2e2583470a5622119171f3806}', 'QCTF{190d0e5529ad16c45fca03371e495a2a}', 'QCTF{f6f5bf36aad2db904a610a699b9af909}', 'QCTF{a1f02345a963d5658e65b81504023098}', 'QCTF{b8dda136ae1c851cb31158bbac5d146f}', 'QCTF{9da0fd3363e5ec764ebb6cefc17b5992}', 'QCTF{9c3290465b9ed38e01bf9bee34b83263}', 'QCTF{a6d0036abcb92d4ad054bf09b261d9e6}', 'QCTF{c746b1b7901a751e22ae291a45adbe33}', 'QCTF{4722a62552d7b1befb8cfabc3974cf8f}', 'QCTF{bcb818db91e59a7ec2e62ab732d27df3}', 'QCTF{8fa34dd617f2001847406a9da51a87f5}', 'QCTF{f0fe5b744e83c674a1ffa6e1717f247f}', 'QCTF{69c1500d0b5a43f8350dd5a74c38e48}', 'QCTF{68b4f121e675c7fbcf766c900ead6e31}', 'QCTF{583e288d38591232ee89ca5a23f2dcb2}', 'QCTF{237dea0d71825be46afb37ae993132e7}', 'QCTF{5aeea2c59f5596717faa3df817f7a100}', 'QCTF{78cffa194eb4350b47ef60c70f541880}', 'QCTF{cbb9fa0b5d46bd0f8b859ac5362efbc4}', 'QCTF{f25e46978fc915648e1861cebb335d6a}', 'QCTF{657d0ba31287885b8070d6a14b52e5ec}', 'QCTF{84b0ac637a4f91c20f2c567075121990}', 'QCTF{ef085c6cb638685c2594ad86227accf3}', 'QCTF{2b5f36111b6e05a5f75d434a9b9d7d03}', 'QCTF{36663985b129e018deaf2abaf6f326f1}', 'QCTF{63953bd9f06f3e178be8bbc809c2149}', 'QCTF{348f25e5e59f08db03a726b106ffb610}', 'QCTF{8394b30dc961b4b1490eeb171f385994}', 'QCTF{bd371857a834c2e32b13dff53ce38668}', 'QCTF{9c40fd6c30caf97cbf9843db1833da27}', 
'QCTF{e212c103c794018b1acc5698573dea00}', 'QCTF{6eb80e36d276e2345af98d47724507b8}', 'QCTF{6e7197c63830c930e0947ee25abb2f78}', 'QCTF{326bea6b25c92b13af73d3d87127ff1b}', 'QCTF{5e907a31a3331d23ef76f6d9b4cdc391}', 'QCTF{aea70701e952a51f47fae051fa8a36b0}', 'QCTF{deb7def97eaf359ab3a0adc7e8be9c29}', 'QCTF{ab7197cbe92f054491e615703138e9c8}', 'QCTF{5a42e2cb0ec09903fa463153e1e78381}', 'QCTF{5cd7043faf609a56a6e0c16eeed272da}', 'QCTF{def3cf10a26b339d4e488f4570c3f28a}', 'QCTF{8c3ac45af097221231708762f09f4bda}', 'QCTF{5688223124a23f3e857022023454fe9a}', 'QCTF{f5cc1dc7c66506b0c6f7d1e41cd71f41}', 'QCTF{8bf67270e6750d0ecb2891060fce93a3}', 'QCTF{83f6cf49d6675bee851e1ef8f712294f}', 'QCTF{e172e8eee527352960785d7c6b426c9a}', 'QCTF{79d662d7b1a064d5703c2453f6a60e76}', 'QCTF{4516b1272a8a43773dc4314b9cec511b}', 'QCTF{e5cfbd58b9e0a6bc3706498a118d9d8a}', 'QCTF{ef3b55a3f6eb52ea5d0d351616092196}', 'QCTF{e1a81ff20877789ebbd3a3b318247ff7}', 'QCTF{449209c23b235b865591f2714969aaa7}', 'QCTF{6ba8969c6814c151ec816e3c20161167}', 'QCTF{ad6fff69f95afb7b71c371da9dd34595}', 'QCTF{b913c28b3defe7d96413351dec6c57d}', 'QCTF{c58d58f7dbba21898b239fad3aa26b17}', 'QCTF{579210bf72afa3de2e53d0e819a67f01}', 'QCTF{bb0117ede447c87f32bf075d91b6807d}', 'QCTF{bcd5c970aad964d3175a2049e1d781fe}', 'QCTF{1f7a21f89fc35136f6b194c3ea75fc7e}', 'QCTF{5f70da06f2ad30ec4b63fee7d2ab08cb}', 'QCTF{f56ac65ae29dd6d0945cbf3ed2cea373}', 'QCTF{f58a4c303169be0f4564152f59f76b6a}', 'QCTF{1437e8341cbc1fd1d69a29366aee2df1}', 'QCTF{37137d5b977c148759d620193190048c}', 'QCTF{fdc481c2f4c33e47d828ac7f7a9448a9}', 'QCTF{248620071580a5b38323ae10e713ea99}']
def check(attempt, context):
    """Accept only the flag assigned to this participant; a valid flag that
    belongs to a different participant is reported as plagiarism."""
    expected = flags[attempt.participant.id % len(flags)]
    if attempt.answer == expected:
        return Checked(is_answer_correct=True)
    if attempt.answer in flags:
        # Someone submitted another team's flag: record whose it was.
        return CheckedPlagiarist(is_answer_correct=False,
                                 plagiarized_from=flags.index(attempt.answer))
    return Checked(is_answer_correct=False)
| 2,550 | 25,172 | 0.855451 |
7953d84948d39036b6961c0fed856a5f8a858400 | 180 | py | Python | array/sorted_array_squares.py | elenaborisova/LeetCode-Solutions | 98376aab7fd150a724e316357ae5ea46988d9eac | [
"MIT"
] | null | null | null | array/sorted_array_squares.py | elenaborisova/LeetCode-Solutions | 98376aab7fd150a724e316357ae5ea46988d9eac | [
"MIT"
] | null | null | null | array/sorted_array_squares.py | elenaborisova/LeetCode-Solutions | 98376aab7fd150a724e316357ae5ea46988d9eac | [
"MIT"
def sorted_squares(nums):
    """Return the squares of the numbers in *nums*, sorted ascending."""
    return sorted(value * value for value in nums)


print(sorted_squares([-4, -1, 0, 3, 10]))
print(sorted_squares([-7, -3, 2, 3, 11]))
| 22.5 | 41 | 0.627778 |
7953d87d0c3f4d8a9f273f5aced368b3f4892d07 | 436 | py | Python | phdhelper/helpers/os_shortcuts.py | jmsplank/phdhelper | c06dd06669b42dbe4c9e1a6eeec3d0ad3885d2eb | [
"MIT"
] | null | null | null | phdhelper/helpers/os_shortcuts.py | jmsplank/phdhelper | c06dd06669b42dbe4c9e1a6eeec3d0ad3885d2eb | [
"MIT"
] | null | null | null | phdhelper/helpers/os_shortcuts.py | jmsplank/phdhelper | c06dd06669b42dbe4c9e1a6eeec3d0ad3885d2eb | [
"MIT"
] | null | null | null | import os
def get_path(file, level="."):
    """Return the directory containing *file*, optionally climbing parents.

    Each dot in *level* beyond the first climbs one directory:
    "." -> the file's own directory, ".." -> its parent, "..." -> the
    grandparent, and so on.

    Args:
        file (str): a file path (typically __file__).
        level (str): a string of dots selecting how far up to walk.

    Returns:
        str: the resolved directory path.
    """
    # os.path.dirname is separator-aware; the previous manual '/'-split
    # broke on Windows separators and produced "" at the filesystem root.
    path = os.path.dirname(os.path.realpath(file))
    for _ in range(level.count(".") - 1):
        path = os.path.dirname(path)
    return path
def new_path(path, extension=""):
    """Return a callable mapping a filename to "<path>[/<extension>]/<filename>".

    Fixes the previous version, whose closure ignored its *filename*
    argument and always emitted the literal "(unknown)".

    Args:
        path (str): base directory.
        extension (str): optional sub-directory inserted before the filename.

    Returns:
        callable: f(filename) -> full path string.
    """
    def f(filename):
        middle = "/" + extension if extension != "" else ""
        return f"{path}{middle}/{filename}"
    return f
| 22.947368 | 79 | 0.548165 |
7953d98bcbb826267fa21f6503e55049c8aff5ba | 118,515 | py | Python | python/paddle/fluid/framework.py | ShinanWu/Paddle | 0d276d38b456b7e77cd69903939edb63cc34f73c | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/framework.py | ShinanWu/Paddle | 0d276d38b456b7e77cd69903939edb63cc34f73c | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/framework.py | ShinanWu/Paddle | 0d276d38b456b7e77cd69903939edb63cc34f73c | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import collections
from collections import defaultdict
from collections import Iterable
import contextlib
from .wrapped_decorator import signature_safe_contextmanager
import os
import re
import traceback
import six
import numpy as np
import subprocess
import multiprocessing
from .. import compat as cpt
from .proto import framework_pb2
try:
if os.name == 'nt':
import sys
third_lib_path = os.path.abspath(os.path.dirname(
__file__)) + os.sep + '..' + os.sep + 'libs'
os.environ['path'] += ';' + third_lib_path
sys.path.append(third_lib_path)
from . import core
except ImportError as e:
if os.name == 'nt':
executable_path = os.path.abspath(os.path.dirname(sys.executable))
raise ImportError(
"""NOTE: You may need to run \"set PATH=%s;%%PATH%%\"
if you encounters \"DLL load failed\" errors. If you have python
installed in other directory, replace \"%s\" with your own
directory. The original error is: \n %s""" %
(executable_path, executable_path, cpt.get_exception_message(e)))
else:
raise ImportError(
"""NOTE: You may need to run \"export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH\"
if you encounters \"libmkldnn.so not found\" errors. If you have python
installed in other directory, replace \"/usr/local/lib\" with your own
directory. The original error is: \n""" + cpt.get_exception_message(e))
except Exception as e:
raise e
from . import unique_name
# Names exported via "from paddle.fluid.framework import *".
__all__ = [
    'Program',
    'default_startup_program',
    'default_main_program',
    'program_guard',
    'name_scope',
    'cuda_places',
    'cpu_places',
    'cuda_pinned_places',
]
# Special variable-name markers mirrored from the compiled C++ core.
EMPTY_VAR_NAME = core.kEmptyVarName()
TEMP_VAR_NAME = core.kTempVarName()
GRAD_VAR_SUFFIX = core.kGradVarSuffix()
ZERO_VAR_SUFFIX = core.kZeroVarSuffix()
CONTROL_DEP_VAR_PREFIX = core.kControlDepVarName()
# Dygraph (imperative) mode state, initialized to None; _in_dygraph_mode()
# treats a non-None tracer as "dygraph mode active".
_dygraph_tracer_ = None
_dygraph_current_expected_place_ = None
def _in_dygraph_mode():
    # True when a dygraph tracer has been installed (see _dygraph_tracer_).
    return _dygraph_tracer_ is not None
def _dygraph_tracer():
    # Accessor for the module-level dygraph tracer (may be None).
    return _dygraph_tracer_
def _current_expected_place():
    # Accessor for the module-level dygraph expected place (may be None).
    return _dygraph_current_expected_place_
def _cpu_num():
return int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
def cuda_places(device_ids=None):
    '''
    Create a list of :code:`fluid.CUDAPlace` objects.

    When :code:`device_ids` is None, the environment variable
    :code:`FLAGS_selected_gpus` is consulted first (e.g.
    :code:`FLAGS_selected_gpus=0,1,2` yields places for devices 0, 1 and 2);
    if it is unset, one place per visible GPU is returned.

    Otherwise :code:`device_ids` may be a single gpu device id or a
    list/tuple of ids, and one place is created per id.

    Args:
        device_ids (None|list(int)|tuple(int)): gpu device id list.

    Returns:
        out (list(fluid.CUDAPlace)): gpu place list.
    '''
    assert core.is_compiled_with_cuda(), "Not compiled with CUDA"
    if device_ids is None:
        gpus_env = os.getenv("FLAGS_selected_gpus")
        if gpus_env:
            device_ids = [int(tok) for tok in gpus_env.split(",")]
        else:
            device_ids = six.moves.range(core.get_cuda_device_count())
    elif not isinstance(device_ids, (list, tuple)):
        device_ids = [device_ids]
    return [core.CUDAPlace(idx) for idx in device_ids]
def cpu_places(device_count=None):
    '''
    Build a list of :code:`fluid.CPUPlace` objects.

    When :code:`device_count` is None, it defaults to the :code:`CPU_NUM`
    environment variable, falling back to
    :code:`multiprocessing.cpu_count()`.

    Args:
        device_count (None|int): device number.

    Returns:
        out (list(fluid.CPUPlace)): cpu place list.
    '''
    count = _cpu_num() if device_count is None else device_count
    return [core.CPUPlace()] * count
def cuda_pinned_places(device_count=None):
    '''
    Create a list of :code:`fluid.CUDAPinnedPlace` objects.

    If :code:`device_count` is None, the device count would
    be determined by environment variable :code:`CPU_NUM`.
    If :code:`CPU_NUM` is not set, the device count would
    be determined by :code:`multiprocessing.cpu_count()`.

    Args:
        device_count (None|int): device number.

    Returns:
        out (list(fluid.CUDAPinnedPlace)): cuda pinned place list.
    '''
    assert core.is_compiled_with_cuda(), \
        "Not compiled with CUDA"
    if device_count is None:
        device_count = _cpu_num()
    # BUG FIX: previously this returned [core.cuda_pinned_places()] * n,
    # calling a non-existent C++ binding (an AttributeError at runtime)
    # instead of constructing the CUDAPinnedPlace objects the docstring
    # promises.
    return [core.CUDAPinnedPlace()] * device_count
class NameScope(object):
    """A node in the tree of hierarchical operator name scopes.

    Each node remembers its parent and the children created under it; a
    repeated child prefix gets a numeric suffix ("fc", "fc_1", "fc_2", ...).
    """

    def __init__(self, name="", parent=None):
        self._children = dict()
        self._name = name
        self._parent = parent

    def child(self, prefix):
        """Create, register and return a child scope for ``prefix``."""
        siblings = self._children.setdefault(prefix, [])
        if not siblings:
            node = NameScope(prefix, self)
        else:
            node = NameScope(prefix + "_%d" % len(siblings), self)
        siblings.append(node)
        return node

    def parent(self):
        """Return the enclosing scope (None for the root)."""
        return self._parent

    def name(self):
        """Return this scope's (possibly suffixed) name."""
        return self._name
_name_scope = NameScope()
@signature_safe_contextmanager
def name_scope(prefix=None):
    """
    Generate hierarchical name prefix for the operators.

    Note: This should only used for debugging and visualization purpose.
    Don't use it for serious analysis such as graph/program transformations.

    Args:
        prefix(str): prefix.

    Examples:
        .. code-block:: python

          with name_scope("encoder"):
             ...
          with name_scope("decoder"):
             ...
             with name_scope("attention"):
                ...
    """
    # TODO(panyx0718): Only [0-9a-z].
    assert prefix, "namescope prefix cannot be empty."
    global _name_scope
    _name_scope = _name_scope.child(prefix)
    try:
        yield
    finally:
        # BUG FIX: pop the scope even when the body raises; previously an
        # exception inside the `with` block left _name_scope pointing at the
        # dead child scope for the rest of the process.
        _name_scope = _name_scope.parent()
def _full_name_scope():
    """Join the scope names on the path from the current scope to the root.

    The result always ends with "/" and starts with the root's empty name,
    matching the original formatting.
    """
    # Read-only access to the module-level scope; no `global` needed.
    path = ""
    node = _name_scope
    while node:
        path = node.name() + "/" + path
        node = node.parent()
    return path
def generate_control_dev_var_name():
    """Return a fresh, randomized name for a control-dependency variable."""
    import random
    # random.random() gives enough entropy to avoid collisions in practice.
    return CONTROL_DEP_VAR_PREFIX + "@" + str(random.random())


def grad_var_name(var_name):
    """
    Returns:
        str: gradient name for a certain var name
    """
    return var_name + GRAD_VAR_SUFFIX
def convert_np_dtype_to_dtype_(np_dtype):
    """
    Convert the data type in numpy to the data type in Paddle

    Args:
        np_dtype(np.dtype): the data type in numpy.

    Returns:
        core.VarDesc.VarType: the data type in Paddle.

    Raises:
        ValueError: if the numpy dtype has no Paddle counterpart.
    """
    dtype = np.dtype(np_dtype)
    if dtype == np.float32:
        return core.VarDesc.VarType.FP32
    elif dtype == np.float64:
        return core.VarDesc.VarType.FP64
    elif dtype == np.float16:
        return core.VarDesc.VarType.FP16
    elif dtype == np.int32:
        return core.VarDesc.VarType.INT32
    elif dtype == np.int16:
        return core.VarDesc.VarType.INT16
    elif dtype == np.int64:
        return core.VarDesc.VarType.INT64
    elif dtype == np.bool_:
        # BUG FIX: ``np.bool`` (a deprecated alias of the builtin ``bool``)
        # was removed in NumPy 1.24; ``np.bool_`` compares identically
        # against a bool dtype, so behavior is unchanged on older NumPy.
        return core.VarDesc.VarType.BOOL
    elif dtype == np.uint16:
        # NOTE(review): uint16 is mapped to the *signed* INT16 kernel type
        # here -- presumably because no unsigned 16-bit type exists in
        # VarDesc. Confirm before relying on round-tripping uint16 data.
        return core.VarDesc.VarType.INT16
    elif dtype == np.uint8:
        return core.VarDesc.VarType.UINT8
    elif dtype == np.int8:
        return core.VarDesc.VarType.INT8
    else:
        raise ValueError("Not supported numpy dtype %s" % dtype)
def dtype_is_floating(dtype):
    """
    Check the data type is floating or not.
    Args:
        dtype(np.dtype|core.VarDesc.VarType): data type.
            Could be numpy format or Paddle format
    Returns(bool): True if data type is a float value
    """
    if not isinstance(dtype, core.VarDesc.VarType):
        dtype = convert_np_dtype_to_dtype_(dtype)
    floating_types = (
        core.VarDesc.VarType.FP16,
        core.VarDesc.VarType.FP32,
        core.VarDesc.VarType.FP64,
    )
    return dtype in floating_types
def _debug_string_(proto, throw_on_error=True):
"""
Get the debug string of a protobuf message. The message could be not
initialized.
Args:
proto(google.protobuf.message.Message): The protobuf message
throw_on_error(bool): True if raise an error when the protobuf message
is not initialized.
Returns(str): The debug string of the protobuf message
"""
error_fields = list()
if not proto.IsInitialized(error_fields) and throw_on_error:
raise ValueError("{0} are not initialized.\nThe message is {1}:\n".
format(error_fields, proto))
return proto.__str__()
class Variable(object):
"""
In Fluid, every input and output of an operator is a variable. In most
cases, variables are used for holding different kinds of data or training
labels. A variable belongs to a block. All variable has its own name and
two variables in different blocks could have the same name.
There are many kinds of variables. Each kind of them has its own attributes
and usages. Please reference the framework.proto for details.
Most of a Variable's member variables can be setted to be None. It mean
it is not available or will be specified later.
Args:
block(Block): The block that the variable belongs to.
type(core.VarDesc.VarType): Variable type. Please reference the
framework.proto for details.
name(str|None): The name of the variable. If setted None, it will be
generated automatically. Default: None
shape(tuple|list|None): The shape of the variable. -1 means the batch size.
Some kinds of variable do not contain shape, just set it to None.
Default: None
dtype(np.dtype|core.VarDesc.VarType|str|None): The data type of variable.
Default: None
lod_level (int|None): The level of lod tensor. 0 means it is not a time
series data.
Default: None
capacity (int|None): The capacity of Channel variable. Ignored for other
types. Default: None
persistable (bool|None): True if the variable is persistable. A persistable
variable will not be deleted after an iteration ending. Defaults: None.
error_clip (BaseErrorClipAttr|None): The error clip attributes of the
corresponding gradient variable. Default: None
stop_gradient (bool): True if the variable will stop to calculate its
gradients when backward. Default: False.
is_data (bool): True if the variable is an input data. Default: False
Notes:
The constructor of Variable should not be invoked directly. Please
use `Block.create_var` to create a variable.
Examples:
.. code-block:: python
cur_program = Program()
cur_block = cur_program.current_block()
new_variable = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
"""
    def __init__(self,
                 block,
                 type=core.VarDesc.VarType.LOD_TENSOR,
                 name=None,
                 shape=None,
                 dtype=None,
                 lod_level=None,
                 capacity=None,
                 persistable=None,
                 error_clip=None,
                 stop_gradient=False,
                 is_data=False,
                 **kwargs):
        # Create (or re-open) the variable named ``name`` inside ``block``.
        # Dygraph mode backs the variable with a core.VarBase held by the
        # tracer; static-graph mode backs it with a VarDesc in the block.
        self.block = block
        if name is None:
            name = unique_name.generate('_generated_var')
        if dtype is not None:
            if not isinstance(dtype, core.VarDesc.VarType):
                dtype = convert_np_dtype_to_dtype_(dtype)
        if _in_dygraph_mode():
            # record vars in tracer rather than blocks
            self._ivar = kwargs.get("ivar", None)
            if not self._ivar:
                self._ivar = core.VarBase(
                    name, dtype if dtype else core.VarDesc.VarType.FP32,
                    list(shape) if shape else [],
                    _current_expected_place(), stop_gradient, True
                    if persistable else False)
            if persistable:
                _dygraph_tracer().trace_var(name, self)
        else:
            self.error_clip = error_clip
            is_new_var = False
            name = cpt.to_text(name)
            # Reuse the existing VarDesc when a variable with this name was
            # already declared in the block; otherwise create a fresh one.
            self.desc = self.block.desc.find_var(cpt.to_bytes(name))
            if self.desc is None:
                self.desc = self.block.desc.var(cpt.to_bytes(name))
                is_new_var = True
            # For a pre-existing variable, every explicitly passed attribute
            # (type/shape/dtype/lod_level/persistable) must match the earlier
            # declaration; any mismatch raises ValueError.
            if is_new_var:
                self.desc.set_type(type)
            elif self.desc.type() != type:
                raise ValueError(
                    "Variable {0} has been created before. The "
                    "previous type is {1}; the new type is {2}. They"
                    " are not matched".format(self.name, self.desc.type(),
                                              type))
            if shape is not None:
                if is_new_var:
                    self.desc.set_shape(shape)
                else:
                    old_shape = self.shape
                    shape = tuple(shape)
                    if shape != old_shape:
                        raise ValueError(
                            "Variable {0} has been created before. the previous "
                            "shape is {1}; the new shape is {2}. They are not "
                            "matched.".format(self.name, old_shape, shape))
            if dtype is not None:
                if is_new_var:
                    self.desc.set_dtype(dtype)
                else:
                    old_dtype = self.dtype
                    if dtype != old_dtype:
                        raise ValueError(
                            "Variable {0} has been created before. "
                            "The previous data type is {1}; the new "
                            "data type is {2}. They are not "
                            "matched.".format(self.name, old_dtype, dtype))
            if lod_level is not None:
                if is_new_var:
                    self.desc.set_lod_level(lod_level)
                else:
                    if lod_level != self.lod_level:
                        raise ValueError(
                            "Variable {0} has been created before. "
                            "The previous lod_level is {1}; the new "
                            "lod_level is {2}. They are not "
                            "matched".format(self.name, self.lod_level,
                                             lod_level))
            if persistable is not None:
                if is_new_var:
                    self.desc.set_persistable(persistable)
                else:
                    if persistable != self.persistable:
                        raise ValueError(
                            "Variable {0} has been created before."
                            "The previous persistable is {1}; the new "
                            "persistable is {2}. They are not matched".format(
                                self.name, self.persistable, persistable))
            if capacity is not None:
                if is_new_var:
                    self.desc.set_capacity(capacity)
                else:
                    # TODO(abhinavarora) : Compare with set capacity once,
                    # get_capacity is implemented
                    pass
            # Register in the block and initialize the Python-side fields.
            self.block.vars[name] = self
            self.op = None
            self.stop_gradient = stop_gradient
            self.is_data = is_data
    def _numpy(self):
        # Copy the dygraph tensor to CPU and expose it as a numpy array.
        new_ivar = self._ivar._copy_to(core.CPUPlace(), True)
        return np.array(new_ivar.value().get_tensor())

    def _backward(self):
        # Run autograd starting from this variable (dygraph only).
        self._ivar._run_backward()

    def _gradient(self):
        # Fetch this variable's gradient as a numpy array (dygraph only).
        new_ivar = self._ivar._grad_ivar()._copy_to(core.CPUPlace(), True)
        return np.array(new_ivar.value().get_tensor())

    def _clear_gradient(self):
        # Clear the stored gradient (dygraph only).
        self._ivar._clear_gradient()
def __str__(self):
return self.to_string(True)
def to_string(self, throw_on_error, with_details=False):
"""
Get debug string.
Args:
throw_on_error(bool): True if raise an exception when self is
not initialized.
with_details(bool): more details about variables and parameters
(e.g. trainable, optimize_attr, ...) will be printed when
with_details is True. Default False;
Returns:
str: The debug string.
"""
if _in_dygraph_mode():
# TODO(panyx0718): add more dygraph debug info.
return 'name %s, dtype: %s shape: %s' % (self.name, self.dtype,
self.shape)
assert isinstance(throw_on_error, bool) and isinstance(with_details,
bool)
protostr = self.desc.serialize_to_string()
proto = framework_pb2.VarDesc.FromString(six.binary_type(protostr))
res_str = _debug_string_(proto, throw_on_error)
if with_details:
additional_attr = ("error_clip", "stop_gradient")
for attr_name in additional_attr:
res_str += "%s: %s\n" % (
attr_name, six.binary_type(getattr(self, attr_name)))
return res_str
__repr__ = __str__
    def _set_desc(self, input):
        """
        Set the variable description.

        Args:
            input(core.VarDesc): The new VarDesc.

        Returns:
            None
        """
        # NOTE: the parameter is named ``input`` (shadowing the builtin) and
        # is kept as-is for backward compatibility with keyword callers.
        self.desc = input
    @property
    def _stop_gradient(self):
        # Dygraph keeps the flag on the C++ VarBase; static graph keeps it
        # on the Python object.
        if _in_dygraph_mode():
            return self._ivar.stop_gradient
        else:
            return self.stop_gradient

    @_stop_gradient.setter
    def _stop_gradient(self, s):
        if _in_dygraph_mode():
            self._ivar.stop_gradient = s
        else:
            self.stop_gradient = s
@property
def persistable(self):
if _in_dygraph_mode():
return self._ivar.persistable
else:
return self.desc.persistable()
@persistable.setter
def persistable(self, p):
if _in_dygraph_mode():
return self._ivar.persistable
else:
self.desc.set_persistable(p)
    @property
    def name(self):
        # Variable name, proxied to the VarBase (dygraph) or VarDesc (static).
        if _in_dygraph_mode():
            return self._ivar.name
        else:
            return cpt.to_text(self.desc.name())

    @name.setter
    def name(self, new_name):
        if _in_dygraph_mode():
            self._ivar.name = new_name
        else:
            self.desc.set_name(new_name)
    @property
    def shape(self):
        # convert to tuple, make it as same as numpy API.
        if _in_dygraph_mode():
            return self._ivar.shape
        else:
            return tuple(self.desc.shape())

    @property
    def dtype(self):
        # Data type, proxied to the VarBase (dygraph) or VarDesc (static).
        if _in_dygraph_mode():
            return self._ivar.dtype
        else:
            return self.desc.dtype()

    @property
    def lod_level(self):
        # TODO(minqiyang): Support lod_level in dygraph mode
        return self.desc.lod_level()

    @property
    def type(self):
        if _in_dygraph_mode():
            # NOTE(review): this returns ``self._ivar.dtype`` -- the *data*
            # type, not the variable type -- which looks like a copy/paste
            # slip from the ``dtype`` property above. Confirm against the
            # VarBase API before changing.
            return self._ivar.dtype
        else:
            return self.desc.type()
    def _set_error_clip(self, error_clip):
        """
        Set the error_clip.

        Args:
            error_clip(BaseErrorClipAttr) : The new error_clip.

        Returns:
            None
        """
        # Stored on the Python object only; read by gradient clipping code.
        self.error_clip = error_clip
def _slice_indices(self, slice, length):
"""
Reference implementation for the slice.indices method.
"""
# Compute step and length as integers.
step = 1 if slice.step is None else slice.step
# Raise ValueError for negative length or zero step.
if length < 0:
raise ValueError("length should not be negative")
if step == 0:
raise ValueError("slice step cannot be zero")
# Find lower and upper bounds for start and stop.
lower = -1 if step < 0 else 0
upper = length - 1 if step < 0 else length
# Compute start.
if slice.start is None:
start = upper if step < 0 else lower
else:
start = slice.start
start = max(start + length, lower) if start < 0 else min(start,
upper)
# Compute stop.
if slice.stop is None:
stop = lower if step < 0 else upper
else:
stop = slice.stop
stop = max(stop + length, lower) if stop < 0 else min(stop, upper)
return start, stop, step
def _detectEllipsis(self, item):
has_ellipsis = False
start = 0
end = len(self.shape)
for index, o in enumerate(item):
if o is Ellipsis:
if has_ellipsis:
raise ValueError("Index can have one ellipsis only.")
has_ellipsis = True
start = index
else:
if has_ellipsis:
end = index
return has_ellipsis, start, end
def _reconstructSliceinfo(self, item):
has_ellipsis, start, end = self._detectEllipsis(item)
if has_ellipsis:
newitem = []
for i in range(start):
newitem.append(item[i])
for i in range(start, end):
newitem.append(slice(None, None, None))
for i in range(end, len(item)):
newitem.append(item[i])
return newitem
else:
return None
def _detectContinuesSlice(self, item):
starts = []
ends = []
for index, o in enumerate(item):
if isinstance(o, int):
start = int(o)
if (index > 0 and index >= self.shape[index]) \
or (index < 0 and (index + self.shape[index]) < 0):
raise IndexError("invalid index")
start = max(start + self.shape[index], 0) if start < 0 else min(
start, self.shape[index])
starts.append(start)
ends.append(start + 1)
elif isinstance(o, slice):
start, stop, step = self._slice_indices(o, self.shape[index])
if step == 1 or step == -1:
starts.append(start)
ends.append(stop)
else:
return False, None
else:
raise IndexError("Valid index accept int or slice or ellipsis")
return True, [starts, ends]
def _cloneVar(self, copy=False):
if not copy:
return self.block.create_var(
name=unique_name.generate(".".join(self.name)),
dtype=self.dtype,
persistable=self.persistable,
stop_gradient=self._stop_gradient, )
else:
return self
def _sliceVar(self, axes, starts, ends):
new_var = self._cloneVar()
self.block.append_op(
type="slice",
inputs={'Input': [self]},
outputs={'Out': [new_var]},
attrs={'axes': axes,
'starts': starts,
'ends': ends})
return new_var
def _concatVar(self, inputs, axis):
new_var = self._cloneVar()
self.block.append_op(
type="concat",
inputs={'X': inputs},
outputs={'Out': [new_var]},
attrs={'axis': axis, })
return new_var
def _sliceAndConcatVar(self, item, axis):
if isinstance(item, slice):
if self.shape[axis] < 0:
return self._cloneVar(True)
start, stop, step = self._slice_indices(item, self.shape[axis])
if step == 1:
return self._sliceVar([axis], [start], [stop])
else:
vars = []
if step > 0:
while start < stop:
vars.append(
self._sliceVar([axis], [start], [start + 1]))
start += step
else:
while start > stop:
vars.append(
self._sliceVar([axis], [start], [start + 1]))
start += step
return self._concatVar(vars, axis)
elif isinstance(item, int):
if self.shape[axis] < 0:
return self._cloneVar(True)
index = int(item)
if (index > 0 and index >= self.shape[axis])\
or (index < 0 and (index + self.shape[axis]) < 0):
raise IndexError("invalid index")
return self._sliceVar([axis], [index], [index + 1])
else:
raise IndexError("Valid index accept int or slice or tuple")
def __getitem__(self, item):
"""
Slice the variable.
Args:
item(int/slice/tuple) : the index.
Returns:
Sliced variable
"""
new_var = None
if isinstance(item, tuple):
if len(item) > len(self.shape):
raise IndexError("Too many indexes")
fixedSize = True
for i in range(len(self.shape)):
if self.shape[i] == -1:
fixedSize = False
break
newitem = self._reconstructSliceinfo(item) or item
if fixedSize:
check, info = self._detectContinuesSlice(newitem)
if check:
starts = info[0]
ends = info[1]
axes = [i for i in range(len(starts))]
return self._sliceVar(axes, starts, ends)
else:
new_var = self
for index, o in enumerate(newitem):
new_var = new_var._sliceAndConcatVar(o, index)
else:
new_var = self
for index, o in enumerate(newitem):
new_var = new_var._sliceAndConcatVar(o, index)
else:
new_var = self._sliceAndConcatVar(item, 0)
return new_var
def get_all_op_protos():
    """
    Get all registered op proto from PaddlePaddle C++ end.

    Returns:
        list: list of OpProto.
    """
    return [
        framework_pb2.OpProto.FromString(six.binary_type(pbstr))
        for pbstr in core.get_all_op_protos()
    ]
class OpProtoHolder(object):
    """
    A global variable to hold all OpProtos from C++ as a map
    """

    @classmethod
    def instance(cls):
        """Return the process-wide singleton, creating it on first use."""
        if not hasattr(cls, '_instance'):
            cls._instance = cls()
        return cls._instance

    def __init__(self):
        assert not hasattr(
            self.__class__,
            '_instance'), 'Please use `instance()` to get OpProtoHolder object!'
        # Index every registered proto by its op type string.
        self.op_proto_map = {
            proto.type: proto
            for proto in get_all_op_protos()
        }

    def get_op_proto(self, type):
        """
        Get OpProto by a type string.
        Args:
            type(str): The type that operator registered in C++ side.
        Returns(framework_pb2.OpProto): The OpProto
        """
        if type not in self.op_proto_map:
            raise ValueError("Operator \"%s\" has not been registered." % type)
        return self.op_proto_map[type]

    @staticmethod
    def generated_op_attr_names():
        """Names of the attributes the framework auto-adds to every op."""
        return {
            core.op_proto_and_checker_maker.kOpRoleAttrName(),
            core.op_proto_and_checker_maker.kOpRoleVarAttrName(),
            core.op_proto_and_checker_maker.kOpNameScopeAttrName(),
            core.op_proto_and_checker_maker.kOpCreationCallstackAttrName()
        }
class Operator(object):
"""
In Fluid, all the operation are represented by Operator, and Operator
is regarded as a build in an instruction of a Block. Users can use the
build in instructions to describe their neural network.
Args:
block(Block): The block has the current operator.
desc(core.OpDesc): The protobuf description of Operator.
type(str): The type of operator. Default None.
inputs(dict): The input of this Operator. it is a dictionary, for every
element, key is the input parameter name, and value is a list of
variables. Default None.
outputs(dict): The output of this Operator. it is a dictionary, for
every element, key is the input parameter name, and value is a list
of variables. Default None.
attrs(dict): The attributes of this Operator. it is a dictionary, for
every element, key is attribute name, and value is the attribute value.
The attribute type should be as same as the type registered in C++ side.
Default None.
Returns:
Operator: The initialized Operator.
Raises:
ValueError: If the passed input, output and attrs doesn't match the
initializing Operator's that registered in C++ side.
Notes:
The constructor of operator should not be invoked directly. Use
Block.append_op or Block._prepend_op instead.
Examples:
.. code-block:: python
cur_program = Program()
cur_block = cur_program.current_block()
# var1 += var2 + var3
cur_block.append_op(type="sum",
inputs={"X": [var1, var2, var3]},
outputs={"Out": [var1]})
"""
OP_WITHOUT_KERNEL_SET = {
'feed', 'fetch', 'recurrent', 'go', 'rnn_memory_helper_grad',
'conditional_block', 'while', 'send', 'recv', 'listen_and_serv',
'ncclInit', 'select', 'checkpoint_notify', 'gen_nccl_id'
}
    def __init__(self,
                 block,
                 desc,
                 type=None,
                 inputs=None,
                 outputs=None,
                 attrs=None):
        # Dygraph mode: wrap a core.OpBase and collect raw input/output
        # ivars. Static mode: fill the OpDesc (inputs, outputs, attrs) and
        # run type/shape inference.
        if _in_dygraph_mode():
            if type is None:
                raise ValueError(
                    "`type` to initialized an Operator can not be None.")
            self.iop = core.OpBase(type)
            # TODO(minqiyang): remove these lines after we take apart all
            # backward grads and forward variables
            self.inputs = defaultdict(list)
            if inputs is not None:
                for k, v in six.iteritems(inputs):
                    if isinstance(v, Variable):
                        self.inputs[k].append(v._ivar)
                    elif isinstance(v, list) or isinstance(v, tuple):
                        self.inputs[k].extend([var._ivar for var in v])
            self.outputs = defaultdict(list)
            if outputs is not None:
                for k, v in six.iteritems(outputs):
                    if isinstance(v, Variable):
                        self.outputs[k].append(v._ivar)
                    elif isinstance(v, list) or isinstance(v, tuple):
                        self.outputs[k].extend([var._ivar for var in v])
            self.attrs = attrs if attrs else {}
        else:
            self.block = block
            self.desc = desc
            # note: not add self.attrs here:
            # https://github.com/PaddlePaddle/Paddle/pull/12583#pullrequestreview-145093173
            op_attrs = attrs
            if op_attrs is None:
                op_attrs = dict()
            del attrs
            op_maker = core.op_proto_and_checker_maker
            # Stamp the framework-generated role attributes onto the op.
            if op_maker.kOpRoleAttrName() not in op_attrs:
                op_attrs[op_maker.kOpRoleAttrName(
                )] = self.block.program.op_role
            role_var_name = op_maker.kOpRoleVarAttrName()
            if len(self.block.program.
                   op_role_var) != 0 and role_var_name not in op_attrs:
                op_attrs[role_var_name] = self.block.program.op_role_var
            if role_var_name in op_attrs and len(op_attrs[role_var_name]) == 0:
                del op_attrs[role_var_name]
            # A desc that already carries a type was deserialized elsewhere;
            # nothing further to fill in.
            if len(self.desc.type()) != 0:
                return
            if type is None:
                raise ValueError(
                    "`type` to initilized an Operator can not be None.")
            else:
                # Record the Python creation callstack (outermost frame
                # first) for better error reporting; [1:] drops this frame.
                callstack_var_name = op_maker.kOpCreationCallstackAttrName()
                op_attrs[callstack_var_name] = list(
                    reversed(traceback.format_stack()))[1:]
            self.desc.set_type(type)
            proto = OpProtoHolder.instance().get_op_proto(type)
            namescope_var_name = op_maker.kOpNameScopeAttrName()
            op_attrs[namescope_var_name] = _full_name_scope()

            def find_name(var_list, name):
                # True when ``name`` maps to a non-None entry in var_list.
                for var_name in var_list:
                    if var_list[var_name] is not None and var_name == name:
                        return True
                return False

            if inputs is not None:
                for in_proto in proto.inputs:
                    found = find_name(inputs, in_proto.name)
                    assert found or in_proto.dispensable, "Input {} not found".format(
                        in_proto.name)
                    if found:
                        in_args = inputs[in_proto.name]
                        if not isinstance(in_args, list):
                            in_args = [in_args]
                        if not in_proto.duplicable and len(in_args) > 1:
                            raise ValueError(
                                "Input %s expects only one input, but %d are given."
                                % (in_proto.name, len(in_args)))
                        in_arg_names = []
                        for arg in in_args:
                            # Arguments may be names (str/bytes) or Variables.
                            if isinstance(arg, six.string_types):
                                in_arg_names.append(arg)
                            elif isinstance(arg, six.binary_type):
                                in_arg_names.append(arg.decode())
                            else:
                                in_arg_names.append(cpt.to_text(arg.name))
                        self.desc.set_input(in_proto.name, in_arg_names)
                    else:
                        self.desc.set_input(in_proto.name, [])

            if outputs is not None:
                # Validate that every non-dispensable output slot is given.
                for m in proto.outputs:
                    if (m.name not in outputs) and m.dispensable:
                        continue
                    if not ((m.name in outputs) or m.dispensable):
                        raise ValueError(("Incorrect setting for output(s) of "
                                          "operator \"%s\", should set: [%s].")
                                         % (type, m.name))
                for out_proto in proto.outputs:
                    if out_proto.name not in outputs:
                        continue
                    out_args = outputs[out_proto.name]
                    if not isinstance(out_args, list):
                        out_args = [out_args]
                    if not out_proto.duplicable and len(out_args) > 1:
                        raise ValueError(
                            "Output %s expects only one output, but %d are given."
                            % (out_proto.name, len(out_args)))
                    out_arg_names = []
                    for arg in out_args:
                        out_arg_names.append(cpt.to_text(arg.name))
                        # TODO(minqiyang): could we remove variable's op in static mode?
                        if not _in_dygraph_mode():
                            arg.op = self
                    self.desc.set_output(out_proto.name, out_arg_names)

            if op_attrs is not None:
                if not isinstance(op_attrs, dict):
                    raise TypeError("'attrs' should be a dict.")
                for attr in proto.attrs:
                    attr_name = attr.name
                    if (attr_name not in op_attrs) or (
                            op_attrs[attr_name] is None):
                        continue
                    attr_val = op_attrs[attr_name]
                    self._update_desc_attr(attr_name, attr_val)

            self.desc.check_attrs()
            if self._has_kernel(type):
                self.desc.infer_var_type(self.block.desc)
                self.desc.infer_shape(self.block.desc)
def _has_kernel(self, op_type):
return op_type not in self.OP_WITHOUT_KERNEL_SET
def to_string(self, throw_on_error):
"""
Get debug string.
Args:
throw_on_error(bool): Whether to raise exception if self is not
initialized.
Returns:
str: The debug string.
"""
protostr = self.desc.serialize_to_string()
proto = framework_pb2.OpDesc.FromString(six.binary_type(protostr))
return _debug_string_(proto, throw_on_error)
def __str__(self):
return self.to_string(True)
__repr__ = __str__
    @property
    def type(self):
        # Op type string (e.g. "sum"); read from the C++ OpBase in dygraph
        # mode, from the OpDesc otherwise.
        if _in_dygraph_mode():
            return self.iop.type
        else:
            return self.desc.type()
    def input(self, name):
        """
        Get the input arguments according to the input parameter name.

        Args:
            name(str): The input parameter name.

        Returns:
            list: return the list of argument names that associated with \
                the specific parameter name.
        """
        return self.desc.input(name)

    def _rename_input(self, old_name, new_name):
        """
        Rename the `old_name` to `new_name` in this op's input arguments.

        Args:
            old_name(str): The old name of the Operator's input.
            new_name(str): The new name of the Operator's input.

        Returns:
            None
        """
        self.desc._rename_input(old_name, new_name)

    def _rename_output(self, old_name, new_name):
        """
        Rename the `old_name` to `new_name` in this op's output arguments.

        Args:
            old_name(str): The old name of the Operator's output.
            new_name(str): The new name of the Operator's output.

        Returns:
            None
        """
        self.desc._rename_output(old_name, new_name)
    @property
    def input_names(self):
        # Input parameter (slot) names declared by the op proto.
        return self.desc.input_names()

    @property
    def input_arg_names(self):
        # Names of all variables wired into any input slot.
        return self.desc.input_arg_names()

    @property
    def output_arg_names(self):
        # Names of all variables wired into any output slot.
        return self.desc.output_arg_names()

    def output(self, name):
        """
        Get output arguments by the output parameter name.

        Args:
            name(str): The output parameter name.

        Returns:
            list: return the list of argument names associated with \
                the specific parameter name.
        """
        return self.desc.output(name)

    @property
    def output_names(self):
        # Output parameter (slot) names declared by the op proto.
        return self.desc.output_names()
@property
def idx(self):
for i, op in enumerate(self.block.ops):
if op == self:
return i
raise ValueError(
"Can't find op itself in it's block. It could be a bug of Paddle.")
    def has_attr(self, name):
        """
        Whether this Operator has the attribute with name or not.

        Args:
            name(str): the attribute name.

        Returns:
            bool: True if has this attribute.
        """
        return self.desc.has_attr(name)

    def attr_type(self, name):
        """
        Get the type of attribute by attribute's name.

        Args:
            name(str): the attribute name.

        Returns:
            core.AttrType: the attribute type.
        """
        return self.desc.attr_type(name)
    def _set_attr(self, name, val):
        """
        Set the value of attribute by attribute's name.

        Args:
            name(str): the attribute name.
            val(bool|int|str|float|list): the value of the attribute.

        Raises:
            ValueError: If the type of value doesn't match with desc.attr_type(name).
        """
        self._update_desc_attr(name, val)

    def _remove_attr(self, name):
        """Remove the attribute ``name`` from the underlying OpDesc."""
        self.desc.remove_attr(name)
def _update_desc_attr(self, name, val):
"""
Update the value of desc's attribute by attribute's name.
Args:
name(str): the attribute name.
val(bool|int|str|float|list): the value of the attribute.
Raises:
ValueError: If the type of value doesn't match with desc.attr_type(name).
"""
if isinstance(val, Block):
self.desc.set_block_attr(name, val.desc)
elif isinstance(val, list) and val and all(
isinstance(v, Block) for v in val):
self.desc.set_blocks_attr(name, [v.desc for v in val])
elif isinstance(val, core.BlockDesc) or \
isinstance(val, core.ProgramDesc):
self.desc.set_serialized_attr(name, val.serialize_to_string())
else:
self.desc._set_attr(name, val)
    @property
    def attr_names(self):
        # All attribute names recorded on the underlying OpDesc.
        return self.desc.attr_names()

    def attr(self, name):
        """
        Get the attribute by name.

        Args:
            name(str): the attribute name.

        Returns:
            bool|int|str|float|list: The attribute value. The return value
            can be any valid attribute type.
        """
        return self.desc.attr(name)
    def _block_attr_id(self, name):
        """
        Get the block attribute's id by name.

        Args:
            name(str): the attribute name.

        Returns:
            int: the block index.
        """
        return self.desc._block_attr_id(name)
def _block_attr(self, name):
"""
Get the block attribute by name.
Args:
name(str): the attribute name.
Returns:
block: the block attribute.
"""
id = self._block_attr_id(name)
assert (id >= 0 and id < len(self.block.program.blocks))
return self.block.program.blocks[id]
def _blocks_attr(self, name):
"""
Get the blocks attribute by name.
Args:
name(str): the attribute name.
Returns:
list: list of the blocks attribute.
"""
attrs = []
for i in self._blocks_attr_ids(name):
assert (i >= 0 and i < len(self.block.program.blocks))
attrs.append(self.block.program.blocks[i])
return attrs
    def _blocks_attr_ids(self, name):
        """
        Get the blocks attribute's ids by name.

        Args:
            name(str): the attribute name.

        Returns:
            list: list of the blocks ids.
        """
        return self.desc._blocks_attr_ids(name)
def all_attrs(self):
"""
Get the attribute dict.
Returns:
dict: The Operator's attribute dict, name->attr.
"""
attr_names = self.attr_names
attr_map = {}
for n in attr_names:
attr_type = self.desc.attr_type(n)
if attr_type == core.AttrType.BLOCK:
attr_map[n] = self._block_attr(n)
continue
if attr_type == core.AttrType.BLOCKS:
attr_map[n] = self._blocks_attr(n)
continue
attr_map[n] = self.attr(n)
return attr_map
class Block(object):
"""
In Fluid, a Program is consistence of multi-Block, and Block stores
VarDesc and OpDesc. In a specific Block, a VarDesc have a unique name.
One block could have some child blocks, and child block's name scopes
should inherit the parent's so that OpDesc in child block can reference
a VarDesc that is stored in the parent block.
Please reference the framework.proto for details.
Args:
program(Program): The Program that the Block belongs to.
idx(int): The block's id in the Program.
Notes:
The constructor of Block should not be invoked directly. Please
use `Program._create_block()` to create a block.
Examples:
.. code-block:: python
cur_program = Program()
cur_block = cur_program.current_block()
var = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
cur_block.append_op(type="abs",
inputs={"X": [var]},
outputs={"Out": [var]})
"""
    def __init__(self, program, idx):
        # Bind to the idx-th BlockDesc of the program's desc.
        self.desc = program.desc.block(idx)
        self.vars = collections.OrderedDict()  # var_name --> var
        self.ops = list()  # operator list
        self.program = program
        # Bookkeeping for removed variables; not read in this file.
        self.removed_vars = collections.OrderedDict()
def __str__(self):
return self.to_string(True)
def to_string(self, throw_on_error, with_details=False):
"""
Get debug string.
Args:
throw_on_error(bool): raise exception when self is not initialized
when throw_on_error is True.
with_details(bool): more details about variables and parameters
(e.g. trainable, optimize_attr, ...) will be printed when
with_details is True. Default False.
Returns:
str: The debug string.
"""
assert isinstance(throw_on_error, bool) and isinstance(with_details,
bool)
if with_details:
re_add_indent = re.compile(r"\n(.)")
res_str = "blocks {\n idx: %d\n parent_idx: %d" % (
self.idx, self.parent_idx)
for var in list(self.vars.values()):
res_str += "\n vars {\n %s }" % re_add_indent.sub(
r"\n \1", var.to_string(throw_on_error, with_details))
for op in self.ops:
res_str += "\n ops {\n %s }" % re_add_indent.sub(
r"\n \1", op.to_string(throw_on_error))
res_str += "\n}"
else:
protostr = self.desc.serialize_to_string()
proto = framework_pb2.BlockDesc.FromString(
six.binary_type(protostr))
res_str = _debug_string_(proto, throw_on_error)
return res_str
__repr__ = __str__
    @property
    def parent_idx(self):
        # Index of the parent block in the program (-1 for the root block).
        return self.desc.parent

    @property
    def forward_block_idx(self):
        # For a backward block: index of its forward block (-1 when unset).
        return self.desc.get_forward_block_idx()
    def _set_forward_block_idx(self, idx):
        """
        Set the forward block Idx.

        Args:
            idx(int): the block index.

        Returns:
            None
        """
        self.desc._set_forward_block_idx(idx)
    @property
    def idx(self):
        # This block's own index within the program.
        return self.desc.id
def var(self, name):
"""
Get a Variable by name from this block.
Args:
name(str): the Variable's name.
Raises:
ValueError: The If input's type is not str, or this block
doesn't have a Variable with the giving name.
Returns:
Variable: the Variable with the giving name.
"""
if not isinstance(name, six.string_types):
raise TypeError(
"var require string as parameter, but get %s instead." %
(type(name)))
v = self.vars.get(name, None)
if v is None:
raise ValueError("var %s not in this block" % name)
return v
def _find_var_recursive(self, name):
    """
    Get a Variable by name from this block, searching ancestor blocks and
    associated forward blocks breadth-first.

    Args:
        name(str): the Variable's name.

    Returns:
        Variable: the Variable with the given name, or None if not found.
    """
    # BFS over the block graph. A block may be reachable both through a
    # parent link and a forward link, so visited ids are remembered.
    # Using a deque makes the head-pop O(1); the old code rebuilt the
    # frontier list (frontier = frontier[1:]) on every iteration, which
    # is O(n) per pop.
    frontier = collections.deque([self])
    visited = set()
    prog = self.program
    while frontier:
        cur = frontier.popleft()

        if id(cur) in visited:
            continue

        if cur.has_var(name):
            return cur.var(name)

        if cur.parent_idx != -1:
            frontier.append(prog.block(cur.parent_idx))

        if cur.forward_block_idx != -1:
            frontier.append(prog.block(cur.forward_block_idx))

        visited.add(id(cur))

    return None
def _var_recursive(self, name):
    """
    Get a Variable by name from this block or any ancestor block.

    Args:
        name(str): the Variable's name.

    Raises:
        ValueError: neither this block nor its ancestors hold a
            Variable with the given name.

    Returns:
        Variable: the Variable with the given name.
    """
    found = self._find_var_recursive(name)
    if not found:
        raise ValueError("Var {0} is not found recursively".format(name))
    return found
def all_parameters(self):
    """Return every Parameter of this block's vars as a list."""
    return [param for param in self.iter_parameters()]
def iter_parameters(self):
    """Yield each variable of this block that is a Parameter (lazily)."""
    return (var for _, var in six.iteritems(self.vars)
            if isinstance(var, Parameter))
def create_var(self, *args, **kwargs):
    # Create a Variable owned by this block; when an 'initializer'
    # callable is supplied it is applied to the fresh variable at once.
    var = Variable(block=self, *args, **kwargs)
    if 'initializer' in kwargs:
        kwargs['initializer'](var, self)
    return var
def has_var(self, name):
    """Return True if this block owns a variable called ``name``."""
    return name in self.vars
def _rename_var(self, name, new_name):
    """
    Rename variable in vars and ops' inputs and outputs.

    Args:
        name(str): the name to be renamed.
        new_name(str): the name to rename to.

    Raises:
        ValueError: if this block doesn't have a var with the given name,
            or the type of that var is neither Parameter nor Variable.

    Returns:
        Variable: the renamed Variable.
    """
    name = cpt.to_text(name)
    new_name = cpt.to_text(new_name)

    if not self.has_var(name):
        raise ValueError("var %s is not in current block" % name)
    v = self.var(name)
    # Capture the python-side attributes before the C++ rename destroys v.
    if type(v) == Parameter:
        var_type = "Parameter"
        stop_gradient = v.stop_gradient
        trainable = v.trainable
        optimize_attr = v.optimize_attr
        regularizer = v.regularizer
        gradient_clip_attr = v.gradient_clip_attr
        error_clip = v.error_clip
    elif type(v) == Variable:
        var_type = "Variable"
        error_clip = v.error_clip
        stop_gradient = v.stop_gradient
    else:
        # BUG FIX: the message was previously passed as a second exception
        # argument ("...: %s", type(v)) instead of being %-formatted, so
        # the type never appeared in the rendered message.
        raise ValueError("unsupported var type: %s" % type(v))
    orig_var_type = v.type
    self.desc._rename_var(cpt.to_bytes(name), cpt.to_bytes(new_name))
    # NOTE: v is destroyed by C++ after calling _rename_var.
    d = self.desc.find_var(cpt.to_bytes(new_name))
    # Re-create the python wrapper around the renamed C++ var.
    if var_type == "Parameter":
        var = Parameter(
            self,
            d.shape(),
            d.dtype(),
            type=orig_var_type,
            name=new_name,
            stop_gradient=stop_gradient,
            trainable=trainable,
            optimize_attr=optimize_attr,
            regularizer=regularizer,
            gradient_clip_attr=gradient_clip_attr,
            error_clip=error_clip)
    elif var_type == "Variable":
        var = Variable(
            self,
            type=orig_var_type,
            name=new_name,
            error_clip=error_clip,
            stop_gradient=stop_gradient)

    # rename the python side, _sync_with_cpp will only add
    # new vars/ops to python side.
    self.vars[new_name] = var
    del self.vars[name]
    self._sync_with_cpp()
    return var
def _remove_var(self, name):
    # Sync first so the python-side view matches the C++ desc, then drop
    # the variable on both sides.
    self._sync_with_cpp()
    self.desc._remove_var(cpt.to_bytes(name))
    del self.vars[name]
def create_parameter(self, *args, **kwargs):
    # Parameters always live in the global block, no matter which block
    # this method is invoked on.
    global_block = self.program.global_block()
    param = Parameter(global_block, *args, **kwargs)
    if 'initializer' in kwargs:

        def _is_inited_by(block, var):
            # Collect every op in `block` that writes to `var`.
            init_ops = []
            for op in block.ops:
                if var.name in op.output_arg_names:
                    init_ops.append(op)
            return init_ops

        initializer = kwargs['initializer']
        init_ops = _is_inited_by(global_block, param)
        init_ops_len = len(init_ops)
        if init_ops_len > 1:
            raise RuntimeError("param " + param.name +
                               " is inited by multiple init ops " + str(
                                   init_ops))
        elif init_ops_len == 1:
            # TODO: already inited, do nothing; should log a warning here.
            pass
        else:
            # Not initialized yet: apply the given initializer now.
            initializer(param, self)
    return param
def append_op(self, *args, **kwargs):
    """
    Appends a new Operator according to the giving arguments.

    Returns:
        Operator: the appended Operator.
    """
    if _in_dygraph_mode():
        # Dygraph mode: no OpDesc is created (desc=None) and the op is
        # recorded in the tracer instead of this block's op list.
        op = Operator(
            block=self,
            desc=None,
            type=kwargs.get("type", None),
            inputs=kwargs.get("inputs", None),
            outputs=kwargs.get("outputs", None),
            attrs=kwargs.get("attrs", None))

        # record ops in tracer rather than blocks
        #
        # TODO(minqiyang): add op stop_gradient support in static mode too.
        # currently, we only support stop_gradient in dygraph mode.
        _dygraph_tracer().trace_op(op, kwargs.get("stop_gradient", False))
    else:
        # Static mode: create the C++ OpDesc first and keep the python
        # wrapper in self.ops.
        op_desc = self.desc.append_op()
        op = Operator(
            block=self,
            desc=op_desc,
            type=kwargs.get("type", None),
            inputs=kwargs.get("inputs", None),
            outputs=kwargs.get("outputs", None),
            attrs=kwargs.get("attrs", None))
        self.ops.append(op)

    return op
def _insert_op(self, index, *args, **kwargs):
    """
    Insert an Operator at position ``index``.

    Args:
        index(int): the place that the operator to insert.

    Returns:
        Operator: the inserted Operator.
    """
    # Sync first so `index` refers to the same position on both the
    # python and C++ op lists.
    self._sync_with_cpp()
    op_desc = self.desc._insert_op(index)
    op = Operator(block=self, desc=op_desc, *args, **kwargs)
    self.ops.insert(index, op)
    return op
def _remove_op(self, index):
    """
    Remove the operator at the specific position.

    Args:
        index(int): the position of the operator to remove.

    Returns:
        None
    """
    self._sync_with_cpp()
    # C++ removes the half-open range [index, index + 1), i.e. one op.
    self.desc._remove_op(index, index + 1)
    del self.ops[index]
def _slice_ops(self, start, end):
    """
    Return the Operators in positions ``[start, end)``.

    Args:
        start(int): the start position.
        end(int): the end position.

    Returns:
        list: the Operators between start and end.
    """
    selected = self.ops[start:end]
    return selected
def _prepend_op(self, *args, **kwargs):
    # Like append_op, but in static mode the new op goes to the front of
    # the block's op list.
    if _in_dygraph_mode():
        op = Operator(
            self,
            None,
            type=kwargs.get("type", None),
            inputs=kwargs.get("inputs", None),
            outputs=kwargs.get("outputs", None),
            attrs=kwargs.get("attrs", None))
        # Dygraph records the op in the tracer, not in self.ops.
        _dygraph_tracer().trace_op(op, kwargs.get("stop_gradient", False))
    else:
        op_desc = self.desc._prepend_op()
        op = Operator(
            self,
            op_desc,
            type=kwargs.get("type", None),
            inputs=kwargs.get("inputs", None),
            outputs=kwargs.get("outputs", None),
            attrs=kwargs.get("attrs", None))
        self.ops.insert(0, op)

    return op
def _sync_with_cpp(self):
    """
    Sync from the desc on the c++ end. This method is used to synchronize
    the c++ desc instance generated by backward.
    """
    # sync variables from cpp
    for var in self.desc.all_vars():
        if not self.has_var(var.name()):
            self.create_var(name=var.name(), desc=var, type=var.type())

    # sync variables removed from c++ end
    for var in list(self.vars.keys()):
        if not self.desc.find_var(cpt.to_bytes(var)):
            self.vars.pop(var)

    # sync operators from cpp
    ops_in_cpp = []
    for op_idx in range(0, self.desc.op_size()):
        ops_in_cpp.append(self.desc.op(op_idx))

    # Locate the contiguous span of python-known ops inside the C++ list.
    if len(self.ops) != 0:
        first_op_in_python = self.ops[0].desc
        last_op_in_python = self.ops[len(self.ops) - 1].desc
        start_index = None
        end_index = None
        for index in range(len(ops_in_cpp)):
            if first_op_in_python == ops_in_cpp[index]:
                start_index = index
            if last_op_in_python == ops_in_cpp[index]:
                end_index = index
        assert start_index is not None
        assert end_index is not None
        assert start_index <= end_index
    else:
        start_index = 0
        end_index = -1

    # sync ops prepended on the c++ end: cpp indices [0, start_index),
    # inserted at the head in reverse so their order is preserved.
    # BUG FIX: this used to be range((start_index - 1 - 1), -1, -1),
    # which skipped ops_in_cpp[start_index - 1] and made the final
    # length assert below fail whenever any op was prepended in C++.
    for index in range(start_index - 1, -1, -1):
        op_desc = ops_in_cpp[index]
        op = Operator(self, op_desc)
        self.ops.insert(0, op)

    # sync ops appended on the c++ end
    for index in range((end_index + 1), len(ops_in_cpp)):
        op_desc = ops_in_cpp[index]
        op = Operator(self, op_desc)
        self.ops.append(op)

    # sync ops removed from c++ end: walk both lists in lockstep and drop
    # python ops whose desc no longer appears on the C++ side.
    if end_index != -1 and end_index < len(self.ops):
        ops_in_cpp_index = 0
        ops_in_python_index = 0
        while ops_in_python_index < len(
                self.ops) and ops_in_cpp_index < len(ops_in_cpp):
            if self.ops[ops_in_python_index].desc != ops_in_cpp[
                    ops_in_cpp_index]:
                del self.ops[ops_in_python_index]
            else:
                ops_in_cpp_index += 1
                ops_in_python_index += 1

    # After syncing, both sides must agree exactly.
    assert len(self.ops) == len(ops_in_cpp)
    for index in range(len(self.ops)):
        assert self.ops[index].desc == ops_in_cpp[index]
def _copy_param_info_from(self, other):
    """
    Copy the information of parameters from the other block.

    Args:
        other(Block): the other block.

    Raises:
        TypeError: if ``other`` is not a Block.
        ValueError: if the `other` and this block are not in the same
            topology (a parameter of ``other`` has no counterpart here).

    Returns:
        None
    """
    if not isinstance(other, Block):
        raise TypeError(
            "_copy_param_info_from should be invoked with Block")
    for p in other.iter_parameters():
        assert isinstance(p, Parameter)
        v = self.vars.get(p.name, None)
        if v is None:
            raise ValueError("_copy_param_info_from should be invoked with "
                             "same topology")
        assert isinstance(v, Variable)
        # Rebuild the parameter locally: shape/dtype/lod come from the
        # local variable, training attributes come from `p`.
        new_p = Parameter(
            block=self,
            shape=v.shape,
            dtype=v.dtype,
            type=v.type,
            lod_level=v.lod_level,
            stop_gradient=p.stop_gradient,
            trainable=p.trainable,
            optimize_attr=p.optimize_attr,
            regularizer=p.regularizer,
            gradient_clip_attr=p.gradient_clip_attr,
            error_clip=p.error_clip,
            name=v.name)
        self.vars[new_p.name] = new_p
def _clone_variable(self, var, force_persistable=True):
    """
    Clone a variable into current block.

    Args:
        var: the variable to be cloned.
        force_persistable(bool): True means setting the result variable
            to being persistable. False means keeping the persistable
            flag of the input var. default: True.

    Returns:
        Variable: the new variable cloned from 'var' in current block.
    """
    assert isinstance(var, Variable)
    # STEP_SCOPES and RAW variables carry no shape/dtype: clone only the
    # name/persistable/type triple so they can be copied safely.
    schemaless = (core.VarDesc.VarType.STEP_SCOPES,
                  core.VarDesc.VarType.RAW)
    if var.type in schemaless:
        cloned = self.create_var(
            name=var.name, persistable=var.persistable, type=var.type)
    elif var.type == core.VarDesc.VarType.SELECTED_ROWS:
        cloned = self.create_var(
            name=var.name,
            shape=var.shape,
            dtype=var.dtype,
            type=var.type,
            persistable=True if force_persistable else var.persistable,
            is_data=var.is_data)
    else:
        cloned = self.create_var(
            name=var.name,
            shape=var.shape,
            dtype=var.dtype,
            type=var.type,
            lod_level=var.lod_level,
            persistable=True if force_persistable else var.persistable,
            is_data=var.is_data)
    return cloned
class IrNode(object):
    """
    Thin python wrapper around a ``core.Node``; the base node type used
    by IR passes.
    """

    def __init__(self, node):
        """
        Construct an IrNode using core.Node.

        Args:
            node(core.Node): C++ Node.
        """
        assert isinstance(node,
                          core.Node), 'node must be the instance of core.Node.'
        self.node = node

    def name(self):
        """str: the wrapped node's name."""
        return self.node.name()

    def node_type(self):
        """core.Node.Type: Operation or Variable."""
        return self.node.node_type()

    def var(self):
        """core.VarDesc: the node's variable description."""
        return self.node.var()

    def op(self):
        """core.OpDesc: the node's operator description."""
        return self.node.op()

    def id(self):
        """int: the node id."""
        return self.node.id()

    def is_op(self):
        """bool: whether this node is an operator."""
        return self.node.is_op()

    def is_var(self):
        """bool: whether this node is a variable."""
        return self.node.is_var()

    def is_ctrl_var(self):
        """bool: whether this node is a control dependence variable."""
        return self.node.is_ctrl_var()

    def clear_inputs(self):
        """Drop every input link of this node."""
        self.node.clear_inputs()

    def remove_input_by_id(self, node_id):
        """
        Remove a node from inputs by the given node id.

        Args:
            node_id(int): the given node id.
        """
        self.node.remove_input(node_id)

    def remove_input(self, node):
        """
        Remove a node from inputs.

        Args:
            node(IrNode): the node being removed.
        """
        self.node.remove_input(node.node)

    def append_input(self, node):
        """
        Append a node in inputs.

        Args:
            node(IrNode): the node being appended.
        """
        self.node.append_input(node.node)

    def clear_outputs(self):
        """Drop every output link of this node."""
        self.node.clear_outputs()

    def remove_output_by_id(self, node_id):
        """
        Remove a node from outputs by the given node id.

        Args:
            node_id(int): the given node id.
        """
        self.node.remove_output(node_id)

    def remove_output(self, node):
        """
        Remove a node from outputs.

        Args:
            node(IrNode): the node being removed.
        """
        self.node.remove_output(node.node)

    def append_output(self, node):
        """
        Append a node in outputs.

        Args:
            node(IrNode): the node being appended.
        """
        self.node.append_output(node.node)

    @property
    def inputs(self):
        """list(IrNode): the node inputs, each wrapped as an IrNode."""
        return list(map(IrNode, self.node.inputs))

    @property
    def outputs(self):
        """list(IrNode): the node outputs, each wrapped as an IrNode."""
        return list(map(IrNode, self.node.outputs))
class IrVarNode(IrNode):
    """
    Variable-flavored IrNode wrapper around a ``core.Node``.
    """

    def __init__(self, node):
        """
        Construct an IrVarNode using core.Node.

        Args:
            node(core.Node): C++ Node.
        """
        assert isinstance(node, core.Node) and node.is_var(), \
            'node must be the instance of core.Node and it must be a variable node.'
        super(IrVarNode, self).__init__(node)
        self.node = node

    def set_shape(self, shape):
        """
        Set the node variable shape.

        Args:
            shape(list): shape to be set.
        """
        assert self.node.var() is not None, \
            "The node variable description cannot be None."
        self.node.var().set_shape(shape)

    def persistable(self):
        """bool: whether the variable is persistable."""
        assert self.node.var() is not None, \
            "The node variable description cannot be None."
        return self.node.var().persistable()

    def type(self):
        """core.VarDesc.VarType: the variable type."""
        assert self.node.var() is not None, \
            "The node variable description cannot be None."
        return self.node.var().type()

    def dtype(self):
        """core.VarDesc.VarType: the variable data type."""
        assert self.node.var() is not None, \
            "The node variable description cannot be None."
        return self.node.var().dtype()

    def shape(self):
        """list: the variable shape."""
        assert self.node.var() is not None, \
            "The node variable description cannot be None."
        return self.node.var().shape()

    @property
    def inputs(self):
        """list(IrOpNode): input op nodes, each wrapped as an IrOpNode."""
        return list(map(IrOpNode, self.node.inputs))

    @property
    def outputs(self):
        """list(IrOpNode): output op nodes, each wrapped as an IrOpNode."""
        return list(map(IrOpNode, self.node.outputs))
class IrOpNode(IrNode):
    """
    Python IrOpNode. Beneath it is a core.Node, it inherits from IrNode.
    """

    def __init__(self, node):
        """
        Construct an IrOpNode using core.Node.

        Args:
            node(core.Node): C++ Node.
        """
        assert isinstance(node, core.Node) and node.is_op(), \
            'node must be the instance of core.Node and it must be a operator node.'
        super(IrOpNode, self).__init__(node)
        self.node = node

    def rename_input(self, old_input_name, new_input_name):
        """
        Rename the input of this node.

        Args:
            old_input_name(str): the old input name.
            new_input_name(str): the new input name.
        """
        assert self.node.op() is not None, \
            "The node operator description cannot be None."
        self.node.op()._rename_input(old_input_name, new_input_name)

    def input(self, name):
        """
        Get the argument name list by the parameter name for input.

        Args:
            name(str): the parameter name.

        Returns:
            list(str): the argument name list.
        """
        assert self.node.op() is not None, \
            "The node operator description cannot be None."
        return self.node.op().input(name)

    def output(self, name):
        """
        Get the argument name list by the parameter name for output.

        Args:
            name(str): the parameter name.

        Returns:
            list(str): the argument name list.
        """
        assert self.node.op() is not None, \
            "The node operator description cannot be None."
        return self.node.op().output(name)

    def set_type(self, new_type):
        """
        Change the operator type into new type.

        Args:
            new_type(str): new operator type to be set.
        """
        assert self.node.op() is not None, \
            "The node operator description cannot be None."
        return self.node.op().set_type(new_type)

    def set_attr(self, name, val):
        """
        Set the value of attribute by attribute's name.

        Args:
            name(str): the attribute name.
            val(bool|int|str|float|list): the value of the attribute.
        """
        self._update_desc_attr(name, val)

    def _update_desc_attr(self, name, val):
        """
        Update the value of the op desc's attribute by attribute's name.
        """
        assert self.node.op() is not None, \
            "The node operator description cannot be None."
        desc = self.node.op()
        # Dispatch on the value type: a single Block, a non-empty list of
        # Blocks, a serializable desc object, or a plain attribute.
        if isinstance(val, Block):
            desc.set_block_attr(name, val.desc)
        elif isinstance(val, list) and val and \
                all(isinstance(v, Block) for v in val):
            desc.set_blocks_attr(name, [v.desc for v in val])
        elif isinstance(val, core.BlockDesc) or \
                isinstance(val, core.ProgramDesc):
            desc.set_serialized_attr(name, val.serialize_to_string())
        else:
            desc._set_attr(name, val)

    def input_arg_names(self):
        """
        Return input arguments' names of this op node.

        Returns:
            list(str): input arguments' names of this op node.
        """
        assert self.node.op() is not None, \
            "The node operator description cannot be None."
        return self.node.op().input_arg_names()

    def output_arg_names(self):
        """
        Return output arguments' names of this op node.

        Returns:
            list(str): output arguments' names of this op node.
        """
        assert self.node.op() is not None, \
            "The node operator description cannot be None."
        return self.node.op().output_arg_names()

    @property
    def inputs(self):
        """
        Return the node inputs.

        Returns:
            list(IrVarNode): node inputs wrapped by IrVarNode.
        """
        return [IrVarNode(n) for n in self.node.inputs]

    @property
    def outputs(self):
        """
        Return the node outputs.

        Returns:
            list(IrVarNode): node outputs wrapped by IrVarNode.
        """
        return [IrVarNode(n) for n in self.node.outputs]
class IrGraph(object):
    """
    Python IrGraph. Beneath it is a core.Graph, which is used for
    creating a c++ Ir Pass Graph. An IrGraph is just a graph view of
    a Program. In an IrGraph, both Variables and Operators are graph
    nodes.
    """

    def __init__(self, graph, for_test=False):
        """
        Construct an IrGraph using core.Graph.

        Args:
            graph(core.Graph): C++ Graph.
            for_test(bool): True for the test graph and false for the train graph.
        """
        assert isinstance(
            graph, core.Graph), 'graph must be the instance of core.Graph.'
        self.graph = graph
        self._for_test = for_test

    def clone(self):
        """
        Create a new and duplicated IrGraph.

        Warns:
            The method only clones the graph structure, not its attributes.

        Returns:
            IrGraph: A new and duplicated graph.
        """
        g = self.graph.clone()
        return IrGraph(g, self._for_test)

    def is_test(self):
        """
        If the graph is used for testing, the function returns true. Otherwise, returns false.
        """
        return self._for_test

    def all_nodes(self):
        """
        Return all nodes included in the graph as a set.
        """
        return {IrNode(node) for node in self.graph.nodes()}

    def all_var_nodes(self):
        """
        Return all variable nodes included in the graph as a set.
        """
        return {IrVarNode(node) for node in self.graph.nodes() if node.is_var()}

    def all_persistable_nodes(self):
        """
        Return all persistable variable nodes included in the graph as a set.
        """
        persistable_nodes = set()
        for node in self.graph.nodes():
            if node.is_var() and node.var() is not None and node.var(
            ).persistable():
                persistable_nodes.add(node)
        return {IrVarNode(p) for p in persistable_nodes}

    def all_op_nodes(self):
        """
        Return all operator nodes included in the graph as a set.
        """
        return {IrOpNode(node) for node in self.graph.nodes() if node.is_op()}

    def create_persistable_node(self, name, var_type, shape, var_dtype):
        """
        Create a persistable variable node in the graph. In IrGraph,
        it can not distinguish between persistable variables and parameters.

        Args:
            name(str): the name of the persistable variable node.
            var_type(core.VarDesc.VarType): the type of the persistable variable node.
            shape(list): the shape of the persistable variable node.
            var_dtype(core.VarDesc.VarType): the data type of the persistable variable node.

        Returns:
            IrVarNode: the created persistable variable node.
        """
        var_desc = core.VarDesc(name)
        var_desc.set_type(var_type)
        var_desc.set_shape(shape)
        var_desc.set_dtype(var_dtype)
        var_desc.set_persistable(True)
        return IrVarNode(self.graph.create_var_node(var_desc))

    def create_var_node(self, name, var_type, shape, var_dtype):
        """
        Create a variable node in the graph. The created variable node is
        not persistable.

        Args:
            name(str): the name of the variable node.
            var_type(core.VarDesc.VarType): the type of the variable node.
            shape(list): the shape of the variable node.
            var_dtype(core.VarDesc.VarType): the data type of the variable node.

        Returns:
            IrVarNode: the created variable node.
        """
        var_desc = core.VarDesc(name)
        var_desc.set_type(var_type)
        var_desc.set_shape(shape)
        var_desc.set_dtype(var_dtype)
        return IrVarNode(self.graph.create_var_node(var_desc))

    def create_var_node_from_desc(self, var_desc):
        """
        Create a variable node by using an existing VarDesc in the graph.
        Depend on the giving VarDesc, the created variable node may be persistable.

        Args:
            var_desc(core.VarDesc): the giving variable description.

        Returns:
            IrVarNode: the created variable node.
        """
        return IrVarNode(self.graph.create_var_node(var_desc))

    def create_op_node(self, op_type, attrs, inputs, outputs):
        """
        Create an operator node in the graph.

        Args:
            op_type(str): the type of the operator node.
            attrs(dict): the attributes of the operator node.
            inputs(dict): the inputs of the operator node.
            outputs(dict): the outputs of the operator node.

        Returns:
            IrOpNode: the created operator node.
        """
        op_desc = core.OpDesc()
        op_desc.set_type(op_type)
        for attr, value in six.iteritems(attrs):
            self._update_desc_attr(op_desc, attr, value)
        # Inputs/outputs accept a single node or a list of nodes per slot.
        for input_name, var_nodes in six.iteritems(inputs):
            if not isinstance(var_nodes, list):
                var_nodes = [var_nodes]
            op_desc.set_input(input_name,
                              [var_node.name() for var_node in var_nodes])
        for output_name, var_nodes in six.iteritems(outputs):
            if not isinstance(var_nodes, list):
                var_nodes = [var_nodes]
            op_desc.set_output(output_name,
                               [var_node.name() for var_node in var_nodes])
        return IrOpNode(self.graph.create_op_node(op_desc))

    def create_op_node_from_desc(self, op_desc):
        """
        Create an operator node by using an existing OpDesc in the graph.

        Args:
            op_desc(core.OpDesc): the giving operator description.

        Returns:
            IrOpNode: the created operator node.
        """
        return IrOpNode(self.graph.create_op_node(op_desc))

    def update_input_link(self, old_input_node, new_input_node, op_node):
        """
        Update the input's link of an operator node.

        Args:
            old_input_node(IrNode): the old input node of the giving op_node.
            new_input_node(IrNode): the new input node of the giving op_node.
            op_node(IrOpNode): the operator node that is needed to update input's link.
        """
        assert old_input_node.node in self.graph.nodes() and new_input_node.node in \
            self.graph.nodes() and op_node.node in self.graph.nodes(), \
            'The three arguments(old_input_node&new_input_node&op_node) must be in the graph nodes.'
        # Relink edges on both endpoints, then rename the op's input slot.
        old_input_node.remove_output(op_node)
        op_node.remove_input(old_input_node)
        new_input_node.append_output(op_node)
        op_node.append_input(new_input_node)
        op_node.rename_input(old_input_node.name(), new_input_node.name())

    def link_to(self, node_in, node_out):
        """
        Connect two nodes.

        Args:
            node_in(IrNode): the input node.
            node_out(IrNode): the output node.
        """
        assert node_in.node in self.graph.nodes() and node_out.node in self.graph.nodes(), \
            'The two arguments(node_in&node_out) must be in the graph nodes.'
        node_in.append_output(node_out)
        node_out.append_input(node_in)

    def safe_remove_nodes(self, remove_nodes):
        """
        Remove nodes safely since links connected to these removed nodes are
        also removed.

        Args:
            remove_nodes(set): the nodes prepared to be removed.
        """
        # Accept a single node, any iterable, or a set.
        if not isinstance(remove_nodes, set):
            if isinstance(remove_nodes, Iterable):
                remove_nodes = set(remove_nodes)
            else:
                remove_nodes = {remove_nodes}
        # Unwrap to the raw core.Node objects expected by the C++ helper.
        original_nodes = {n.node for n in remove_nodes}
        core.graph_safe_remove_nodes(self.graph, original_nodes)

    def resolve_hazard(self):
        # Build, per variable name, the ordered list of graph nodes that
        # read/write it (topological order), then let C++ resolve hazards.
        ordered_nodes = core.topology_sort(self.graph)
        var_nodes = dict()
        for node in ordered_nodes:
            if node.is_op() and node.op() is not None:
                for each_var_name in node.op().input_arg_names():
                    if each_var_name not in var_nodes:
                        var_nodes[each_var_name] = [
                            self._find_node_by_name(node.inputs, each_var_name)
                        ]
                for each_var_name in node.op().output_arg_names():
                    # First writer creates the entry; later writers append.
                    if each_var_name not in var_nodes:
                        var_nodes[each_var_name] = [
                            self._find_node_by_name(node.outputs, each_var_name)
                        ]
                    else:
                        var_nodes[each_var_name].append(
                            self._find_node_by_name(node.outputs,
                                                    each_var_name))
        self.graph.resolve_hazard(var_nodes)

    def has_circle(self):
        """
        Check if the graph has a circle.

        Returns:
            bool: True if the graph has a circle else False.
        """
        return core.has_circle(self.graph)

    def graph_num(self):
        """
        Count the number of unconnected graphs in this graph.

        Returns:
            int: the number of unconnected graphs.
        """
        return core.graph_num(self.graph)

    def topology_sort(self):
        """
        Perform the topology sort operation on the graph.

        Notes: the `graph` cannot contain a circle.

        Returns:
            list(IrNode): nodes in topology order.
        """
        ordered_nodes = core.topology_sort(self.graph)
        return [IrNode(n) for n in ordered_nodes]

    def build_adjacency_list(self):
        """
        Build an adjacency list of operations for the `graph`.

        Returns:
            dict{IrNode: set(IrNode)}: the adjacency list.
        """
        adj_list = core.build_adjacency_list(self.graph)
        wrapped_adj_list = dict()
        for k, v in six.iteritems(adj_list):
            wrapped_adj_list[IrNode(k)] = {IrNode(n) for n in v}
        return wrapped_adj_list

    def draw(self, save_path, name, marked_nodes=None, remove_ctr_var=True):
        """
        Draw the graph. If `dot` command is installed, the drawn graph
        will be saved as pdf file type, otherwise dot file type is used.

        Args:
            save_path(str): the save path of drawn graph.
            name(str): the name of drawn graph.
            marked_nodes(set(IrNode)): nodes that are needed to be marked.
                Default value is None.
            remove_ctr_var(bool): If it is set True, all control variable nodes
                in the graph will be removed. Default value is True.
        """

        def _convert_to_pdf(dot_file_path):
            # Best-effort: fall back to the .dot file when `dot` is missing.
            pdf_save_path = os.path.splitext(dot_file_path)[0] + '.pdf'
            exited_code = subprocess.call('dot -Tpdf ' + dot_file_path \
                                          + ' -o ' + pdf_save_path, shell=True)
            if exited_code != 0:
                print('The dot command is needed for creating pdf files.')
                print('The {} is saved as the dot filetype.'.format(
                    dot_file_path))

        remove_ctr_vars = set()
        if remove_ctr_var:
            for node in self.all_var_nodes():
                if node.is_ctrl_var():
                    remove_ctr_vars.add(node)
            self.safe_remove_nodes(remove_ctr_vars)
        print('Total ops num = {}.'.format(len(self.all_op_nodes())))

        if marked_nodes is not None:
            if not isinstance(marked_nodes, set):
                if isinstance(marked_nodes, Iterable):
                    marked_nodes = set(marked_nodes)
                else:
                    marked_nodes = {marked_nodes}
            # Work on raw core.Node objects and exclude removed ctrl vars.
            marked_nodes = {n.node for n in marked_nodes}
            remove_ctr_vars = {n.node for n in remove_ctr_vars}
            marked_nodes = marked_nodes - remove_ctr_vars
            if self.graph.has('__graphviz__marked_node__'):
                self.graph.erase('__graphviz__marked_node__')
            self.graph.set('__graphviz__marked_node__', marked_nodes)
        viz_dot_path = os.path.join(save_path, name) + '.dot'
        viz_pass = core.get_pass('graph_viz_pass')
        viz_pass.set('graph_viz_path', viz_dot_path)
        viz_pass.apply(self.graph)
        _convert_to_pdf(viz_dot_path)

    def to_program(self):
        """
        Convert the graph into a Program.

        WARN: When the graph includes backward operator nodes, the
        conversion process may be failed. Usually, this function is
        only used to convert a test graph.

        Returns:
            Program: a program converted from the graph.
        """
        convert_pass = core.get_pass('graph_to_program_pass')
        desc = core.ProgramDesc()
        convert_pass.set_not_owned('program', desc)
        convert_pass.apply(self.graph)
        program = Program._construct_from_desc(desc)
        return program

    def _find_node_by_name(self, nodes, node_name):
        """
        Find a node in the giving nodes set by the name.
        """
        target_node = None
        # NOTE(review): the loop does not break on the first hit, so if
        # several nodes share the name the LAST match wins — presumably
        # names are unique within `nodes`; confirm before relying on it.
        for n in nodes:
            if n.name() == node_name:
                target_node = n
        assert target_node is not None, "Cannot find the target node in the giving set."
        return target_node

    def _update_desc_attr(self, desc, name, val):
        """
        Update the value of desc's attribute by attribute's name.
        """
        # Same dispatch as IrOpNode._update_desc_attr: Block, list of
        # Blocks, serializable desc, or plain attribute.
        if isinstance(val, Block):
            desc.set_block_attr(name, val.desc)
        elif isinstance(val, list) and val and all(
                isinstance(v, Block) for v in val):
            desc.set_blocks_attr(name, [v.desc for v in val])
        elif isinstance(val, core.BlockDesc) or \
                isinstance(val, core.ProgramDesc):
            desc.set_serialized_attr(name, val.serialize_to_string())
        else:
            desc._set_attr(name, val)
class Program(object):
"""
Python Program. Beneath it is a ProgramDesc, which is used for
create c++ Program. A program is a self-contained programing
language like container. It has at least one Block, when the
control flow op like conditional_block, while_op is included,
it will contains nested block.
Please reference the framework.proto for details.
Notes: we have default_startup_program and default_main_program
by default, a pair of them will shared the parameters.
The default_startup_program only run once to initialize parameters,
default_main_program run in every mini batch and adjust the weights.
Returns:
A empty program.
Examples:
>>> main_program = fluid.Program()
>>> startup_program = fluid.Program()
>>> with fluid.program_guard(main_program=main_program, startup_program=startup_program):
>>> fluid.layers.data(name="x", shape=[-1, 784], dtype='float32')
>>> fluid.layers.data(name="y", shape=[-1, 1], dtype='int32')
>>> fluid.layers.fc(name="fc", shape=[10], dtype='float32', act="relu")
"""
def __init__(self):
    """Create an empty Program backed by a fresh C++ ProgramDesc."""
    self.desc = core.ProgramDesc()
    self.blocks = [Block(self, 0)]
    self.current_block_idx = 0
    self._seed = 0
    self._current_role = core.op_proto_and_checker_maker.OpRole.Forward
    self._op_role_var = []

    # for distribute training
    # _is_distributed = True if under distributed training
    self._is_distributed = False
    # _is_chief = True if the trainer is the first one, usually No.0
    self._is_chief = False
    # _parameters_on_pservers records all the parameters distributed on parameter servers.
    self._parameters_on_pservers = None
    # _endpoints is a list about parameter servers ip:port, such as ["ip:port","ip:port"]
    self._endpoints = []
    # if current role is parameter server, the _ps_endpoint is its "ip:port"
    self._ps_endpoint = None
    # trainers_endpoints, it is used for distribution.
    self._trainers_endpoints = []
    # the distributed lookup table names
    self._distributed_lookup_table = None
    # use Deep gradient compression or not
    self._enable_dgc = False

    # @deprecated(the python memory optimize transpiler is deprecated)
    # whether the program is optimized by memory_optimize_transpiler
    self.__is_mem_optimized = False

    # if this program has been optimized by distributed optimizer
    # fleet_opt will be given a value
    self._fleet_opt = None
    self._program_config = None
@property
def _is_mem_optimized(self):
    # If the program is optimized, operator input/outputs
    # may be the same, which conflicts with save_inference_model.
    return self.__is_mem_optimized
@_is_mem_optimized.setter
def _is_mem_optimized(self, target):
    # Flag set by the (deprecated) memory_optimize transpiler.
    self.__is_mem_optimized = target
@property
def op_role(self):
    """
    The operator role. In a enum {Forward, Backward, Optimize}.

    Notes: this is a low level API. It is used only for ParallelExecutor to
    duplicate or schedule operator to devices.

    For example, the forward operator should be executed on every device.
    The backward operator should be executed on every device and the
    parameter gradient of backward (use :code:`op_role_var` to get this
    variable) operator should be merged to one device. The optimization
    operators should be executed on only one device and broadcast the
    optimization result, i.e., the new parameter, to every other device.
    """
    return self._current_role
@op_role.setter
def op_role(self, role):
self._current_role = role
@property
def op_role_var(self):
    """
    The auxiliary variables for :code:`op_role` property.

    See Also: :code:`Program.op_role`'s documentation for details.

    Notes: This is a very low-level API. Users should not use it directly.
    """
    return self._op_role_var

@op_role_var.setter
def op_role_var(self, var_name):
    """Replace the auxiliary variable list with the single name ``var_name``."""
    # Bug fix: the setter function used to be named ``set_op_role_var``,
    # so ``@op_role_var.setter`` bound the writable property under that
    # other name and ``prog.op_role_var = x`` did not reach this setter.
    # Naming the function after the property makes the property itself
    # settable, as a property setter requires.
    self._op_role_var = [var_name]

# Backward-compatible alias: code that assigned through
# ``prog.set_op_role_var`` keeps working (it is the same property object,
# now with both getter and setter).
set_op_role_var = op_role_var
@contextlib.contextmanager
def _backward_role_guard(self):
    """Context manager that temporarily switches ``op_role`` to Backward.

    The previous role is restored on exit, even if the guarded block
    raises.
    """
    tmp_role = self._current_role

    OpRole = core.op_proto_and_checker_maker.OpRole
    self._current_role = OpRole.Backward
    try:
        yield
    finally:
        # Bug fix: without try/finally an exception raised inside the
        # guarded block would leave the program stuck in the Backward role.
        self._current_role = tmp_role
@signature_safe_contextmanager
def _optimized_guard(self, param_and_grads):
    """
    A with guard to set :code:`Optimization` :code:`OpRole` and
    :code:`OpRoleVar` automatically.

    Notes: This is a very low level API. Users should not use it directly.

    Args:
        param_and_grads(list): The variables (names) to be optimized.

    Examples:

        >>> p, g = backward(...)
        >>> with program._optimized_guard([p,g]):
        >>>     p = p - 0.001 * g
    """
    tmp_role = self._current_role
    tmp_var = self._op_role_var

    OpRole = core.op_proto_and_checker_maker.OpRole
    self._current_role = OpRole.Optimize
    # Accept either Variable objects or plain names.
    self._op_role_var = [
        var.name if isinstance(var, Variable) else var
        for var in param_and_grads
    ]
    try:
        yield
    finally:
        # Bug fix: restore the saved role/vars even when the guarded block
        # raises; otherwise subsequently appended ops would silently be
        # tagged as Optimize.
        self._op_role_var = tmp_var
        self._current_role = tmp_role
@signature_safe_contextmanager
def _lr_schedule_guard(self, is_with_opt=False):
    """
    A with guard to set :code:`LRSched` :code:`OpRole` and
    :code:`OpRoleVar` automatically. The :code:`OpRoleVar` is
    set to the target learning rate.

    Notes: This is a very low level API. Users should not use it directly.

    Args:
        is_with_opt: Only set to true if these ops a in the middle
            of a bunch of optimize ops so that it can be treated
            correctly. For example, sgd->lr_op->sgd->lr_op->sgd.

    Examples:

        >>> p, g = backward(...)
        >>> with program.lr_schedule_guard():
        >>>     lr = lr * decay
    """
    tmp_role = self._current_role
    tmp_var = self._op_role_var

    OpRole = core.op_proto_and_checker_maker.OpRole
    self._current_role = OpRole.LRSched
    if is_with_opt:
        # Combine both roles when LR-scheduling ops sit between optimize ops.
        self._current_role = int(OpRole.LRSched) | int(OpRole.Optimize)
    # TODO(typhoonzero): how to set target learning rate var
    self._op_role_var = []
    try:
        yield
    finally:
        # Bug fix: restore role/vars even when the guarded block raises.
        self._op_role_var = tmp_var
        self._current_role = tmp_role
def __str__(self):
    """Return the protobuf debug string of this Program.

    Returns:
        (str): The protobuf debug string.

    Raises:
        ValueError: If any of required fields is not set.
    """
    return self.to_string(throw_on_error=True)
def to_string(self, throw_on_error, with_details=False):
    """Render this Program as a human-readable debug string.

    Args:
        throw_on_error(bool): raise Value error when any of required fields
            is not set.
        with_details(bool): True if more details about variables and
            parameters, e.g., :code:`trainable`, :code:`optimize_attr`, need
            to print.

    Returns:
        str: The debug string.

    Raises:
        ValueError: If any of required fields is not set and throw_on_error
            is True.
    """
    assert isinstance(throw_on_error, bool) and isinstance(with_details, bool)
    if with_details:
        # Detailed mode: concatenate the per-block debug strings.
        return "".join(
            blk.to_string(throw_on_error, with_details)
            for blk in self.blocks)
    # Plain mode: round-trip the C++ desc through the Python protobuf
    # bindings and pretty-print it.
    serialized = self.desc.serialize_to_string()
    proto = framework_pb2.ProgramDesc.FromString(six.binary_type(serialized))
    return _debug_string_(proto, throw_on_error)
def _get_desc(self):
    """
    Get the C++ side of `ProgramDesc` object pointer. The C++ object is
    exposed by :code:`pybind`.

    Notes: This is a very low level API. Users should not use this API
    directly.

    Returns:
        core.ProgramDesc: the underlying C++ program descriptor.
    """
    return self.desc
def _version(self):
    # Version number of the underlying C++ ProgramDesc (delegates to pybind).
    return self.desc._version()
def clone(self, for_test=False):
    """
    Create a new, duplicated program.


    Some operators, e.g., :code:`batch_norm`, behave differently between
    training and testing. They have an attribute, :code:`is_test`, to
    control this behaviour. This method will change the :code:`is_test`
    attribute of them to :code:`True` when :code:`for_test=True`.

    * Set for_test to False when we want to clone the program for training.
    * Set for_test to True when we want to clone the program for testing.

    Notes: This API DOES NOT prune any operator. Use
    :code:`clone(for_test=True)` before backward and optimization please. e.g.

    >>> test_program = fluid.default_main_program().clone(for_test=True)
    >>> optimizer = fluid.optimizer.Momentum(learning_rate=0.01, momentum=0.9)
    >>> optimizer.minimize()

    Args:
        for_test(bool): True if change the :code:`is_test` attribute of
            operators to :code:`True`.

    Returns:
        Program: The new, duplicated Program object.

    Examples:

        1. To clone a test program, the sample code is:

        >>> import paddle.fluid as fluid
        >>> train_program = fluid.Program()
        >>> startup_program = fluid.Program()
        >>> with fluid.program_guard(train_program, startup_program):
        >>>     img = fluid.layers.data(name='image', shape=[784])
        >>>     hidden = fluid.layers.fc(input=img, size=200, act='relu')
        >>>     hidden = fluid.layers.dropout(hidden, dropout_prob=0.5)
        >>>     loss = fluid.layers.cross_entropy(
        >>>                 input=fluid.layers.fc(hidden, size=10, act='softmax'),
        >>>                 label=fluid.layers.data(name='label', shape=[1], dtype='int64'))
        >>>
        >>> test_program = train_program.clone(for_test=True)
        >>>
        >>> sgd = fluid.optimizer.SGD(learning_rate=1e-3)
        >>> with fluid.program_guard(train_program, startup_program):
        >>>     sgd.minimize(loss)

        2. The :code:`clone` method can be avoid if you create program for
        training and program for testing individually.

        >>> import paddle.fluid as fluid
        >>>
        >>> def network(is_test):
        >>>     img = fluid.layers.data(name='image', shape=[784])
        >>>     hidden = fluid.layers.fc(input=img, size=200, act='relu')
        >>>     hidden = fluid.layers.dropout(hidden, dropout_prob=0.5, is_test=is_test)
        >>>     loss = fluid.layers.cross_entropy(
        >>>                 input=fluid.layers.fc(hidden, size=10, act='softmax'),
        >>>                 label=fluid.layers.data(name='label', shape=[1], dtype='int64'))
        >>>     return loss
        >>>
        >>> train_program = fluid.Program()
        >>> startup_program = fluid.Program()
        >>> test_program = fluid.Program()
        >>>
        >>> with fluid.program_guard(train_program, startup_program):
        >>>     with fluid.unique_name.guard():
        >>>         loss = network(is_test=False)
        >>>         sgd = fluid.optimizer.SGD(learning_rate=1e-3)
        >>>         sgd.minimize(loss)
        >>>
        >>> # the test startup program is not used.
        >>> with fluid.program_guard(test_program, fluid.Program()):
        >>>     with fluid.unique_name.guard():
        >>>         loss = network(is_test=True)

    The two code snippets above will generate same programs.
    """
    if for_test:
        # Test clone: flip is_test on all ops (readers are kept).
        p = self._inference_optimize(prune_read_op=False)
    else:
        # Training clone: deep-copy the C++ desc and rebuild the Python
        # Block wrappers over it, then sync the wrappers with C++.
        p = Program()
        p.current_block_idx = self.current_block_idx
        p._seed = self._seed
        p.desc = core.ProgramDesc(self.desc)
        p.blocks = [
            Block(p, i) for i in six.moves.range(self.desc.num_blocks())
        ]

        p._current_role = self._current_role
        p._op_role_var = self._op_role_var

        p._sync_with_cpp()

    # Parameter/data/distributed metadata lives only on the Python side,
    # so it must be copied explicitly in both branches.
    p._copy_param_info_from(self)
    p._copy_data_info_from(self)
    p._copy_dist_param_info_from(self)
    return p
def _prune(self, targets):
    """
    Prune operators and variables which are not needed to generate
    :code:`targets`.

    Notes: This is a very low level API. Users should not use this API
    directly. This API is in flux and not stable.

    Args:
        targets(list|Variable|Operator): A list of variables or operators
            need to be pruned

    Returns:
        Program:  A new, pruned program.

    Raises:
        ValueError: if a target is neither a Variable nor an Operator, or
            a target Variable has no producing operator in the global block.
    """
    if not isinstance(targets, list):
        targets = [targets]
    targets_idx = []
    for t in targets:
        if not isinstance(t, Operator):
            if isinstance(t, Variable):
                # After transpiler processing, the op that output this
                # variable maybe has been changed, so t.op is not reliable
                # and we need to find the current op that generate this
                # variable here.
                t.op = None
                global_block = self.global_block()
                for idx, op in enumerate(global_block.ops):
                    if t.name in op.output_arg_names:
                        # First producing op wins; later ops that also
                        # write this name are ignored.
                        t.op = op
                        break

                # From here on, prune by the producing operator.
                t = t.op
                if t is None:
                    raise ValueError(
                        "The target variable must have an "
                        "associated operator that generates it.")
            else:
                raise ValueError("All targets of prune() can only be "
                                 "Variable or Operator.")

        # Record (block index, op index) for the C++ pruning pass.
        targets_idx.append([t.block.idx, t.idx])
    res = Program()
    res.desc = core.prune(self.desc, targets_idx)
    res.blocks = [
        Block(res, i) for i in six.moves.range(res.desc.num_blocks())
    ]
    res._sync_with_cpp()
    return res
def _inference_optimize(self, prune_read_op=True):
    """
    This method will create a new program and do following adjustments on it:
    1. Remove all reader variables and their creator ops if exist.

    2. Remove the :code:`read_op` if exists.

    3. change the :code:`is_test`
    attribute of operators to :code:`True`. All the :code:`Parameter`
    information will be lost.

    Args:
        prune_read_op(bool): remove the read ops that are added by py_reader
                             for cpp inference library

    Notes: This API is a very low level API. Use
    :code:`Program.clone(for_test=True)` instead.

    Returns:
        Program: The new program.
    """
    res = Program()
    res.desc = core.ProgramDesc(self.desc)

    # remove all readers and the read_op if exist
    read_op_idx = 0
    root_block = res.desc.block(0)
    if prune_read_op:
        # Scan forward until the first 'read' op (or the end of the block);
        # read_op_idx ends up at that op's index.
        while True:
            if read_op_idx >= root_block.op_size() or root_block.op(
                    read_op_idx).type() == 'read':
                break
            read_op_idx += 1
        if read_op_idx < root_block.op_size():
            # Remove ops [0, read_op_idx] — everything up to and including
            # the read op (the reader-creation prologue).
            root_block._remove_op(0, read_op_idx + 1)
        for var in root_block.all_vars():
            if var.type() == core.VarDesc.VarType.READER:
                root_block._remove_var(cpt.to_bytes(var.name()))

    # change all `is_test` attributes to True
    for i in six.moves.range(res.desc.num_blocks()):
        block = res.desc.block(i)
        for j in six.moves.range(block.op_size()):
            op = block.op(j)
            if op.has_attr('is_test'):
                op._set_attr('is_test', True)
    # Rebuild Python wrappers over the edited C++ desc and sync.
    res.blocks = [
        Block(res, i) for i in six.moves.range(res.desc.num_blocks())
    ]
    res._sync_with_cpp()
    return res
@staticmethod
def parse_from_string(binary_str):
    """
    Deserialize a program desc from protobuf binary string.

    Notes: All information about parameters will be lost after serialization
    and deserialization.

    Args:
        binary_str(str): The binary protobuf string.

    Returns:
        Program: A deserialized program desc.
    """
    p = Program()
    p.desc = core.ProgramDesc(binary_str)
    p.blocks = [Block(p, i) for i in six.moves.range(p.desc.num_blocks())]
    # Pull the freshly deserialized C++ blocks/ops into the Python wrappers.
    p._sync_with_cpp()
    return p
@staticmethod
def _construct_from_desc(desc):
    """Build a Program wrapper around an existing ``core.ProgramDesc``.

    Args:
        desc(core.ProgramDesc): The program desc for constructing.

    Returns:
        Program: A program.
    """
    program = Program()
    program.desc = desc
    program.blocks = [
        Block(program, idx)
        for idx in six.moves.range(program.desc.num_blocks())
    ]
    program._sync_with_cpp()
    return program
@property
def random_seed(self):
    """
    The default random seed for random operators in Program. Zero means get
    the random seed from random device.

    Notes: It must be set before the operators have been added.

    Returns:
        int: the seed currently stored on this program.
    """
    return self._seed
@property
def num_blocks(self):
    """
    The number of blocks in this program.

    Returns:
        int: block count, delegated to the C++ ProgramDesc.
    """
    return self.desc.num_blocks()
@random_seed.setter
def random_seed(self, seed):
    """Set the program-wide random seed.

    Args:
        seed(int): the new seed; 0 means draw from a random device.

    Raises:
        ValueError: if ``seed`` is not an int.
    """
    if not isinstance(seed, int):
        # Bug fix: message previously read "a integer".
        raise ValueError("Seed must be an integer.")
    self._seed = seed
def __repr__(self):
    # Debug representation is identical to the protobuf debug string.
    return str(self)
def global_block(self):
    """
    Get the first block of this program.

    Returns:
        Block: the root (index 0) block.
    """
    return self.blocks[0]
def block(self, index):
    """
    Get the :code:`index`  block of this program

    Args:
        index(int): The index of block to get

    Returns:
        Block: The :code:`index` block
    """
    return self.blocks[index]
def current_block(self):
    """
    Get the current block. The :code:`current` block is the block to append
    operators.

    Returns:
        Block: the block selected by ``current_block_idx``.
    """
    return self.blocks[self.current_block_idx]
def _create_block(self, parent_idx=None):
    """Create a new block and make it the current block.

    Args:
        parent_idx(int): The parent block index; defaults to the current
            block when None.

    Returns:
        Block: The new block.
    """
    new_idx = len(self.blocks)
    if parent_idx is None:
        parent = self.current_block()
    else:
        parent = self.block(parent_idx)
    # Order matters: mirror the block into the C++ desc first, then switch
    # the Python-side current index and wrap the new block.
    self.desc.append_block(parent.desc)
    self.current_block_idx = new_idx
    self.blocks.append(Block(self, self.current_block_idx))
    return self.current_block()
def _rollback(self):
    """
    Exit a code block, i.e., roll back to the parent block.

    Returns:
        None
    """
    # Only the Python-side current index changes; already-created blocks
    # remain in the C++ desc.
    self.current_block_idx = self.current_block().parent_idx
def _sync_with_cpp(self):
    """
    Synchronize Python instance to its binding C++ object instance.
    If the program is modified in C++ space, this method should be invoked.

    Notes: This is a very low level API. Users should not invoke it
    directly.

    Returns:
        None
    """
    # Wrap any blocks that so far exist only on the C++ side ...
    for block_idx in range(len(self.blocks), self.desc.num_blocks()):
        self.blocks.append(Block(self, block_idx))
    # ... then let every block refresh its own vars/ops from C++.
    for block in self.blocks:
        block._sync_with_cpp()
def _copy_param_info_from(self, other):
    """Copy parameter information from another Program.

    Notes: This is a very low level API. Users should not invoke it
    directly.

    Args:
        other(Program): Other program

    Returns:
        None

    Raises:
        TypeError: if ``other`` is not a Program.
        ValueError: if the two programs have different block counts.
    """
    if not isinstance(other, Program):
        raise TypeError(
            "_copy_param_info_from should be invoked with Program")

    if len(self.blocks) != len(other.blocks):
        raise ValueError(
            "_copy_param_info_from should be invoked with two program, "
            "with represent the same topology")
    # Parameters only live in the global (root) block.
    self.global_block()._copy_param_info_from(other.global_block())
def _copy_dist_param_info_from(self, other):
    """Copy distributed-training metadata from another Program.

    Args:
        other(Program): Other program

    Returns:
        None

    Raises:
        TypeError: if ``other`` is not a Program.
    """
    if not isinstance(other, Program):
        raise TypeError(
            "_copy_dist_param_info_from should be invoked with Program")
    # Mirror exactly the distributed attributes the original copied.
    for attr_name in ("_is_distributed", "_is_chief",
                      "_parameters_on_pservers", "_endpoints",
                      "_ps_endpoint", "_distributed_lookup_table"):
        setattr(self, attr_name, getattr(other, attr_name))
def _copy_data_info_from(self, other):
    """
    Copy the information of data variables from other program.

    Notes: This is a very low level API. Users should not invoke it
    directly.

    Args:
        other(Program): Other program

    Returns:
        None

    Raises:
        TypeError: if ``other`` is not a Program.
        ValueError: if the two programs have different block counts.
    """
    if not isinstance(other, Program):
        # Bug fix: error messages previously named _copy_param_info_from
        # (copy-paste), which misdirected debugging.
        raise TypeError("_copy_data_info_from should be invoked with "
                        "Program")

    if len(self.blocks) != len(other.blocks):
        raise ValueError("_copy_data_info_from should be invoked with two "
                         "program, with represent the same topology")
    # Propagate the is_data flag for every global-block variable marked as
    # a data variable in the source program.
    for var in list(other.global_block().vars.values()):
        if var.is_data:
            self.global_block().var(var.name).is_data = True
def list_vars(self):
    """Yield every variable of this Program, block by block.

    Returns:
        iterable: The generator will yield every variable in this program.
    """
    for blk in self.blocks:
        # Snapshot the dict values so concurrent mutation of the block's
        # vars during iteration is safe (mirrors the original list() copy).
        for variable in list(blk.vars.values()):
            yield variable
class Parameter(Variable):
    """A persistable Variable that optimizers update after each iteration.

    Training a neural network is essentially the updating of its
    parameters. Relative to a general Variable, a Parameter carries a few
    extra member variables:

    Args:
        trainable(bool): True if the parameter need to be updated after
            iterations.
        optimize_attr(map): Parameter attributes related with optimizing.
            Currently, it only contains 'learning_rate'.
            Default: {'learning_rate': 1.0}
        regularizer(WeightDecayRegularizer): The Regularizer which will
            be applied on the parameter. Default: None
        gradient_clip_attr(BaseGradientClipAttr): The gradient clip strategy
            which will be applied on the parameter. Default: None
        do_model_average(bool): True if the model average strategy will
            be applied on this parameter.
    """

    def __init__(self, block, shape, dtype, **kwargs):
        # Validate shape/dtype before delegating to Variable.
        if shape is None or dtype is None:
            raise ValueError("Parameter must set shape and dtype")
        if len(shape) == 0:
            raise ValueError("Parameter shape cannot be empty")

        for dim in shape:
            if dim < 0:
                raise ValueError("Parameter shape should not be related with "
                                 "batch-size")

        Variable.__init__(
            self, block, persistable=True, shape=shape, dtype=dtype, **kwargs)
        self.trainable = kwargs.get('trainable', True)
        self.optimize_attr = kwargs.get('optimize_attr',
                                        {'learning_rate': 1.0})
        self.regularizer = kwargs.get('regularizer', None)
        self.gradient_clip_attr = kwargs.get('gradient_clip_attr', None)
        self.do_model_average = kwargs.get('do_model_average', None)

    def __str__(self):
        return self.to_string(True)

    def to_string(self, throw_on_error, with_details=False):
        """Render this Parameter as a debug string.

        Args:
            throw_on_error(bool): raise exception when self is not
                initialized when throw_on_error is True.
            with_details(bool): more details about variables and parameters
                (e.g. trainable, optimize_attr, ...) will be printed when
                with_details is True.

        Returns(str): The debug string.
        """
        assert isinstance(throw_on_error, bool) and isinstance(with_details,
                                                               bool)
        if not with_details:
            return Variable.to_string(self, throw_on_error, False)
        # Detailed mode: append the Parameter-specific attributes to the
        # base Variable description.
        text = Variable.to_string(self, throw_on_error, True)
        for attr_name in ("trainable", "optimize_attr", "regularizer",
                          "gradient_clip_attr", "do_model_average"):
            text += "%s: %s\n" % (attr_name,
                                  six.binary_type(getattr(self, attr_name)))
        return text

    __repr__ = __str__
# program is a global instance.
# _main_program_ / _startup_program_ are the process-wide defaults returned
# by default_main_program() / default_startup_program(); use program_guard()
# (or the switch_* helpers) to swap them.
_main_program_ = Program()
_startup_program_ = Program()
def default_startup_program():
    """
    Get default/global startup program.

    The layer function in :code:`fluid.layers` will create parameters, readers,
    NCCL handles as global variables. The :code:`startup_program` will
    initialize them by the operators in startup program. The layer function will
    append these initialization operators into startup program.

    This method will return the :code:`default` or the :code:`current` startup
    program. Users can use :code:`fluid.program_guard` to switch program.

    Returns:
        Program: startup program
    """
    # Returns whatever program_guard/switch_startup_program last installed.
    return _startup_program_
def default_main_program():
    """
    Get default/global main program. The main program is used for training or
    testing.

    All layer function in :code:`fluid.layers` will append operators and
    variables to the :code:`default_main_program`.

    The :code:`default_main_program` is the default program in a lot of APIs.
    For example, the :code:`Executor.run()` will execute the
    :code:`default_main_program` when the program is not specified.

    Returns:
        Program: main program
    """
    # Returns whatever program_guard/switch_main_program last installed.
    return _main_program_
def switch_main_program(program):
    """Install ``program`` as the global main program.

    Args:
        program(Program): The new main program

    Returns:
        Program: The previous main program
    """
    global _main_program_
    # Swap and hand the old program back so callers can restore it later.
    previous, _main_program_ = _main_program_, program
    return previous
def switch_startup_program(program):
    """Install ``program`` as the global startup program.

    Args:
        program(Program): The new startup program

    Returns:
        Program: The previous startup program
    """
    global _startup_program_
    # Swap and hand the old program back so callers can restore it later.
    previous, _startup_program_ = _startup_program_, program
    return previous
@signature_safe_contextmanager
def program_guard(main_program, startup_program=None):
    """
    Change the global main program and startup program with `with` statement.
    Layer functions in the Python `with` block will append operators and
    variables to the new main programs.

    Examples:

        >>> import paddle.fluid as fluid
        >>> main_program = fluid.Program()
        >>> startup_program = fluid.Program()
        >>> with fluid.program_guard(main_program, startup_program):
        >>>     data = fluid.layers.data(...)
        >>>     hidden = fluid.layers.fc(...)

    Notes: The temporary :code:`Program` can be used if the user does not need
    to construct either of startup program or main program.

    Examples:

        >>> import paddle.fluid as fluid
        >>> main_program = fluid.Program()
        >>> # does not care about startup program. Just pass a temporary value.
        >>> with fluid.program_guard(main_program, fluid.Program()):
        >>>     data = ...

    Args:
        main_program(Program): New main program inside `with` statement.
        startup_program(Program): New startup program inside `with` statement.
            None means do not change startup program.

    Raises:
        TypeError: if either argument is not a Program.
    """
    if not isinstance(main_program, Program):
        raise TypeError("main_program should be Program")
    main_program = switch_main_program(main_program)
    if startup_program is not None:
        if not isinstance(startup_program, Program):
            raise TypeError("startup_program should be Program")
        startup_program = switch_startup_program(startup_program)
    try:
        yield
    finally:
        # Bug fix: restore the previous global programs even if the guarded
        # block raises; without try/finally an exception would permanently
        # leave the user's programs installed as the defaults.
        switch_main_program(main_program)
        if startup_program is not None:
            switch_startup_program(startup_program)
def _get_var(name, program=None):
    """Get a variable by name from the global block of a program.

    Args:
        name(str): name of the variable
        program(Program|None): program object.
            If None, default_global_program() will be used.

    Returns:
        Variable
    """
    target = default_main_program() if program is None else program
    assert isinstance(name, str)
    assert isinstance(target, Program)

    return target.global_block().var(name)
@signature_safe_contextmanager
def _dygraph_guard(tracer):
    """Temporarily install ``tracer`` as the global dygraph tracer.

    The previous tracer is restored on exit, even if the guarded block
    raises.
    """
    global _dygraph_tracer_
    tmp_trace = _dygraph_tracer_
    _dygraph_tracer_ = tracer
    try:
        yield
    finally:
        # Bug fix: restore on exception as well, otherwise the process
        # would stay in dygraph-traced mode after a failing block.
        _dygraph_tracer_ = tmp_trace
@signature_safe_contextmanager
def _dygraph_place_guard(place):
    """Temporarily install ``place`` as the expected dygraph place.

    The previous place is restored on exit, even if the guarded block
    raises.
    """
    global _dygraph_current_expected_place_
    tmp_place = _dygraph_current_expected_place_
    _dygraph_current_expected_place_ = place
    try:
        yield
    finally:
        # Bug fix: restore on exception as well.
        _dygraph_current_expected_place_ = tmp_place
| 33.403326 | 105 | 0.570873 |
7953da544c8b6874d3e06132ce1376c43e0b3d6d | 6,486 | py | Python | dali/test/python/test_operator_readers_index.py | Riyria-was-taken/DALI | 35360cf0986957f9c6d93ed54be221628d99ad0c | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | dali/test/python/test_operator_readers_index.py | Riyria-was-taken/DALI | 35360cf0986957f9c6d93ed54be221628d99ad0c | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | dali/test/python/test_operator_readers_index.py | Riyria-was-taken/DALI | 35360cf0986957f9c6d93ed54be221628d99ad0c | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from nvidia.dali import Pipeline, pipeline_def
import nvidia.dali.ops as ops
import nvidia.dali.fn as fn
import nvidia.dali.tfrecord as tfrec
import os.path
import tempfile
import numpy as np
from test_utils import compare_pipelines, get_dali_extra_path
from nose.tools import assert_raises
def skip_second(src, dst):
    """Copy ``src`` to ``dst`` keeping only every other line.

    Lines 0, 2, 4, ... of ``src`` are written; each second line is
    dropped. Used by the reader tests to thin out index files.

    Args:
        src (str): path of the input text file.
        dst (str): path of the output text file (overwritten).
    """
    with open(src, 'r') as fin, open(dst, 'w') as fout:
        for line_no, line in enumerate(fin):
            # Keep even-indexed lines only (same parity as the original
            # 'second' toggle, which started False on line 0).
            if line_no % 2 == 0:
                fout.write(line)
def test_tfrecord():
    """Compare readers.TFRecord outputs for the original vs. a thinned index.

    Builds one pipeline over the full index file and one over an index with
    every second entry removed, then checks both decode identical samples
    for a full epoch.
    """
    class TFRecordPipeline(Pipeline):
        # Minimal pipeline that extracts 'image/encoded' from a TFRecord.
        def __init__(self, batch_size, num_threads, device_id, num_gpus,
                     data, data_idx):
            super(TFRecordPipeline, self).__init__(batch_size, num_threads,
                                                   device_id)
            self.input = ops.readers.TFRecord(
                path=data,
                index_path=data_idx,
                features={
                    "image/encoded": tfrec.FixedLenFeature((), tfrec.string, ""),
                    "image/class/label": tfrec.FixedLenFeature([1], tfrec.int64, -1)}
            )

        def define_graph(self):
            inputs = self.input(name="Reader")
            images = inputs["image/encoded"]
            return images

    tfrecord = os.path.join(get_dali_extra_path(), 'db', 'tfrecord', 'train')
    tfrecord_idx_org = os.path.join(get_dali_extra_path(), 'db', 'tfrecord', 'train.idx')
    tfrecord_idx = "tfr_train.idx"

    idx_files_dir = tempfile.TemporaryDirectory()
    idx_file = os.path.join(idx_files_dir.name, tfrecord_idx)

    # Thin the index file: keep every other entry (see skip_second).
    skip_second(tfrecord_idx_org, idx_file)
    pipe = TFRecordPipeline(1, 1, 0, 1, tfrecord, idx_file)
    pipe_org = TFRecordPipeline(1, 1, 0, 1, tfrecord, tfrecord_idx_org)
    pipe.build()
    pipe_org.build()
    iters = pipe.epoch_size("Reader")
    for _ in range(iters):
        out = pipe.run()
        out_ref = pipe_org.run()
        for a, b in zip(out, out_ref):
            assert np.array_equal(a.as_array(), b.as_array())
    # One extra run on the reference pipeline — presumably to step it past
    # the epoch boundary; confirm against reader semantics.
    _ = pipe_org.run()
def test_recordio():
    """Compare readers.MXNet outputs for the original vs. a thinned index.

    Mirrors test_tfrecord for the RecordIO/MXNet reader: a pipeline using
    an index file with every second entry removed must yield the same
    samples as one using the full index.
    """
    class MXNetReaderPipeline(Pipeline):
        # Minimal pipeline returning only the image tensors from the reader.
        def __init__(self, batch_size, num_threads, device_id, num_gpus,
                     data, data_idx):
            super(MXNetReaderPipeline, self).__init__(batch_size, num_threads,
                                                      device_id)
            self.input = ops.readers.MXNet(path=[data], index_path=[data_idx],
                                           shard_id=device_id,
                                           num_shards=num_gpus)

        def define_graph(self):
            images, _ = self.input(name="Reader")
            return images

    recordio = os.path.join(get_dali_extra_path(), 'db', 'recordio', 'train.rec')
    recordio_idx_org = os.path.join(get_dali_extra_path(), 'db', 'recordio', 'train.idx')
    recordio_idx = "rio_train.idx"

    idx_files_dir = tempfile.TemporaryDirectory()
    idx_file = os.path.join(idx_files_dir.name, recordio_idx)

    # Thin the index file: keep every other entry (see skip_second).
    skip_second(recordio_idx_org, idx_file)

    pipe = MXNetReaderPipeline(1, 1, 0, 1, recordio, idx_file)
    pipe_org = MXNetReaderPipeline(1, 1, 0, 1, recordio, recordio_idx_org)
    pipe.build()
    pipe_org.build()
    iters = pipe.epoch_size("Reader")
    for _ in range(iters):
        out = pipe.run()
        out_ref = pipe_org.run()
        for a, b in zip(out, out_ref):
            assert np.array_equal(a.as_array(), b.as_array())
    # Extra run on the reference pipeline (see test_tfrecord).
    _ = pipe_org.run()
def test_wrong_feature_shape():
    """A FixedLenFeature whose shape is too small must fail at run time.

    'image/object/bbox' and 'image/object/class/label' are declared with
    scalar shapes ([]), which cannot hold the variable-length data in the
    record, so pipe.run() is expected to raise RuntimeError.
    """
    features = {
        'image/encoded': tfrec.FixedLenFeature((), tfrec.string, ""),
        'image/object/bbox': tfrec.FixedLenFeature([], tfrec.float32, -1.0),
        'image/object/class/label': tfrec.FixedLenFeature([], tfrec.int64, -1),
    }
    test_dummy_data_path = os.path.join(get_dali_extra_path(), 'db', 'coco_dummy')
    pipe = Pipeline(1, 1, 0)
    with pipe:
        input = fn.readers.tfrecord(
            path=os.path.join(test_dummy_data_path, 'small_coco.tfrecord'),
            index_path=os.path.join(test_dummy_data_path, 'small_coco_index.idx'),
            features=features)
    pipe.set_outputs(input['image/encoded'], input['image/object/class/label'],
                     input['image/object/bbox'])
    pipe.build()
    # the error is raised because FixedLenFeature is used with insufficient
    # shape to house the input
    assert_raises(RuntimeError, pipe.run)
# Batch size shared by the reader-alias comparison tests below.
batch_size_alias_test = 64


@pipeline_def(batch_size=batch_size_alias_test, device_id=0, num_threads=4)
def mxnet_pipe(mxnet_op, path, index_path):
    """Pipeline reading (files, labels) via the supplied MXNet reader op.

    The reader operator is passed in so the same pipeline can be built with
    either fn.readers.mxnet or the legacy fn.mxnet_reader alias.
    """
    files, labels = mxnet_op(path=path, index_path=index_path)
    return files, labels
def test_mxnet_reader_alias():
    """fn.readers.mxnet and legacy fn.mxnet_reader must yield equal batches."""
    recordio = [os.path.join(get_dali_extra_path(), 'db', 'recordio', 'train.rec')]
    recordio_idx = [os.path.join(get_dali_extra_path(), 'db', 'recordio', 'train.idx')]
    new_pipe = mxnet_pipe(fn.readers.mxnet, recordio, recordio_idx)
    legacy_pipe = mxnet_pipe(fn.mxnet_reader, recordio, recordio_idx)
    # Compare 50 iterations of both pipelines element-wise.
    compare_pipelines(new_pipe, legacy_pipe, batch_size_alias_test, 50)
@pipeline_def(batch_size=batch_size_alias_test, device_id=0, num_threads=4)
def tfrecord_pipe(tfrecord_op, path, index_path):
    """Pipeline decoding 'image/encoded' via the supplied TFRecord reader op.

    The reader operator is passed in so the same pipeline can be built with
    either fn.readers.tfrecord or the legacy fn.tfrecord_reader alias.
    """
    inputs = tfrecord_op(path=path, index_path=index_path,
                         features={"image/encoded": tfrec.FixedLenFeature((), tfrec.string, ""),
                                   "image/class/label": tfrec.FixedLenFeature([1], tfrec.int64, -1)})
    return inputs["image/encoded"]
def test_tfrecord_reader_alias():
    """fn.readers.tfrecord and legacy fn.tfrecord_reader must yield equal batches."""
    tfrecord = os.path.join(get_dali_extra_path(), 'db', 'tfrecord', 'train')
    tfrecord_idx = os.path.join(get_dali_extra_path(), 'db', 'tfrecord', 'train.idx')
    new_pipe = tfrecord_pipe(fn.readers.tfrecord, tfrecord, tfrecord_idx)
    legacy_pipe = tfrecord_pipe(fn.tfrecord_reader, tfrecord, tfrecord_idx)
    # Compare 50 iterations of both pipelines element-wise.
    compare_pipelines(new_pipe, legacy_pipe, batch_size_alias_test, 50)
| 43.530201 | 124 | 0.660191 |
7953dabebfd901f3435d828c32297755a5df7147 | 3,994 | py | Python | lldb/packages/Python/lldbsuite/test/commands/watchpoints/watchpoint_set_command/TestWatchLocationWithWatchSet.py | azharudd/llvm-project | 2fe2a3f923125e4c86bc5d140f327b5904a5bc97 | [
"Apache-2.0"
] | 1 | 2022-03-28T05:58:03.000Z | 2022-03-28T05:58:03.000Z | lldb/packages/Python/lldbsuite/test/commands/watchpoints/watchpoint_set_command/TestWatchLocationWithWatchSet.py | DougGregor/llvm-project | 97602a8bd045f087e02348b64ffbdd143a33e10b | [
"Apache-2.0"
] | null | null | null | lldb/packages/Python/lldbsuite/test/commands/watchpoints/watchpoint_set_command/TestWatchLocationWithWatchSet.py | DougGregor/llvm-project | 97602a8bd045f087e02348b64ffbdd143a33e10b | [
"Apache-2.0"
] | null | null | null | """
Test lldb watchpoint that uses 'watchpoint set -w write -s size' to watch a pointed location with size.
"""
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class WatchLocationUsingWatchpointSetTestCase(TestBase):
    """Exercise ``watchpoint set expression -w write -s size`` on a pointed-to
    location (``g_char_ptr + 7``) and verify the watchpoint fires when the
    inferior writes out of its agreed-upon range."""

    mydir = TestBase.compute_mydir(__file__)

    def setUp(self):
        """Cache the breakpoint line and the name of the offending function."""
        # Call super's setUp().
        TestBase.setUp(self)
        # Our simple source filename.
        self.source = 'main.cpp'
        # Find the line number to break inside main().
        self.line = line_number(
            self.source, '// Set break point at this line.')
        # This is for verifying that watch location works.
        self.violating_func = "do_bad_thing_with_location"
        # Build dictionary to have unique executable names for each test
        # method.

    @expectedFailureAll(
        oslist=["linux"],
        archs=[
            'aarch64',
            'arm'],
        bugnumber="llvm.org/pr26031")
    def test_watchlocation_using_watchpoint_set(self):
        """Test watching a location with 'watchpoint set expression -w write -s size' option."""
        self.build()
        self.setTearDownCleanup()

        exe = self.getBuildArtifact("a.out")
        self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)

        # Add a breakpoint to set a watchpoint when stopped on the breakpoint.
        lldbutil.run_break_set_by_file_and_line(
            self, None, self.line, num_expected_locations=1)

        # Run the program.
        self.runCmd("run", RUN_SUCCEEDED)

        # We should be stopped again due to the breakpoint.
        # The stop reason of the thread should be breakpoint.
        self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
                    substrs=['stopped',
                             'stop reason = breakpoint'])

        # Now let's set a write-type watchpoint pointed to by 'g_char_ptr' and
        # with offset as 7.
        # The main.cpp, by design, misbehaves by not following the agreed upon
        # protocol of only accessing the allowable index range of [0, 6].
        self.expect(
            "watchpoint set expression -w write -s 1 -- g_char_ptr + 7",
            WATCHPOINT_CREATED,
            substrs=[
                'Watchpoint created',
                'size = 1',
                'type = w'])
        # Before any write, the watched byte is still zero.
        self.runCmd("expr unsigned val = g_char_ptr[7]; val")
        self.expect(self.res.GetOutput().splitlines()[0], exe=False,
                    endstr=' = 0')

        # Use the '-v' option to do verbose listing of the watchpoint.
        # The hit count should be 0 initially.
        self.expect("watchpoint list -v",
                    substrs=['hit_count = 0'])

        self.runCmd("process continue")

        # We should be stopped again due to the watchpoint (write type), but
        # only once.  The stop reason of the thread should be watchpoint.
        self.expect("thread list", STOPPED_DUE_TO_WATCHPOINT,
                    substrs=['stopped',
                             'stop reason = watchpoint',
                             self.violating_func])

        # Switch to the thread stopped due to watchpoint and issue some
        # commands.
        self.switch_to_thread_with_stop_reason(lldb.eStopReasonWatchpoint)
        self.runCmd("thread backtrace")
        # After the violating write, the watched byte holds 99 ('c').
        self.runCmd("expr unsigned val = g_char_ptr[7]; val")
        self.expect(self.res.GetOutput().splitlines()[0], exe=False,
                    endstr=' = 99')

        # Use the '-v' option to do verbose listing of the watchpoint.
        # The hit count should now be the same as the number of threads that
        # stopped on a watchpoint.
        threads = lldbutil.get_stopped_threads(
            self.process(), lldb.eStopReasonWatchpoint)

        self.expect("watchpoint list -v",
                    substrs=['hit_count = %d' % len(threads)])

        self.runCmd("thread backtrace all")
| 38.403846 | 103 | 0.611167 |
7953db83938c77357a9c86dfe01eb506d3edd338 | 677 | py | Python | queuedownloader/services/youtube.py | RevolutionsDev/queuedownloader | cf93c26b82357ae02d452cb2a3396d27a9db8c42 | [
"MIT"
] | null | null | null | queuedownloader/services/youtube.py | RevolutionsDev/queuedownloader | cf93c26b82357ae02d452cb2a3396d27a9db8c42 | [
"MIT"
] | null | null | null | queuedownloader/services/youtube.py | RevolutionsDev/queuedownloader | cf93c26b82357ae02d452cb2a3396d27a9db8c42 | [
"MIT"
] | null | null | null | from ._base import DownloaderService
from youtube_dl import YoutubeDL
class YouTubeService(DownloaderService):
    """Downloader service handling YouTube URLs.

    Keyword Args (forwarded through ``kwargs``):
        subtitles (bool): whether to fetch subtitles. Default True.
        subtitles_lang (str): subtitle language. Default "spanish".
    """

    # Service identifier; presumably used by the downloader registry/UI —
    # confirm against DownloaderService consumers.
    name = "YouTubeService"

    def __init__(self, *args, **kwargs):
        super(YouTubeService, self).__init__(*args, **kwargs)
        self.subtitles = kwargs.get("subtitles", True)
        self.subtitles_lang = kwargs.get("subtitles_lang", "spanish")

    @staticmethod
    def supported(url):
        """Return True if ``url`` looks like a YouTube link.

        Bug fix: the original implicitly returned None for string URLs
        that did not match either prefix; this now returns an explicit
        bool (False is falsy-compatible with the old None).

        Args:
            url (str): URL to check.

        Raises:
            TypeError: if ``url`` is not a str.
        """
        if not isinstance(url, str):
            raise TypeError("url should be a str")
        # str.startswith accepts a tuple of prefixes.
        return url.startswith(("https://youtube.com/", "https://youtu.be/"))
7953db8df50b416fd18c4267d002220eebae40cb | 14,697 | py | Python | nas_gcn/analysis/analysis_utils.py | deephyper/nas-gcn | 7faa66e9f4ec1f990a5ccdcfe0dd5255d4475b6f | [
"BSD-2-Clause"
] | null | null | null | nas_gcn/analysis/analysis_utils.py | deephyper/nas-gcn | 7faa66e9f4ec1f990a5ccdcfe0dd5255d4475b6f | [
"BSD-2-Clause"
] | null | null | null | nas_gcn/analysis/analysis_utils.py | deephyper/nas-gcn | 7faa66e9f4ec1f990a5ccdcfe0dd5255d4475b6f | [
"BSD-2-Clause"
] | null | null | null | import json
import pickle
import glob
import numpy as np
import pandas as pd
from tabulate import tabulate
from datetime import datetime
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import StandardScaler
def load_json(path):
    """Read a JSON document from disk.

    Args:
        path (str): location of the JSON file.

    Returns:
        data (dict): the parsed contents.
    """
    # The context manager guarantees the handle is closed even if
    # json.load raises on malformed input.
    with open(path, 'r') as fp:
        return json.load(fp)
def to_sec(ts):
    """Convert a timestamp string to POSIX seconds (interpreted as local time).

    Accepts ``%Y-%m-%d %H:%M:%S`` with or without a fractional-second suffix.

    Args:
        ts (str): timestamp string.

    Returns:
        float: seconds since the epoch.

    Raises:
        ValueError: if *ts* matches neither supported format.
    """
    try:
        return datetime.strptime(ts, '%Y-%m-%d %H:%M:%S').timestamp()
    except ValueError:
        # Bare `except:` previously swallowed everything (including
        # KeyboardInterrupt); only a format mismatch should trigger the
        # fallback parse with microseconds.
        return datetime.strptime(ts, '%Y-%m-%d %H:%M:%S.%f').timestamp()
def three_random_split(DATA_DIR, multi_class=False):
    """Combine results from three random seed trainings.

    Args:
        DATA_DIR (str): data pickle file location.
        multi_class (bool): if the regression has multi-class.

    Returns:
        train_true (np.array): training data true labels.
        train_pred (np.array): training data predicted labels.
        valid_true (np.array): validation data true labels.
        valid_pred (np.array): validation data predicted labels.
        test_true (np.array): testing data true labels.
        test_pred (np.array): testing data predicted labels.
    """
    y_true = []
    y_pred = []
    files = sorted(glob.glob(DATA_DIR + 'best_archs_result_0_*.pickle'))
    for file in files:
        with open(file, 'rb') as f:
            # The first pickled object in each file is a header/metadata
            # record; it is read and discarded.
            _ = pickle.load(f)
            # Each file then holds three (true, pred) pairs, loaded in
            # stream order.
            for _ in range(3):
                if multi_class:
                    # Keep the class axis; prepend a leading axis so the
                    # vstacks below stack entries per seed.
                    y_true.append(pickle.load(f)[np.newaxis, ...])
                    y_pred.append(pickle.load(f).squeeze()[np.newaxis, ...])
                else:
                    y_true.append(pickle.load(f).ravel())
                    y_pred.append(pickle.load(f).ravel().squeeze())
    # NOTE(review): the fixed indices below assume exactly three files
    # (seeds), i.e. nine entries ordered train/valid/test within each
    # file — confirm against the code that writes these pickles.
    train_true = np.vstack([y_true[i] for i in [0, 3, 6]])
    train_pred = np.vstack([y_pred[i] for i in [0, 3, 6]])
    valid_true = np.vstack([y_true[i] for i in [1, 4, 7]])
    valid_pred = np.vstack([y_pred[i] for i in [1, 4, 7]])
    test_true = np.vstack([y_true[i] for i in [2, 5, 8]])
    test_pred = np.vstack([y_pred[i] for i in [2, 5, 8]])
    return train_true, train_pred, valid_true, valid_pred, test_true, test_pred
def three_random_mean_std(DATA_DIR, multi_class=False):
    """Calculate the mean and standard deviation of three random seed trainings.

    Prints a MAE/MSE/R2 table for the train/valid/test splits.

    Args:
        DATA_DIR (str): data pickle file location.
        multi_class (bool): if the regression has multi-class.

    Returns:
        m (float): mean value.
        s (float): standard deviation value.
    """
    output = three_random_split(DATA_DIR, multi_class=multi_class)
    # `output` is (train_true, train_pred, valid_true, valid_pred,
    # test_true, test_pred); output[i*2] / output[i*2+1] pair true with
    # predicted for split i. The `result` rows are metric-major:
    # indices 0-2 are MAE, 3-5 MSE, 6-8 R2 (each over train/valid/test).
    funcs = [mean_absolute_error, mean_squared_error, r2_score]
    if not multi_class:
        result = []
        for func in funcs:
            for i in range(3):
                result.append([func(output[i * 2][j], output[i * 2 + 1][j]) for j in range(len(output[0]))])
        result = np.array(result)
        # Mean/std are taken over the random seeds.
        m = result.mean(axis=1)
        s = result.std(axis=1)
        print(tabulate(
            [['Train', f'{m[0]:0.4f}+/-{s[0]:0.4f}', f'{m[3]:0.4f}+/-{s[3]:0.4f}', f'{m[6]:0.4f}+/-{s[6]:0.4f}'],
             ['Valid', f'{m[1]:0.4f}+/-{s[1]:0.4f}', f'{m[4]:0.4f}+/-{s[4]:0.4f}', f'{m[7]:0.4f}+/-{s[7]:0.4f}'],
             ['Test', f'{m[2]:0.4f}+/-{s[2]:0.4f}', f'{m[5]:0.4f}+/-{s[5]:0.4f}', f'{m[8]:0.4f}+/-{s[8]:0.4f}']],
            headers=['', 'MAE', 'MSE', 'R2']))
    else:
        # One table per output class; metrics are computed on the slice
        # [..., c] of each split.
        for c in range(output[0].shape[-1]):
            result = []
            for func in funcs:
                for i in range(3):
                    result.append(
                        [func(output[i * 2][j, :, c], output[i * 2 + 1][j, :, c]) for j in range(len(output[0]))])
            result = np.array(result)
            m = result.mean(axis=1)
            s = result.std(axis=1)
            print(tabulate(
                [['Train', f'{m[0]:0.4f}+/-{s[0]:0.4f}', f'{m[3]:0.4f}+/-{s[3]:0.4f}', f'{m[6]:0.4f}+/-{s[6]:0.4f}'],
                 ['Valid', f'{m[1]:0.4f}+/-{s[1]:0.4f}', f'{m[4]:0.4f}+/-{s[4]:0.4f}', f'{m[7]:0.4f}+/-{s[7]:0.4f}'],
                 ['Test', f'{m[2]:0.4f}+/-{s[2]:0.4f}', f'{m[5]:0.4f}+/-{s[5]:0.4f}', f'{m[8]:0.4f}+/-{s[8]:0.4f}']],
                headers=['', 'MAE', 'MSE', 'R2']))
    # NOTE(review): in the multi-class branch, m and s leak out of the
    # loop, so only the LAST class's statistics are returned — confirm
    # this is intended by callers.
    return m, s
def create_csv(DATA_DIR, data):
    """Create a csv file of the architecture components.

    Decodes each integer architecture sequence in ``data['arch_seq']``
    into human-readable operation labels and writes one row per
    architecture (29 columns: 3 cells x 7 choices + 6 links + pool +
    reward) to ``nas_result.csv``.

    Args:
        DATA_DIR (str): data file location.
        data (dict): the dictionary file containing the operations for each architecture.
    """
    # Task specific
    state_dims = ['dim(4)', 'dim(8)', 'dim(16)', 'dim(32)']
    Ts = ['repeat(1)', 'repeat(2)', 'repeat(3)', 'repeat(4)']
    attn_methods = ['attn(const)', 'attn(gcn)', 'attn(gat)', 'attn(sym-gat)', 'attn(linear)', 'attn(gen-linear)',
                    'attn(cos)']
    attn_heads = ['head(1)', 'head(2)', 'head(4)', 'head(6)']
    aggr_methods = ['aggr(max)', 'aggr(mean)', 'aggr(sum)']
    update_methods = ['update(gru)', 'update(mlp)']
    activations = ['act(sigmoid)', 'act(tanh)', 'act(relu)', 'act(linear)', 'act(elu)', 'act(softplus)',
                   'act(leaky_relu)',
                   'act(relu6)']
    # Enumerate every GAT-cell variant in a fixed lexicographic order so an
    # integer choice in the search space indexes one 7-tuple of options.
    out = []
    for state_dim in state_dims:
        for T in Ts:
            for attn_method in attn_methods:
                for attn_head in attn_heads:
                    for aggr_method in aggr_methods:
                        for update_method in update_methods:
                            for activation in activations:
                                out.append(
                                    [state_dim, T, attn_method, attn_head, aggr_method, update_method, activation])
    # Same idea for the readout/pooling choices.
    out_pool = []
    for functions in ['GlobalSumPool', 'GlobalMaxPool', 'GlobalAvgPool']:
        for axis in ['(feature)', '(node)']:  # Pool in terms of nodes or features
            out_pool.append(functions + axis)
    out_pool.append('flatten')
    for state_dim in [16, 32, 64]:
        out_pool.append(f'AttentionPool({state_dim})')
    out_pool.append('AttentionSumPool')
    out_connect = ['skip', 'connect']

    # Decoder helpers: map an integer choice back to its description.
    def get_gat(index):
        return out[index]

    def get_pool(index):
        return out_pool[index]

    def get_connect(index):
        return out_connect[index]

    archs = np.array(data['arch_seq'])
    rewards = np.array(data['raw_rewards'])
    # NOTE(review): np.object is a deprecated alias (removed in NumPy
    # 1.24+); the plain builtin `object` is the forward-compatible spelling.
    a = np.empty((len(archs), 0), dtype=np.object)
    a = np.append(a, archs, axis=-1)
    # Last column of `a` is the reward for that architecture.
    a = np.append(a, rewards[..., np.newaxis], axis=-1)
    b = np.empty((0, 29), dtype=np.object)
    for i in range(len(a)):
        temp = a[i, :]
        # NOTE(review): the comprehensions below rebind `i`, shadowing the
        # outer loop variable — harmless here because `i` is re-assigned by
        # the for statement each iteration, but worth renaming.
        b0 = [get_gat(temp[0])[i] + '[cell1]' for i in range(len(get_gat(temp[0])))]
        b1 = [get_connect(temp[1]) + '[link1]']
        b2 = [get_gat(temp[2])[i] + '[cell2]' for i in range(len(get_gat(temp[2])))]
        b3 = [get_connect(temp[3]) + '[link2]']
        b4 = [get_connect(temp[4]) + '[link3]']
        b5 = [get_gat(temp[5])[i] + '[cell3]' for i in range(len(get_gat(temp[5])))]
        b6 = [get_connect(temp[6]) + '[link4]']
        b7 = [get_connect(temp[7]) + '[link5]']
        b8 = [get_connect(temp[8]) + '[link6]']
        b9 = [get_pool(temp[9])]
        bout = b0 + b1 + b2 + b3 + b4 + b5 + b6 + b7 + b8 + b9 + [temp[10]]
        bout = np.array(bout, dtype=object)
        b = np.append(b, bout[np.newaxis, ...], axis=0)
    table = pd.DataFrame(data=b)
    table.to_csv(DATA_DIR + 'nas_result.csv', encoding='utf-8', index=False, header=False)
def moving_average(time_list, data_list, window_size=100):
    """Smooth paired time/data series with a sliding-window mean.

    Args:
        time_list (list): a list of timestamps.
        data_list (list): a list of data points.
        window_size (int): the window size.

    Returns:
        tuple: (times, values) as np.ndarray; note that only
        ``len(data_list) - window_size`` windows are produced, matching
        the original truncation.
    """
    n_windows = len(data_list) - window_size

    def window_mean(seq, start):
        # Mean of seq[start : start + window_size].
        return sum(seq[start:start + window_size]) / window_size

    smoothed_times = [window_mean(time_list, k) for k in range(n_windows)]
    smoothed_values = [window_mean(data_list, k) for k in range(n_windows)]
    return np.array(smoothed_times), np.array(smoothed_values)
def plot_reward_vs_time(data, PLOT_DIR, ylim=None, time=True, plot=False, metric='MAE'):
    """Generate plot of search trajectory.

    Args:
        data (dict): the data dictionary.
        PLOT_DIR (str): the location to store the figure.
        ylim (float): the minimum value of the y axis.
        time (bool): True if want time as x axis, else want instance number.
        plot (bool): if want to create a plot (leave the figure open).
        metric (str): the type of metric on y axis.
    """
    start_infos = data['start_infos'][0]
    try:
        start_time = to_sec(data['workload']['times'][0])
    except (KeyError, IndexError, TypeError):
        # 'workload' may be absent or empty depending on how the search was
        # logged; fall back to the start-info timestamp. (Previously a bare
        # `except:` swallowed every exception here.)
        start_time = to_sec(start_infos['timestamp'])
    # Elapsed seconds since the search started, one entry per evaluation.
    times = [to_sec(ts) - start_time for ts in data['timestamps']]
    x = times
    y = data['raw_rewards']
    plt.figure(figsize=(5, 4))
    if time:
        plt.plot(np.array(x) / 60, y, 'o', markersize=3)
        plt.xlabel('Time (min)')
    else:
        plt.plot(y, 'o', markersize=3)
        plt.xlabel('Iterations')
    plt.ylabel(f'Reward (-{metric})')
    plt.xlim(left=0)
    if ylim is not None:
        plt.ylim(ylim)
    plt.locator_params(axis='y', nbins=4)
    plt.savefig(PLOT_DIR + 'reward.png', dpi=300, bbox_inches='tight')
    plt.savefig(PLOT_DIR + 'reward.svg', bbox_inches='tight')
    if not plot:
        plt.close()
def three_random_parity_plot(DATA_DIR, PLOT_DIR, multi_class=False, limits=None, plot=False, ticks=None):
    """Generate parity plots from three random seed trainings.

    Uses only the TEST split returned by three_random_split.

    Args:
        DATA_DIR (str): the location of the data file.
        PLOT_DIR (str): the location to store the figure.
        multi_class (bool): if it is multi-class regression.
        limits (list): the y limits you want to set.
        plot (bool): if want to create a plot.
        ticks (list): the x axis ticks.
    """
    _, _, _, _, y_true_raw, y_pred_raw = three_random_split(DATA_DIR, multi_class=multi_class)
    if not multi_class:
        y_true = y_true_raw.ravel()
        y_pred = y_pred_raw.ravel()
        # NOTE(review): true and predicted values are standardised with
        # SEPARATE fits, so each is z-scored against its own mean/std —
        # confirm this is the intended parity-plot normalisation.
        scaler = StandardScaler()
        y_true = scaler.fit_transform(y_true[..., np.newaxis]).squeeze()
        y_pred = scaler.fit_transform(y_pred[..., np.newaxis]).squeeze()
        fig, ax = plt.subplots(figsize=(4, 4))
        # Pad the axis range by 3% on each side unless explicit limits given.
        min_value = np.min([y_true.min(), y_pred.min()])
        max_value = np.max([y_true.max(), y_pred.max()])
        dist = max_value - min_value
        min_value -= 0.03 * dist
        max_value += 0.03 * dist
        if limits is not None:
            min_value, max_value = limits
        # Identity (y = x) reference line.
        ax.plot(np.linspace(min_value, max_value, 100), np.linspace(min_value, max_value, 100), 'k--', alpha=0.5)
        ax.scatter(y_true.ravel(), y_pred.ravel(), s=5, alpha=0.9)
        plt.xlim(min_value, max_value)
        plt.ylim(min_value, max_value)
        plt.xlabel("True")
        plt.ylabel("Predicted")
        print(min_value, max_value)
        from matplotlib import ticker
        # Scientific notation with math text for tick labels.
        formatter = ticker.ScalarFormatter(useMathText=True)
        formatter.set_scientific(True)
        formatter.set_powerlimits((-1, 1))
        if ticks is not None:
            plt.xticks(ticks, ticks)
            plt.yticks(ticks, ticks)
        else:
            plt.locator_params(axis='x', nbins=5)
            plt.locator_params(axis='y', nbins=5)
        ax.xaxis.set_major_formatter(formatter)
        ax.yaxis.set_major_formatter(formatter)
        # plt.tight_layout()
        plt.savefig(PLOT_DIR + "parity_plot.png", bbox_inches='tight')
        plt.savefig(PLOT_DIR + "parity_plot.svg", bbox_inches='tight')
        if not plot:
            plt.close();
    else:
        # One parity plot per output class; no standardisation here.
        for c in range(y_true_raw.shape[-1]):
            y_true = y_true_raw[..., c].ravel()
            y_pred = y_pred_raw[..., c].ravel()
            plt.figure(figsize=(4, 4))
            min_value = np.min([y_true.min(), y_pred.min()])
            max_value = np.max([y_true.max(), y_pred.max()])
            dist = max_value - min_value
            min_value -= 0.03 * dist
            max_value += 0.03 * dist
            if limits is not None:
                min_value, max_value = limits
            plt.plot(np.linspace(min_value, max_value, 100), np.linspace(min_value, max_value, 100), 'k--', alpha=0.5)
            plt.scatter(y_true.ravel(), y_pred.ravel(), s=5, alpha=0.9)
            plt.xlim(min_value, max_value)
            plt.ylim(min_value, max_value)
            plt.xlabel("True")
            plt.ylabel("Predicted")
            plt.locator_params(axis='x', nbins=5)
            plt.locator_params(axis='y', nbins=5)
            plt.savefig(PLOT_DIR + f"parity_plot_{c}.png", bbox_inches='tight')
            if not plot:
                plt.close();
def feature_importance(DATA_DIR, PLOT_DIR, plot=False):
    """Generate feature importance plots.

    Fits a random forest on the one-hot-encoded architecture table written
    by create_csv and plots the five most positive and five most negative
    feature contributions.

    Args:
        DATA_DIR (str): the location of the data file.
        PLOT_DIR (str): the location to store the figure.
        plot (bool): if want to create a plot.
    """
    train_data = pd.read_csv(DATA_DIR + 'nas_result.csv', header=None)
    df = train_data
    # One-hot encode categorical (object-dtype) columns, keep numeric
    # columns (e.g. the reward) as-is.
    df_new = pd.DataFrame()
    for i in range(df.shape[1]):
        if df.dtypes[i] == 'object':
            vals = pd.get_dummies(df.iloc[:, i])
        else:
            vals = df.iloc[:, i]
        df_new = pd.concat([df_new.reset_index(drop=True), vals.reset_index(drop=True)], axis=1)
    # Last column is the target (reward); everything else is a feature.
    X = df_new.iloc[:, :-1]
    y = df_new.iloc[:, -1]
    scaler = StandardScaler()
    y = scaler.fit_transform(y.values[..., np.newaxis]).squeeze()
    reg = RandomForestRegressor(n_estimators=100, random_state=0).fit(X.values, y)
    # NOTE(review): `ti` is never defined or imported in this module, so
    # this line raises NameError as written. The call matches the
    # treeinterpreter API — the missing import is most likely
    # `from treeinterpreter import ti` (not fixable here without adding
    # the third-party dependency).
    prediction, bias, contributions = ti.predict(reg, X.values)
    # Replace 0 entries of the one-hot matrix with -1 so a feature's
    # absence flips the sign of its contribution — NOTE(review): confirm
    # this sign convention is the intended interpretation.
    mask = np.copy(X.values)
    mask = mask.astype(float)
    mask[mask == 0] = -1
    importance = np.multiply(contributions, mask)
    importance = importance.mean(axis=0)
    # Normalise to [-1, 1] by the largest absolute contribution.
    importance = importance / np.max(np.abs(importance))
    # Top five positive and top five negative contributors.
    indices = np.argsort(importance)[-5:]
    indices_neg = np.argsort(importance)[:5]
    plt.figure(figsize=(12, 4))
    plt.barh(range(5, 10), importance[indices], align='center')
    plt.barh(range(5), importance[indices_neg], align='center')
    plt.yticks(range(10), [X.columns[i] for i in indices_neg] + [X.columns[i] for i in indices])
    plt.xlabel('Relative Importance')
    plt.tight_layout()
    plt.savefig(PLOT_DIR + 'feature_importance.png', dpi=300, bbox_inches='tight')
    if not plot:
        plt.close();
| 39.087766 | 118 | 0.578758 |
7953dc178fe088c3778dc1f32c2475638aa9371d | 3,341 | py | Python | Dashboard/account/migrations/0001_initial.py | Dheerajdoppalapudi/DashBoard-BackEnd | 67fdffcd8d2236cb7032588157f1f482bef1704c | [
"Apache-2.0"
] | null | null | null | Dashboard/account/migrations/0001_initial.py | Dheerajdoppalapudi/DashBoard-BackEnd | 67fdffcd8d2236cb7032588157f1f482bef1704c | [
"Apache-2.0"
] | 25 | 2022-01-07T09:04:19.000Z | 2022-02-27T17:51:16.000Z | Dashboard/account/migrations/0001_initial.py | Dheerajdoppalapudi/DashBoard-BackEnd | 67fdffcd8d2236cb7032588157f1f482bef1704c | [
"Apache-2.0"
] | 2 | 2022-01-07T09:11:18.000Z | 2022-01-11T06:07:17.000Z | # Generated by Django 3.2.8 on 2022-02-05 17:53
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id',
models.BigAutoField(auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID')),
('password',
models.CharField(max_length=128, verbose_name='password')),
('last_login',
models.DateTimeField(blank=True,
null=True,
verbose_name='last login')),
('is_superuser',
models.BooleanField(
default=False,
help_text=
'Designates that this user has all permissions without explicitly assigning them.',
verbose_name='superuser status')),
('name',
models.CharField(db_index=True, max_length=255, unique=True)),
('email',
models.EmailField(db_index=True, max_length=255,
unique=True)),
('is_staff', models.BooleanField(default=False)),
('eid', models.CharField(blank=True, default='',
max_length=10)),
('designation', models.CharField(default='', max_length=100)),
('university',
models.CharField(choices=[('univ', 'UNIVERSITY'),
('vskp', 'VISAKHAPATNAM'),
('hyd', 'HYDERABAD'),
('blr', 'BENGALURU')],
default='vskp',
max_length=15)),
('access',
models.CharField(choices=[('view', 'VIEW'),
('edit_all', 'EDIT ALL'),
('edit_some', 'EDIT SOME')],
default='view',
max_length=10)),
('groups',
models.ManyToManyField(
blank=True,
help_text=
'The groups this user belongs to. A user will get all permissions granted to each of their groups.',
related_name='user_set',
related_query_name='user',
to='auth.Group',
verbose_name='groups')),
('user_permissions',
models.ManyToManyField(
blank=True,
help_text='Specific permissions for this user.',
related_name='user_set',
related_query_name='user',
to='auth.Permission',
verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
| 41.7625 | 121 | 0.414247 |
7953dcdf5d329cbe2d8afb091e97c5e334913c2b | 6,242 | py | Python | tests/ci/fast_test_check.py | stdpain/ClickHouse | 7699d3f35e8d51d0c7bac39fc14469baeea928cd | [
"Apache-2.0"
] | null | null | null | tests/ci/fast_test_check.py | stdpain/ClickHouse | 7699d3f35e8d51d0c7bac39fc14469baeea928cd | [
"Apache-2.0"
] | null | null | null | tests/ci/fast_test_check.py | stdpain/ClickHouse | 7699d3f35e8d51d0c7bac39fc14469baeea928cd | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import logging
import subprocess
import os
import csv
import sys
from github import Github
from pr_info import PRInfo, get_event
from s3_helper import S3Helper
from get_robot_token import get_best_robot_token
from upload_result_helper import upload_results
from docker_pull_helper import get_image_with_version
from commit_status_helper import post_commit_status
from clickhouse_helper import ClickHouseHelper, mark_flaky_tests, prepare_tests_results_for_clickhouse
from stopwatch import Stopwatch
from rerun_helper import RerunHelper
NAME = 'Fast test (actions)'
def get_fasttest_cmd(workspace, output_path, ccache_path, repo_path, pr_number, commit_sha, image):
    """Assemble the ``docker run`` command line for the fast-test image.

    The host directories are bind-mounted into the container and the PR
    context is passed through environment variables.
    """
    segments = [
        "docker run --cap-add=SYS_PTRACE",
        "-e FASTTEST_WORKSPACE=/fasttest-workspace -e FASTTEST_OUTPUT=/test_output",
        "-e FASTTEST_SOURCE=/ClickHouse --cap-add=SYS_PTRACE",
        f"-e PULL_REQUEST_NUMBER={pr_number} -e COMMIT_SHA={commit_sha} -e COPY_CLICKHOUSE_BINARY_TO_OUTPUT=1",
        f"--volume={workspace}:/fasttest-workspace --volume={repo_path}:/ClickHouse --volume={output_path}:/test_output",
        f"--volume={ccache_path}:/fasttest-workspace/ccache {image}",
    ]
    return " ".join(segments)
def process_results(result_folder):
    """Parse the status and per-test results written by the fasttest container.

    Args:
        result_folder (str): directory expected to contain
            ``check_status.tsv`` (one row: state, description) and
            ``test_results.tsv`` (one row per test).

    Returns:
        tuple: ``(state, description, test_results, additional_files)``
        where ``additional_files`` lists every regular file in
        ``result_folder`` (all of them get uploaded as-is).

    Raises:
        Exception: if ``test_results.tsv`` exists but is empty.
    """
    test_results = []
    additional_files = []
    # Just upload all files from result_folder.
    # If task provides processed results, then it's responsible for
    # content of result_folder.
    if os.path.exists(result_folder):
        test_files = [f for f in os.listdir(result_folder)
                      if os.path.isfile(os.path.join(result_folder, f))]
        additional_files = [os.path.join(result_folder, f) for f in test_files]

    status_path = os.path.join(result_folder, "check_status.tsv")
    logging.info("Found test_results.tsv")
    # Use context managers so the TSV handles are closed promptly; the
    # original passed bare open() results into csv.reader and leaked them.
    with open(status_path, 'r') as status_file:
        status = list(csv.reader(status_file, delimiter='\t'))
    if len(status) != 1 or len(status[0]) != 2:
        return "error", "Invalid check_status.tsv", test_results, additional_files
    state, description = status[0][0], status[0][1]

    results_path = os.path.join(result_folder, "test_results.tsv")
    with open(results_path, 'r') as results_file:
        test_results = list(csv.reader(results_file, delimiter='\t'))
    if len(test_results) == 0:
        raise Exception("Empty results")

    return state, description, test_results, additional_files
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)

    stopwatch = Stopwatch()

    temp_path = os.getenv("TEMP_PATH", os.path.abspath("."))
    caches_path = os.getenv("CACHES_PATH", temp_path)

    if not os.path.exists(temp_path):
        os.makedirs(temp_path)

    pr_info = PRInfo(get_event())

    gh = Github(get_best_robot_token())

    # Skip the whole job if this check already finished for this commit.
    rerun_helper = RerunHelper(gh, pr_info, NAME)
    if rerun_helper.is_already_finished_by_status():
        logging.info("Check is already finished according to github status, exiting")
        sys.exit(0)

    docker_image = get_image_with_version(temp_path, 'clickhouse/fasttest')

    s3_helper = S3Helper('https://s3.amazonaws.com')

    # Directories bind-mounted into the fasttest container (see
    # get_fasttest_cmd): workspace, test output, ccache and repo checkout.
    workspace = os.path.join(temp_path, "fasttest-workspace")
    if not os.path.exists(workspace):
        os.makedirs(workspace)

    output_path = os.path.join(temp_path, "fasttest-output")
    if not os.path.exists(output_path):
        os.makedirs(output_path)

    cache_path = os.path.join(caches_path, "fasttest")
    if not os.path.exists(cache_path):
        os.makedirs(cache_path)

    repo_path = os.path.join(temp_path, "fasttest-repo")
    if not os.path.exists(repo_path):
        os.makedirs(repo_path)

    run_cmd = get_fasttest_cmd(workspace, output_path, cache_path, repo_path, pr_info.number, pr_info.sha, docker_image)
    logging.info("Going to run fasttest with cmd %s", run_cmd)

    logs_path = os.path.join(temp_path, "fasttest-logs")
    if not os.path.exists(logs_path):
        os.makedirs(logs_path)

    # Run the container, capturing stdout+stderr into one log file.
    run_log_path = os.path.join(logs_path, 'runlog.log')
    with open(run_log_path, 'w') as log:
        retcode = subprocess.Popen(run_cmd, shell=True, stderr=log, stdout=log).wait()
        if retcode == 0:
            logging.info("Run successfully")
        else:
            logging.info("Run failed")

    # The container runs as root; reclaim ownership of everything it wrote.
    subprocess.check_call(f"sudo chown -R ubuntu:ubuntu {temp_path}", shell=True)
    subprocess.check_call(f"sudo chown -R ubuntu:ubuntu {cache_path}", shell=True)

    test_output_files = os.listdir(output_path)
    additional_logs = []
    for f in test_output_files:
        additional_logs.append(os.path.join(output_path, f))

    test_log_exists = 'test_log.txt' in test_output_files or 'test_result.txt' in test_output_files
    test_result_exists = 'test_results.tsv' in test_output_files
    test_results = []
    # Each container stage leaves a marker log; the first one missing tells
    # us where the pipeline stopped and yields the failure description.
    if 'submodule_log.txt' not in test_output_files:
        description = "Cannot clone repository"
        state = "failure"
    elif 'cmake_log.txt' not in test_output_files:
        description = "Cannot fetch submodules"
        state = "failure"
    elif 'build_log.txt' not in test_output_files:
        description = "Cannot finish cmake"
        state = "failure"
    elif 'install_log.txt' not in test_output_files:
        description = "Cannot build ClickHouse"
        state = "failure"
    elif not test_log_exists and not test_result_exists:
        description = "Cannot install or start ClickHouse"
        state = "failure"
    else:
        state, description, test_results, additional_logs = process_results(output_path)

    ch_helper = ClickHouseHelper()
    mark_flaky_tests(ch_helper, NAME, test_results)

    # Upload the report, set the GitHub commit status and record the run.
    report_url = upload_results(s3_helper, pr_info.number, pr_info.sha, test_results, [run_log_path] + additional_logs, NAME, True)
    print("::notice ::Report url: {}".format(report_url))
    post_commit_status(gh, pr_info.sha, NAME, description, state, report_url)

    prepared_events = prepare_tests_results_for_clickhouse(pr_info, test_results, state, stopwatch.duration_seconds, stopwatch.start_time_str, report_url, NAME)
    ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events)

    # Refuse other checks to run if fast test failed
    if state != 'success':
        if 'force-tests' in pr_info.labels:
            print("'force-tests' enabled, will report success")
        else:
            sys.exit(1)
| 40.012821 | 160 | 0.714354 |
7953ddc681294c6458bf383284943581703fb418 | 610 | py | Python | jsngram/users/admin.py | jjh0106/jsngram | 74f2fd79ddd6a6975d3c981ca9cb5bbed050f532 | [
"MIT"
] | null | null | null | jsngram/users/admin.py | jjh0106/jsngram | 74f2fd79ddd6a6975d3c981ca9cb5bbed050f532 | [
"MIT"
] | 11 | 2020-06-05T20:06:56.000Z | 2022-02-17T20:23:22.000Z | jsngram/users/admin.py | jjh0106/jsngram | 74f2fd79ddd6a6975d3c981ca9cb5bbed050f532 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.contrib.auth import admin as auth_admin
from django.contrib.auth import get_user_model
from jsngram.users.forms import UserChangeForm, UserCreationForm
User = get_user_model()
@admin.register(User)
class UserAdmin(auth_admin.UserAdmin):
    """Admin configuration for the project's custom user model."""

    # Forms used when editing / creating users in the admin.
    form = UserChangeForm
    add_form = UserCreationForm
    # Prepend a "User Profile" section to the stock UserAdmin fieldsets.
    fieldsets = (
        ("User Profile", {"fields": ("name", "followers", "following", "profile_image", "bio", "website", "gender")}),
    ) + auth_admin.UserAdmin.fieldsets
    list_display = ["username", "name", "is_superuser"]
    search_fields = ["name"]
| 30.5 | 118 | 0.718033 |
7953de15d837ee9d94c324f5ebb4d147cc0bd346 | 122,627 | py | Python | numpy/lib/tests/test_function_base.py | bdvd/numpy | cea994fac86dbc5af7bee3f15fc5b475a99163fa | [
"BSD-3-Clause"
] | 1 | 2020-12-07T17:25:19.000Z | 2020-12-07T17:25:19.000Z | numpy/lib/tests/test_function_base.py | sahanabalappa/numpy | cea994fac86dbc5af7bee3f15fc5b475a99163fa | [
"BSD-3-Clause"
] | 20 | 2020-02-14T11:37:52.000Z | 2020-02-18T21:18:45.000Z | numpy/lib/tests/test_function_base.py | sahanabalappa/numpy | cea994fac86dbc5af7bee3f15fc5b475a99163fa | [
"BSD-3-Clause"
] | null | null | null | import operator
import warnings
import sys
import decimal
from fractions import Fraction
import pytest
import numpy as np
from numpy import ma
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_almost_equal,
assert_array_almost_equal, assert_raises, assert_allclose, IS_PYPY,
assert_warns, assert_raises_regex, suppress_warnings, HAS_REFCOUNT,
)
import numpy.lib.function_base as nfb
from numpy.random import rand
from numpy.lib import (
add_newdoc_ufunc, angle, average, bartlett, blackman, corrcoef, cov,
delete, diff, digitize, extract, flipud, gradient, hamming, hanning,
i0, insert, interp, kaiser, meshgrid, msort, piecewise, place, rot90,
select, setxor1d, sinc, trapz, trim_zeros, unwrap, unique, vectorize
)
from numpy.compat import long
def get_mat(n):
    """Return an n-by-n integer matrix whose (i, j) entry is i + j."""
    idx = np.arange(n)
    return np.add.outer(idx, idx)
def _make_complex(real, imag):
"""
Like real + 1j * imag, but behaves as expected when imag contains non-finite
values
"""
ret = np.zeros(np.broadcast(real, imag).shape, np.complex_)
ret.real = real
ret.imag = imag
return ret
class TestRot90:
    """Tests for np.rot90 (validation, k-periodicity, axis pairs)."""

    def test_basic(self):
        # Invalid inputs: 1-D array, too many / duplicate / out-of-range axes.
        assert_raises(ValueError, rot90, np.ones(4))
        assert_raises(ValueError, rot90, np.ones((2,2,2)), axes=(0,1,2))
        assert_raises(ValueError, rot90, np.ones((2,2)), axes=(0,2))
        assert_raises(ValueError, rot90, np.ones((2,2)), axes=(1,1))
        assert_raises(ValueError, rot90, np.ones((2,2,2)), axes=(-2,1))

        a = [[0, 1, 2],
             [3, 4, 5]]
        b1 = [[2, 5],
              [1, 4],
              [0, 3]]
        b2 = [[5, 4, 3],
              [2, 1, 0]]
        b3 = [[3, 0],
              [4, 1],
              [5, 2]]
        b4 = [[0, 1, 2],
              [3, 4, 5]]

        # k has period 4: the same result must repeat every four turns.
        for k in range(-3, 13, 4):
            assert_equal(rot90(a, k=k), b1)
        for k in range(-2, 13, 4):
            assert_equal(rot90(a, k=k), b2)
        for k in range(-1, 13, 4):
            assert_equal(rot90(a, k=k), b3)
        for k in range(0, 13, 4):
            assert_equal(rot90(a, k=k), b4)

        # Rotating forward then backward restores the input; swapping the
        # axis order is the same as negating k.
        assert_equal(rot90(rot90(a, axes=(0,1)), axes=(1,0)), a)
        assert_equal(rot90(a, k=1, axes=(1,0)), rot90(a, k=-1, axes=(0,1)))

    def test_axes(self):
        a = np.ones((50, 40, 3))
        assert_equal(rot90(a).shape, (40, 50, 3))
        # Negative axes are equivalent to their positive counterparts.
        assert_equal(rot90(a, axes=(0,2)), rot90(a, axes=(0,-1)))
        assert_equal(rot90(a, axes=(1,2)), rot90(a, axes=(-2,-1)))

    def test_rotation_axes(self):
        a = np.arange(8).reshape((2,2,2))

        # Expected results for rotating around each axis pair once.
        a_rot90_01 = [[[2, 3],
                       [6, 7]],
                      [[0, 1],
                       [4, 5]]]
        a_rot90_12 = [[[1, 3],
                       [0, 2]],
                      [[5, 7],
                       [4, 6]]]
        a_rot90_20 = [[[4, 0],
                       [6, 2]],
                      [[5, 1],
                       [7, 3]]]
        a_rot90_10 = [[[4, 5],
                       [0, 1]],
                      [[6, 7],
                       [2, 3]]]

        assert_equal(rot90(a, axes=(0, 1)), a_rot90_01)
        assert_equal(rot90(a, axes=(1, 0)), a_rot90_10)
        assert_equal(rot90(a, axes=(1, 2)), a_rot90_12)

        # Rotating k times equals rotating the once-rotated array k-1 times.
        for k in range(1,5):
            assert_equal(rot90(a, k=k, axes=(2, 0)),
                         rot90(a_rot90_20, k=k-1, axes=(2, 0)))
class TestFlip:
    """Tests for np.flip along single and multiple axes."""

    def test_axes(self):
        # Out-of-range axes must raise AxisError.
        assert_raises(np.AxisError, np.flip, np.ones(4), axis=1)
        assert_raises(np.AxisError, np.flip, np.ones((4, 4)), axis=2)
        assert_raises(np.AxisError, np.flip, np.ones((4, 4)), axis=-3)
        assert_raises(np.AxisError, np.flip, np.ones((4, 4)), axis=(0, 3))

    def test_basic_lr(self):
        # flip along axis 1 matches reversed column slicing (fliplr).
        a = get_mat(4)
        b = a[:, ::-1]
        assert_equal(np.flip(a, 1), b)
        a = [[0, 1, 2],
             [3, 4, 5]]
        b = [[2, 1, 0],
             [5, 4, 3]]
        assert_equal(np.flip(a, 1), b)

    def test_basic_ud(self):
        # flip along axis 0 matches reversed row slicing (flipud).
        a = get_mat(4)
        b = a[::-1, :]
        assert_equal(np.flip(a, 0), b)
        a = [[0, 1, 2],
             [3, 4, 5]]
        b = [[3, 4, 5],
             [0, 1, 2]]
        assert_equal(np.flip(a, 0), b)

    def test_3d_swap_axis0(self):
        a = np.array([[[0, 1],
                       [2, 3]],
                      [[4, 5],
                       [6, 7]]])

        b = np.array([[[4, 5],
                       [6, 7]],
                      [[0, 1],
                       [2, 3]]])

        assert_equal(np.flip(a, 0), b)

    def test_3d_swap_axis1(self):
        a = np.array([[[0, 1],
                       [2, 3]],
                      [[4, 5],
                       [6, 7]]])

        b = np.array([[[2, 3],
                       [0, 1]],
                      [[6, 7],
                       [4, 5]]])

        assert_equal(np.flip(a, 1), b)

    def test_3d_swap_axis2(self):
        a = np.array([[[0, 1],
                       [2, 3]],
                      [[4, 5],
                       [6, 7]]])

        b = np.array([[[1, 0],
                       [3, 2]],
                      [[5, 4],
                       [7, 6]]])

        assert_equal(np.flip(a, 2), b)

    def test_4d(self):
        # Flipping axis i equals flipud applied after swapping i to front.
        a = np.arange(2 * 3 * 4 * 5).reshape(2, 3, 4, 5)
        for i in range(a.ndim):
            assert_equal(np.flip(a, i),
                         np.flipud(a.swapaxes(0, i)).swapaxes(i, 0))

    def test_default_axis(self):
        # With no axis given, every axis is flipped.
        a = np.array([[1, 2, 3],
                      [4, 5, 6]])
        b = np.array([[6, 5, 4],
                      [3, 2, 1]])
        assert_equal(np.flip(a), b)

    def test_multiple_axes(self):
        a = np.array([[[0, 1],
                       [2, 3]],
                      [[4, 5],
                       [6, 7]]])

        # An empty axis tuple is a no-op.
        assert_equal(np.flip(a, axis=()), a)

        b = np.array([[[5, 4],
                       [7, 6]],
                      [[1, 0],
                       [3, 2]]])

        assert_equal(np.flip(a, axis=(0, 2)), b)

        c = np.array([[[3, 2],
                       [1, 0]],
                      [[7, 6],
                       [5, 4]]])

        assert_equal(np.flip(a, axis=(1, 2)), c)
class TestAny:
    """Tests for np.any and its legacy alias np.sometrue."""

    def test_basic(self):
        y1 = [0, 0, 1, 0]
        y2 = [0, 0, 0, 0]
        y3 = [1, 0, 1, 0]
        assert_(np.any(y1))
        assert_(np.any(y3))
        assert_(not np.any(y2))

    def test_nd(self):
        y1 = [[0, 0, 0], [0, 1, 0], [1, 1, 0]]
        assert_(np.any(y1))
        # np.sometrue is the (deprecated) alias of np.any, tested here
        # deliberately with explicit axes.
        assert_array_equal(np.sometrue(y1, axis=0), [1, 1, 0])
        assert_array_equal(np.sometrue(y1, axis=1), [0, 1, 1])
class TestAll:
    """Tests for np.all and its legacy alias np.alltrue."""

    def test_basic(self):
        y1 = [0, 1, 1, 0]
        y2 = [0, 0, 0, 0]
        y3 = [1, 1, 1, 1]
        assert_(not np.all(y1))
        assert_(np.all(y3))
        assert_(not np.all(y2))
        # All-zero input becomes all-True after logical negation.
        assert_(np.all(~np.array(y2)))

    def test_nd(self):
        y1 = [[0, 0, 1], [0, 1, 1], [1, 1, 1]]
        assert_(not np.all(y1))
        # np.alltrue is the (deprecated) alias of np.all, tested here
        # deliberately with explicit axes.
        assert_array_equal(np.alltrue(y1, axis=0), [0, 0, 1])
        assert_array_equal(np.alltrue(y1, axis=1), [0, 0, 1])
class TestCopy:
    """Tests for np.copy: independence of the copy and memory-order
    preservation."""

    def test_basic(self):
        a = np.array([[1, 2], [3, 4]])
        a_copy = np.copy(a)
        assert_array_equal(a, a_copy)
        # Writing into the copy must not affect the source.
        a_copy[0, 0] = 10
        assert_equal(a[0, 0], 1)
        assert_equal(a_copy[0, 0], 10)

    def test_order(self):
        # It turns out that people rely on np.copy() preserving order by
        # default; changing this broke scikit-learn:
        # github.com/scikit-learn/scikit-learn/commit/7842748cf777412c506a8c0ed28090711d3a3783  # noqa
        a = np.array([[1, 2], [3, 4]])
        assert_(a.flags.c_contiguous)
        assert_(not a.flags.f_contiguous)
        a_fort = np.array([[1, 2], [3, 4]], order="F")
        assert_(not a_fort.flags.c_contiguous)
        assert_(a_fort.flags.f_contiguous)
        # C-ordered input yields a C-ordered copy...
        a_copy = np.copy(a)
        assert_(a_copy.flags.c_contiguous)
        assert_(not a_copy.flags.f_contiguous)
        # ...and Fortran-ordered input yields a Fortran-ordered copy.
        a_fort_copy = np.copy(a_fort)
        assert_(not a_fort_copy.flags.c_contiguous)
        assert_(a_fort_copy.flags.f_contiguous)
class TestAverage:
    """Tests for np.average: plain means, weighting, the `returned`
    scale factor, subclass preservation, dtype upcasting and object
    dtypes."""

    def test_basic(self):
        y1 = np.array([1, 2, 3])
        assert_(average(y1, axis=0) == 2.)
        y2 = np.array([1., 2., 3.])
        assert_(average(y2, axis=0) == 2.)
        y3 = [0., 0., 0.]
        assert_(average(y3, axis=0) == 0.)

        # Unweighted average must agree with ndarray.mean along each axis.
        y4 = np.ones((4, 4))
        y4[0, 1] = 0
        y4[1, 0] = 2
        assert_almost_equal(y4.mean(0), average(y4, 0))
        assert_almost_equal(y4.mean(1), average(y4, 1))

        y5 = rand(5, 5)
        assert_almost_equal(y5.mean(0), average(y5, 0))
        assert_almost_equal(y5.mean(1), average(y5, 1))

    def test_weights(self):
        # 1-D weights: sum(w*y)/sum(w).
        y = np.arange(10)
        w = np.arange(10)
        actual = average(y, weights=w)
        desired = (np.arange(10) ** 2).sum() * 1. / np.arange(10).sum()
        assert_almost_equal(actual, desired)

        # 1-D weights broadcast along the reduced axis.
        y1 = np.array([[1, 2, 3], [4, 5, 6]])
        w0 = [1, 2]
        actual = average(y1, weights=w0, axis=0)
        desired = np.array([3., 4., 5.])
        assert_almost_equal(actual, desired)

        w1 = [0, 0, 1]
        actual = average(y1, weights=w1, axis=1)
        desired = np.array([3., 6.])
        assert_almost_equal(actual, desired)

        # This should raise an error. Can we test for that ?
        # assert_equal(average(y1, weights=w1), 9./2.)

        # 2D Case
        w2 = [[0, 0, 1], [0, 0, 2]]
        desired = np.array([3., 6.])
        assert_array_equal(average(y1, weights=w2, axis=1), desired)
        assert_equal(average(y1, weights=w2), 5.)

        # Result dtype follows the usual promotion of values and weights.
        y3 = rand(5).astype(np.float32)
        w3 = rand(5).astype(np.float64)
        assert_(np.average(y3, weights=w3).dtype == np.result_type(y3, w3))

    def test_returned(self):
        # returned=True also yields the sum of weights (the scale factor).
        y = np.array([[1, 2, 3], [4, 5, 6]])

        # No weights
        avg, scl = average(y, returned=True)
        assert_equal(scl, 6.)

        avg, scl = average(y, 0, returned=True)
        assert_array_equal(scl, np.array([2., 2., 2.]))

        avg, scl = average(y, 1, returned=True)
        assert_array_equal(scl, np.array([3., 3.]))

        # With weights
        w0 = [1, 2]
        avg, scl = average(y, weights=w0, axis=0, returned=True)
        assert_array_equal(scl, np.array([3., 3., 3.]))

        w1 = [1, 2, 3]
        avg, scl = average(y, weights=w1, axis=1, returned=True)
        assert_array_equal(scl, np.array([6., 6.]))

        w2 = [[0, 0, 1], [1, 2, 3]]
        avg, scl = average(y, weights=w2, axis=1, returned=True)
        assert_array_equal(scl, np.array([1., 6.]))

    def test_subclasses(self):
        # ndarray subclasses must be preserved by average().
        class subclass(np.ndarray):
            pass
        a = np.array([[1,2],[3,4]]).view(subclass)
        w = np.array([[1,2],[3,4]]).view(subclass)

        assert_equal(type(np.average(a)), subclass)
        assert_equal(type(np.average(a, weights=w)), subclass)

    def test_upcasting(self):
        # (value dtype, weight dtype, expected result dtype) triples.
        typs = [('i4', 'i4', 'f8'), ('i4', 'f4', 'f8'), ('f4', 'i4', 'f8'),
                ('f4', 'f4', 'f4'), ('f4', 'f8', 'f8')]
        for at, wt, rt in typs:
            a = np.array([[1,2],[3,4]], dtype=at)
            w = np.array([[1,2],[3,4]], dtype=wt)
            assert_equal(np.average(a, weights=w).dtype, np.dtype(rt))

    def test_object_dtype(self):
        # Object arrays (Decimal) must average correctly too.
        a = np.array([decimal.Decimal(x) for x in range(10)])
        w = np.array([decimal.Decimal(1) for _ in range(10)])
        w /= w.sum()
        assert_almost_equal(a.mean(0), average(a, weights=w))
class TestSelect:
    """Tests for np.select: element-wise choice driven by condition arrays."""

    # Read-only fixtures shared by the tests: one choice array and one
    # condition array per "branch".
    choices = [np.array([1, 2, 3]),
               np.array([4, 5, 6]),
               np.array([7, 8, 9])]
    conditions = [np.array([False, False, False]),
                  np.array([False, True, False]),
                  np.array([False, False, True])]

    def _select(self, cond, values, default=0):
        # Reference implementation: for each position, collect the values of
        # all matching conditions (the fixtures are disjoint, so this agrees
        # with np.select's first-match rule), falling back to `default` when
        # no condition matches.
        output = []
        for m in range(len(cond)):
            output += [V[m] for V, C in zip(values, cond) if C[m]] or [default]
        return output

    def test_basic(self):
        choices = self.choices
        conditions = self.conditions
        assert_array_equal(select(conditions, choices, default=15),
                           self._select(conditions, choices, default=15))
        # select() must not have consumed or mutated the fixtures.
        assert_equal(len(choices), 3)
        assert_equal(len(conditions), 3)

    def test_broadcasting(self):
        # Scalar-ish conditions/choices broadcast against array ones.
        conditions = [np.array(True), np.array([False, True, False])]
        choices = [1, np.arange(12).reshape(4, 3)]
        assert_array_equal(select(conditions, choices), np.ones((4, 3)))
        # default can broadcast too:
        assert_equal(select([True], [0], default=[0]).shape, (1,))

    def test_return_dtype(self):
        # A complex scalar default promotes the result dtype.
        assert_equal(select(self.conditions, self.choices, 1j).dtype,
                     np.complex_)
        # But the conditions need to be stronger then the scalar default
        # if it is scalar.
        choices = [choice.astype(np.int8) for choice in self.choices]
        assert_equal(select(self.conditions, choices).dtype, np.int8)
        # Only the NaN position matches the mask; all other positions take
        # the (integer 0) default.
        d = np.array([1, 2, 3, np.nan, 5, 7])
        m = np.isnan(d)
        assert_equal(select([m], [d]), [0, 0, 0, np.nan, 0, 0])

    def test_deprecated_empty(self):
        # Empty condition/choice lists are a hard error.
        assert_raises(ValueError, select, [], [], 3j)
        assert_raises(ValueError, select, [], [])

    def test_non_bool_deprecation(self):
        # Non-boolean condition arrays are rejected with TypeError.
        choices = self.choices
        conditions = self.conditions[:]
        conditions[0] = conditions[0].astype(np.int_)
        assert_raises(TypeError, select, conditions, choices)
        conditions[0] = conditions[0].astype(np.uint8)
        assert_raises(TypeError, select, conditions, choices)
        assert_raises(TypeError, select, conditions, choices)

    def test_many_arguments(self):
        # This used to be limited by NPY_MAXARGS == 32
        conditions = [np.array([False])] * 100
        choices = [np.array([1])] * 100
        select(conditions, choices)
class TestInsert:
def test_basic(self):
a = [1, 2, 3]
assert_equal(insert(a, 0, 1), [1, 1, 2, 3])
assert_equal(insert(a, 3, 1), [1, 2, 3, 1])
assert_equal(insert(a, [1, 1, 1], [1, 2, 3]), [1, 1, 2, 3, 2, 3])
assert_equal(insert(a, 1, [1, 2, 3]), [1, 1, 2, 3, 2, 3])
assert_equal(insert(a, [1, -1, 3], 9), [1, 9, 2, 9, 3, 9])
assert_equal(insert(a, slice(-1, None, -1), 9), [9, 1, 9, 2, 9, 3])
assert_equal(insert(a, [-1, 1, 3], [7, 8, 9]), [1, 8, 2, 7, 3, 9])
b = np.array([0, 1], dtype=np.float64)
assert_equal(insert(b, 0, b[0]), [0., 0., 1.])
assert_equal(insert(b, [], []), b)
# Bools will be treated differently in the future:
# assert_equal(insert(a, np.array([True]*4), 9), [9, 1, 9, 2, 9, 3, 9])
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', FutureWarning)
assert_equal(
insert(a, np.array([True] * 4), 9), [1, 9, 9, 9, 9, 2, 3])
assert_(w[0].category is FutureWarning)
def test_multidim(self):
a = [[1, 1, 1]]
r = [[2, 2, 2],
[1, 1, 1]]
assert_equal(insert(a, 0, [1]), [1, 1, 1, 1])
assert_equal(insert(a, 0, [2, 2, 2], axis=0), r)
assert_equal(insert(a, 0, 2, axis=0), r)
assert_equal(insert(a, 2, 2, axis=1), [[1, 1, 2, 1]])
a = np.array([[1, 1], [2, 2], [3, 3]])
b = np.arange(1, 4).repeat(3).reshape(3, 3)
c = np.concatenate(
(a[:, 0:1], np.arange(1, 4).repeat(3).reshape(3, 3).T,
a[:, 1:2]), axis=1)
assert_equal(insert(a, [1], [[1], [2], [3]], axis=1), b)
assert_equal(insert(a, [1], [1, 2, 3], axis=1), c)
# scalars behave differently, in this case exactly opposite:
assert_equal(insert(a, 1, [1, 2, 3], axis=1), b)
assert_equal(insert(a, 1, [[1], [2], [3]], axis=1), c)
a = np.arange(4).reshape(2, 2)
assert_equal(insert(a[:, :1], 1, a[:, 1], axis=1), a)
assert_equal(insert(a[:1,:], 1, a[1,:], axis=0), a)
# negative axis value
a = np.arange(24).reshape((2, 3, 4))
assert_equal(insert(a, 1, a[:,:, 3], axis=-1),
insert(a, 1, a[:,:, 3], axis=2))
assert_equal(insert(a, 1, a[:, 2,:], axis=-2),
insert(a, 1, a[:, 2,:], axis=1))
# invalid axis value
assert_raises(np.AxisError, insert, a, 1, a[:, 2, :], axis=3)
assert_raises(np.AxisError, insert, a, 1, a[:, 2, :], axis=-4)
# negative axis value
a = np.arange(24).reshape((2, 3, 4))
assert_equal(insert(a, 1, a[:, :, 3], axis=-1),
insert(a, 1, a[:, :, 3], axis=2))
assert_equal(insert(a, 1, a[:, 2, :], axis=-2),
insert(a, 1, a[:, 2, :], axis=1))
def test_0d(self):
# This is an error in the future
a = np.array(1)
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', DeprecationWarning)
assert_equal(insert(a, [], 2, axis=0), np.array(2))
assert_(w[0].category is DeprecationWarning)
def test_subclass(self):
class SubClass(np.ndarray):
pass
a = np.arange(10).view(SubClass)
assert_(isinstance(np.insert(a, 0, [0]), SubClass))
assert_(isinstance(np.insert(a, [], []), SubClass))
assert_(isinstance(np.insert(a, [0, 1], [1, 2]), SubClass))
assert_(isinstance(np.insert(a, slice(1, 2), [1, 2]), SubClass))
assert_(isinstance(np.insert(a, slice(1, -2, -1), []), SubClass))
# This is an error in the future:
a = np.array(1).view(SubClass)
assert_(isinstance(np.insert(a, 0, [0]), SubClass))
def test_index_array_copied(self):
x = np.array([1, 1, 1])
np.insert([0, 1, 2], x, [3, 4, 5])
assert_equal(x, np.array([1, 1, 1]))
def test_structured_array(self):
a = np.array([(1, 'a'), (2, 'b'), (3, 'c')],
dtype=[('foo', 'i'), ('bar', 'a1')])
val = (4, 'd')
b = np.insert(a, 0, val)
assert_array_equal(b[0], np.array(val, dtype=b.dtype))
val = [(4, 'd')] * 2
b = np.insert(a, [0, 2], val)
assert_array_equal(b[[0, 3]], np.array(val, dtype=b.dtype))
class TestAmax:
    """Tests for np.amax."""

    def test_basic(self):
        # Flat input: the global maximum.
        flat = [3, 4, 5, 10, -3, -5, 6.0]
        assert_equal(np.amax(flat), 10.0)
        # 2-D input: maxima down the columns and across the rows.
        grid = [[3, 6.0, 9.0],
                [4, 10.0, 5.0],
                [8, 3.0, 2.0]]
        assert_equal(np.amax(grid, axis=0), [8.0, 10.0, 9.0])
        assert_equal(np.amax(grid, axis=1), [9.0, 10.0, 8.0])
class TestAmin:
    """Tests for np.amin."""

    def test_basic(self):
        # Flat input: the global minimum.
        flat = [3, 4, 5, 10, -3, -5, 6.0]
        assert_equal(np.amin(flat), -5.0)
        # 2-D input: minima down the columns and across the rows.
        grid = [[3, 6.0, 9.0],
                [4, 10.0, 5.0],
                [8, 3.0, 2.0]]
        assert_equal(np.amin(grid, axis=0), [3.0, 3.0, 2.0])
        assert_equal(np.amin(grid, axis=1), [3.0, 4.0, 2.0])
class TestPtp:
    """Tests for ndarray.ptp (peak-to-peak: max - min)."""

    def test_basic(self):
        vec = np.array([3, 4, 5, 10, -3, -5, 6.0])
        assert_equal(vec.ptp(axis=0), 15.0)

        mat = np.array([[3, 6.0, 9.0],
                        [4, 10.0, 5.0],
                        [8, 3.0, 2.0]])
        assert_equal(mat.ptp(axis=0), [5.0, 7.0, 7.0])
        assert_equal(mat.ptp(axis=-1), [6.0, 6.0, 6.0])
        # keepdims retains the reduced axes with length one.
        assert_equal(mat.ptp(axis=0, keepdims=True), [[5.0, 7.0, 7.0]])
        assert_equal(mat.ptp(axis=(0, 1), keepdims=True), [[8.0]])
class TestCumsum:
    """Tests for np.cumsum across a range of dtypes."""

    def test_basic(self):
        raw1d = [1, 2, 10, 11, 6, 5, 4]
        raw2d = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
        dtypes = [np.int8, np.uint8, np.int16, np.uint16, np.int32,
                  np.uint32, np.float32, np.float64, np.complex64,
                  np.complex128]
        for dt in dtypes:
            arr1 = np.array(raw1d, dt)
            arr2 = np.array(raw2d, dt)

            # 1-D running sum.
            want = np.array([1, 3, 13, 24, 30, 35, 39], dt)
            assert_array_equal(np.cumsum(arr1, axis=0), want)

            # 2-D running sums down columns and across rows.
            want = np.array(
                [[1, 2, 3, 4], [6, 8, 10, 13], [16, 11, 14, 18]], dt)
            assert_array_equal(np.cumsum(arr2, axis=0), want)
            want = np.array(
                [[1, 3, 6, 10], [5, 11, 18, 27], [10, 13, 17, 22]], dt)
            assert_array_equal(np.cumsum(arr2, axis=1), want)
class TestProd:
    """Tests for np.prod / ndarray.prod.

    The historical ``if ctype in ['1', 'b']`` overflow branch compared dtype
    *type objects* (e.g. np.int16) against Numeric-era one-character typecode
    strings, so it could never be True; the dead branch has been removed.
    None of the dtypes tested below is single-byte, so no overflow handling
    is needed.
    """

    def test_basic(self):
        ba = [1, 2, 10, 11, 6, 5, 4]
        ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
        for ctype in [np.int16, np.uint16, np.int32, np.uint32,
                      np.float32, np.float64, np.complex64, np.complex128]:
            a = np.array(ba, ctype)
            a2 = np.array(ba2, ctype)
            # Product of the flat vector, and per-axis products of the 2-D.
            assert_equal(a.prod(axis=0), 26400)
            assert_array_equal(a2.prod(axis=0),
                               np.array([50, 36, 84, 180], ctype))
            assert_array_equal(a2.prod(axis=-1),
                               np.array([24, 1890, 600], ctype))
class TestCumprod:
    """Tests for np.cumprod.

    As in TestProd, the historical ``if ctype in ['1', 'b']`` branch compared
    dtype type objects against Numeric-era typecode strings and was
    unreachable; it has been removed (no dtype tested here is single-byte).
    """

    def test_basic(self):
        ba = [1, 2, 10, 11, 6, 5, 4]
        ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
        for ctype in [np.int16, np.uint16, np.int32, np.uint32,
                      np.float32, np.float64, np.complex64, np.complex128]:
            a = np.array(ba, ctype)
            a2 = np.array(ba2, ctype)
            # Running product of the vector and per-axis of the 2-D array.
            assert_array_equal(np.cumprod(a, axis=-1),
                               np.array([1, 2, 20, 220,
                                         1320, 6600, 26400], ctype))
            assert_array_equal(np.cumprod(a2, axis=0),
                               np.array([[1, 2, 3, 4],
                                         [5, 12, 21, 36],
                                         [50, 36, 84, 180]], ctype))
            assert_array_equal(np.cumprod(a2, axis=-1),
                               np.array([[1, 2, 6, 24],
                                         [5, 30, 210, 1890],
                                         [10, 30, 120, 600]], ctype))
class TestDiff:
    """Tests for np.diff (n-th discrete difference along an axis)."""

    def test_basic(self):
        x = [1, 4, 6, 7, 12]
        out = np.array([3, 2, 1, 5])
        out2 = np.array([-1, -1, 4])
        out3 = np.array([0, 5])
        assert_array_equal(diff(x), out)
        assert_array_equal(diff(x, n=2), out2)
        assert_array_equal(diff(x, n=3), out3)

        x = [1.1, 2.2, 3.0, -0.2, -0.1]
        out = np.array([1.1, 0.8, -3.2, 0.1])
        assert_almost_equal(diff(x), out)

        # Boolean input: differences behave like XOR of neighbours.
        x = [True, True, False, False]
        out = np.array([False, True, False])
        out2 = np.array([True, True])
        assert_array_equal(diff(x), out)
        assert_array_equal(diff(x, n=2), out2)

    def test_axis(self):
        x = np.zeros((10, 20, 30))
        x[:, 1::2, :] = 1
        exp = np.ones((10, 19, 30))
        exp[:, 1::2, :] = -1
        assert_array_equal(diff(x), np.zeros((10, 20, 29)))
        assert_array_equal(diff(x, axis=-1), np.zeros((10, 20, 29)))
        assert_array_equal(diff(x, axis=0), np.zeros((9, 20, 30)))
        assert_array_equal(diff(x, axis=1), exp)
        assert_array_equal(diff(x, axis=-2), exp)
        assert_raises(np.AxisError, diff, x, axis=3)
        assert_raises(np.AxisError, diff, x, axis=-4)

        # A 0-d array has no axis to difference along.
        x = np.array(1.11111111111, np.float64)
        assert_raises(ValueError, diff, x)

    def test_nd(self):
        # Multi-dimensional input: compare against hand-built differences.
        x = 20 * rand(10, 20, 30)
        out1 = x[:, :, 1:] - x[:, :, :-1]
        out2 = out1[:, :, 1:] - out1[:, :, :-1]
        out3 = x[1:, :, :] - x[:-1, :, :]
        out4 = out3[1:, :, :] - out3[:-1, :, :]
        assert_array_equal(diff(x), out1)
        assert_array_equal(diff(x, n=2), out2)
        assert_array_equal(diff(x, axis=0), out3)
        assert_array_equal(diff(x, n=2, axis=0), out4)

    def test_n(self):
        x = list(range(3))
        assert_raises(ValueError, diff, x, n=-1)
        output = [diff(x, n=n) for n in range(1, 5)]
        expected = [[1, 1], [0], [], []]
        # n=0 is the identity (returns the input object itself).
        assert_(diff(x, n=0) is x)
        # NOTE: the loop target deliberately rebinds `expected` to each
        # per-n expectation from the list above.
        for n, (expected, out) in enumerate(zip(expected, output), start=1):
            assert_(type(out) is np.ndarray)
            assert_array_equal(out, expected)
            assert_equal(out.dtype, np.int_)
            assert_equal(len(out), max(0, len(x) - n))

    def test_times(self):
        # datetime64 input differences to timedelta64, keeping the unit.
        x = np.arange('1066-10-13', '1066-10-16', dtype=np.datetime64)
        expected = [
            np.array([1, 1], dtype='timedelta64[D]'),
            np.array([0], dtype='timedelta64[D]'),
        ]
        expected.extend([np.array([], dtype='timedelta64[D]')] * 3)
        for n, exp in enumerate(expected, start=1):
            out = diff(x, n=n)
            assert_array_equal(out, exp)
            assert_equal(out.dtype, exp.dtype)

    def test_subclass(self):
        # Masked arrays: the diff of any pair involving a masked element
        # is masked, and the subclass is preserved.
        x = ma.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]],
                     mask=[[False, False], [True, False],
                           [False, True], [True, True], [False, False]])
        out = diff(x)
        assert_array_equal(out.data, [[1], [1], [1], [1], [1]])
        assert_array_equal(out.mask, [[False], [True],
                                      [True], [True], [False]])
        assert_(type(out) is type(x))

        out3 = diff(x, n=3)
        assert_array_equal(out3.data, [[], [], [], [], []])
        assert_array_equal(out3.mask, [[], [], [], [], []])
        assert_(type(out3) is type(x))

    def test_prepend(self):
        x = np.arange(5) + 1
        assert_array_equal(diff(x, prepend=0), np.ones(5))
        assert_array_equal(diff(x, prepend=[0]), np.ones(5))
        # prepend=0 makes diff invertible by cumsum.
        assert_array_equal(np.cumsum(np.diff(x, prepend=0)), x)
        assert_array_equal(diff(x, prepend=[-1, 0]), np.ones(6))

        x = np.arange(4).reshape(2, 2)
        result = np.diff(x, axis=1, prepend=0)
        expected = [[0, 1], [2, 1]]
        assert_array_equal(result, expected)
        result = np.diff(x, axis=1, prepend=[[0], [0]])
        assert_array_equal(result, expected)

        result = np.diff(x, axis=0, prepend=0)
        expected = [[0, 1], [2, 2]]
        assert_array_equal(result, expected)
        result = np.diff(x, axis=0, prepend=[[0, 0]])
        assert_array_equal(result, expected)

        # Mismatched prepend shape / invalid axis are errors.
        assert_raises(ValueError, np.diff, x, prepend=np.zeros((3,3)))
        assert_raises(np.AxisError, diff, x, prepend=0, axis=3)

    def test_append(self):
        x = np.arange(5)
        result = diff(x, append=0)
        expected = [1, 1, 1, 1, -4]
        assert_array_equal(result, expected)
        result = diff(x, append=[0])
        assert_array_equal(result, expected)
        result = diff(x, append=[0, 2])
        expected = expected + [2]
        assert_array_equal(result, expected)

        x = np.arange(4).reshape(2, 2)
        result = np.diff(x, axis=1, append=0)
        expected = [[1, -1], [1, -3]]
        assert_array_equal(result, expected)
        result = np.diff(x, axis=1, append=[[0], [0]])
        assert_array_equal(result, expected)

        result = np.diff(x, axis=0, append=0)
        expected = [[2, 2], [-2, -3]]
        assert_array_equal(result, expected)
        result = np.diff(x, axis=0, append=[[0, 0]])
        assert_array_equal(result, expected)

        # Mismatched append shape / invalid axis are errors.
        assert_raises(ValueError, np.diff, x, append=np.zeros((3,3)))
        assert_raises(np.AxisError, diff, x, append=0, axis=3)
class TestDelete:
def setup(self):
self.a = np.arange(5)
self.nd_a = np.arange(5).repeat(2).reshape(1, 5, 2)
def _check_inverse_of_slicing(self, indices):
a_del = delete(self.a, indices)
nd_a_del = delete(self.nd_a, indices, axis=1)
msg = 'Delete failed for obj: %r' % indices
# NOTE: The cast should be removed after warning phase for bools
if not isinstance(indices, (slice, int, long, np.integer)):
indices = np.asarray(indices, dtype=np.intp)
indices = indices[(indices >= 0) & (indices < 5)]
assert_array_equal(setxor1d(a_del, self.a[indices, ]), self.a,
err_msg=msg)
xor = setxor1d(nd_a_del[0,:, 0], self.nd_a[0, indices, 0])
assert_array_equal(xor, self.nd_a[0,:, 0], err_msg=msg)
def test_slices(self):
lims = [-6, -2, 0, 1, 2, 4, 5]
steps = [-3, -1, 1, 3]
for start in lims:
for stop in lims:
for step in steps:
s = slice(start, stop, step)
self._check_inverse_of_slicing(s)
def test_fancy(self):
# Deprecation/FutureWarning tests should be kept after change.
self._check_inverse_of_slicing(np.array([[0, 1], [2, 1]]))
with warnings.catch_warnings():
warnings.filterwarnings('error', category=DeprecationWarning)
assert_raises(DeprecationWarning, delete, self.a, [100])
assert_raises(DeprecationWarning, delete, self.a, [-100])
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', category=FutureWarning)
self._check_inverse_of_slicing([0, -1, 2, 2])
obj = np.array([True, False, False], dtype=bool)
self._check_inverse_of_slicing(obj)
assert_(w[0].category is FutureWarning)
assert_(w[1].category is FutureWarning)
def test_single(self):
self._check_inverse_of_slicing(0)
self._check_inverse_of_slicing(-4)
def test_0d(self):
a = np.array(1)
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', DeprecationWarning)
assert_equal(delete(a, [], axis=0), a)
assert_(w[0].category is DeprecationWarning)
def test_subclass(self):
class SubClass(np.ndarray):
pass
a = self.a.view(SubClass)
assert_(isinstance(delete(a, 0), SubClass))
assert_(isinstance(delete(a, []), SubClass))
assert_(isinstance(delete(a, [0, 1]), SubClass))
assert_(isinstance(delete(a, slice(1, 2)), SubClass))
assert_(isinstance(delete(a, slice(1, -2)), SubClass))
def test_array_order_preserve(self):
# See gh-7113
k = np.arange(10).reshape(2, 5, order='F')
m = delete(k, slice(60, None), axis=1)
# 'k' is Fortran ordered, and 'm' should have the
# same ordering as 'k' and NOT become C ordered
assert_equal(m.flags.c_contiguous, k.flags.c_contiguous)
assert_equal(m.flags.f_contiguous, k.flags.f_contiguous)
class TestGradient:
    """Tests for np.gradient (finite-difference derivative estimates)."""

    def test_basic(self):
        v = [[1, 1], [3, 4]]
        x = np.array(v)
        # Expected per-axis gradients of the 2x2 input.
        dx = [np.array([[2., 3.], [2., 3.]]),
              np.array([[0., 0.], [1., 1.]])]
        assert_array_equal(gradient(x), dx)
        assert_array_equal(gradient(v), dx)

    def test_args(self):
        # Exercise the accepted spacing-argument forms; these calls just
        # have to succeed, the values are not checked here.
        dx = np.cumsum(np.ones(5))
        dx_uneven = [1., 2., 5., 9., 11.]
        f_2d = np.arange(25).reshape(5, 5)

        # distances must be scalars or have size equal to gradient[axis]
        gradient(np.arange(5), 3.)
        gradient(np.arange(5), np.array(3.))
        gradient(np.arange(5), dx)
        # dy is set equal to dx because scalar
        gradient(f_2d, 1.5)
        gradient(f_2d, np.array(1.5))

        gradient(f_2d, dx_uneven, dx_uneven)
        # mix between even and uneven spaces and
        # mix between scalar and vector
        gradient(f_2d, dx, 2)

        # 2D but axis specified
        gradient(f_2d, dx, axis=1)

        # 2d coordinate arguments are not yet allowed
        assert_raises_regex(ValueError, '.*scalars or 1d',
                            gradient, f_2d, np.stack([dx]*2, axis=-1), 1)

    def test_badargs(self):
        f_2d = np.arange(25).reshape(5, 5)
        x = np.cumsum(np.ones(5))

        # wrong sizes
        assert_raises(ValueError, gradient, f_2d, x, np.ones(2))
        assert_raises(ValueError, gradient, f_2d, 1, np.ones(2))
        assert_raises(ValueError, gradient, f_2d, np.ones(2), np.ones(2))
        # wrong number of arguments
        assert_raises(TypeError, gradient, f_2d, x)
        assert_raises(TypeError, gradient, f_2d, x, axis=(0,1))
        assert_raises(TypeError, gradient, f_2d, x, x, x)
        assert_raises(TypeError, gradient, f_2d, 1, 1, 1)
        assert_raises(TypeError, gradient, f_2d, x, x, axis=1)
        assert_raises(TypeError, gradient, f_2d, 1, 1, axis=1)

    def test_datetime64(self):
        # Make sure gradient() can handle special types like datetime64
        x = np.array(
            ['1910-08-16', '1910-08-11', '1910-08-10', '1910-08-12',
             '1910-10-12', '1910-12-12', '1912-12-12'],
            dtype='datetime64[D]')
        dx = np.array(
            [-5, -3, 0, 31, 61, 396, 731],
            dtype='timedelta64[D]')
        assert_array_equal(gradient(x), dx)
        assert_(dx.dtype == np.dtype('timedelta64[D]'))

    def test_masked(self):
        # Make sure that gradient supports subclasses like masked arrays
        x = np.ma.array([[1, 1], [3, 4]],
                        mask=[[False, False], [False, False]])
        out = gradient(x)[0]
        assert_equal(type(out), type(x))
        # And make sure that the output and input don't have aliased mask
        # arrays
        assert_(x._mask is not out._mask)
        # Also check that edge_order=2 doesn't alter the original mask
        x2 = np.ma.arange(5)
        x2[2] = np.ma.masked
        np.gradient(x2, edge_order=2)
        assert_array_equal(x2.mask, [False, False, True, False, False])

    def test_second_order_accurate(self):
        # Testing that the relative numerical error is less that 3% for
        # this example problem. This corresponds to second order
        # accurate finite differences for all interior and boundary
        # points.
        x = np.linspace(0, 1, 10)
        dx = x[1] - x[0]
        y = 2 * x ** 3 + 4 * x ** 2 + 2 * x
        analytical = 6 * x ** 2 + 8 * x + 2
        num_error = np.abs((np.gradient(y, dx, edge_order=2) / analytical) - 1)
        assert_(np.all(num_error < 0.03) == True)

        # test with unevenly spaced
        np.random.seed(0)
        x = np.sort(np.random.random(10))
        y = 2 * x ** 3 + 4 * x ** 2 + 2 * x
        analytical = 6 * x ** 2 + 8 * x + 2
        num_error = np.abs((np.gradient(y, x, edge_order=2) / analytical) - 1)
        assert_(np.all(num_error < 0.03) == True)

    def test_spacing(self):
        # Cross-check scalar, even, and uneven coordinate spacings for
        # both edge orders, on a 2-D function.
        f = np.array([0, 2., 3., 4., 5., 5.])
        f = np.tile(f, (6,1)) + f.reshape(-1, 1)
        x_uneven = np.array([0., 0.5, 1., 3., 5., 7.])
        x_even = np.arange(6.)

        fdx_even_ord1 = np.tile([2., 1.5, 1., 1., 0.5, 0.], (6,1))
        fdx_even_ord2 = np.tile([2.5, 1.5, 1., 1., 0.5, -0.5], (6,1))
        fdx_uneven_ord1 = np.tile([4., 3., 1.7, 0.5, 0.25, 0.], (6,1))
        fdx_uneven_ord2 = np.tile([5., 3., 1.7, 0.5, 0.25, -0.25], (6,1))

        # evenly spaced
        for edge_order, exp_res in [(1, fdx_even_ord1), (2, fdx_even_ord2)]:
            res1 = gradient(f, 1., axis=(0,1), edge_order=edge_order)
            res2 = gradient(f, x_even, x_even,
                            axis=(0,1), edge_order=edge_order)
            res3 = gradient(f, x_even, x_even,
                            axis=None, edge_order=edge_order)
            assert_array_equal(res1, res2)
            assert_array_equal(res2, res3)
            assert_almost_equal(res1[0], exp_res.T)
            assert_almost_equal(res1[1], exp_res)

            res1 = gradient(f, 1., axis=0, edge_order=edge_order)
            res2 = gradient(f, x_even, axis=0, edge_order=edge_order)
            assert_(res1.shape == res2.shape)
            assert_almost_equal(res2, exp_res.T)

            res1 = gradient(f, 1., axis=1, edge_order=edge_order)
            res2 = gradient(f, x_even, axis=1, edge_order=edge_order)
            assert_(res1.shape == res2.shape)
            assert_array_equal(res2, exp_res)

        # unevenly spaced
        for edge_order, exp_res in [(1, fdx_uneven_ord1), (2, fdx_uneven_ord2)]:
            res1 = gradient(f, x_uneven, x_uneven,
                            axis=(0,1), edge_order=edge_order)
            res2 = gradient(f, x_uneven, x_uneven,
                            axis=None, edge_order=edge_order)
            assert_array_equal(res1, res2)
            assert_almost_equal(res1[0], exp_res.T)
            assert_almost_equal(res1[1], exp_res)

            res1 = gradient(f, x_uneven, axis=0, edge_order=edge_order)
            assert_almost_equal(res1, exp_res.T)

            res1 = gradient(f, x_uneven, axis=1, edge_order=edge_order)
            assert_almost_equal(res1, exp_res)

        # mixed
        res1 = gradient(f, x_even, x_uneven, axis=(0,1), edge_order=1)
        res2 = gradient(f, x_uneven, x_even, axis=(1,0), edge_order=1)
        assert_array_equal(res1[0], res2[1])
        assert_array_equal(res1[1], res2[0])
        assert_almost_equal(res1[0], fdx_even_ord1.T)
        assert_almost_equal(res1[1], fdx_uneven_ord1)

        res1 = gradient(f, x_even, x_uneven, axis=(0,1), edge_order=2)
        res2 = gradient(f, x_uneven, x_even, axis=(1,0), edge_order=2)
        assert_array_equal(res1[0], res2[1])
        assert_array_equal(res1[1], res2[0])
        assert_almost_equal(res1[0], fdx_even_ord2.T)
        assert_almost_equal(res1[1], fdx_uneven_ord2)

    def test_specific_axes(self):
        # Testing that gradient can work on a given axis only
        v = [[1, 1], [3, 4]]
        x = np.array(v)
        dx = [np.array([[2., 3.], [2., 3.]]),
              np.array([[0., 0.], [1., 1.]])]
        assert_array_equal(gradient(x, axis=0), dx[0])
        assert_array_equal(gradient(x, axis=1), dx[1])
        assert_array_equal(gradient(x, axis=-1), dx[1])
        assert_array_equal(gradient(x, axis=(1, 0)), [dx[1], dx[0]])

        # test axis=None which means all axes
        assert_almost_equal(gradient(x, axis=None), [dx[0], dx[1]])
        # and is the same as no axis keyword given
        assert_almost_equal(gradient(x, axis=None), gradient(x))

        # test vararg order
        assert_array_equal(gradient(x, 2, 3, axis=(1, 0)),
                           [dx[1]/2.0, dx[0]/3.0])
        # test maximal number of varargs
        assert_raises(TypeError, gradient, x, 1, 2, axis=1)

        assert_raises(np.AxisError, gradient, x, axis=3)
        assert_raises(np.AxisError, gradient, x, axis=-3)
        # assert_raises(TypeError, gradient, x, axis=[1,])

    def test_timedelta64(self):
        # Make sure gradient() can handle special types like timedelta64
        x = np.array(
            [-5, -3, 10, 12, 61, 321, 300],
            dtype='timedelta64[D]')
        dx = np.array(
            [2, 7, 7, 25, 154, 119, -21],
            dtype='timedelta64[D]')
        assert_array_equal(gradient(x), dx)
        assert_(dx.dtype == np.dtype('timedelta64[D]'))

    def test_inexact_dtypes(self):
        for dt in [np.float16, np.float32, np.float64]:
            # dtypes should not be promoted in a different way to what diff does
            x = np.array([1, 2, 3], dtype=dt)
            assert_equal(gradient(x).dtype, np.diff(x).dtype)

    def test_values(self):
        # needs at least 2 points for edge_order ==1
        gradient(np.arange(2), edge_order=1)
        # needs at least 3 points for edge_order ==1
        gradient(np.arange(3), edge_order=2)
        assert_raises(ValueError, gradient, np.arange(0), edge_order=1)
        assert_raises(ValueError, gradient, np.arange(0), edge_order=2)
        assert_raises(ValueError, gradient, np.arange(1), edge_order=1)
        assert_raises(ValueError, gradient, np.arange(1), edge_order=2)
        assert_raises(ValueError, gradient, np.arange(2), edge_order=2)

    @pytest.mark.parametrize('f_dtype', [np.uint8, np.uint16,
                                         np.uint32, np.uint64])
    def test_f_decreasing_unsigned_int(self, f_dtype):
        # Decreasing unsigned data must not wrap around in the differences.
        f = np.array([5, 4, 3, 2, 1], dtype=f_dtype)
        g = gradient(f)
        assert_array_equal(g, [-1]*len(f))

    @pytest.mark.parametrize('f_dtype', [np.int8, np.int16,
                                         np.int32, np.int64])
    def test_f_signed_int_big_jump(self, f_dtype):
        # Differences spanning nearly the full signed range must not overflow.
        maxint = np.iinfo(f_dtype).max
        x = np.array([1, 3])
        f = np.array([-1, maxint], dtype=f_dtype)
        dfdx = gradient(f, x)
        assert_array_equal(dfdx, [(maxint + 1) // 2]*2)

    @pytest.mark.parametrize('x_dtype', [np.uint8, np.uint16,
                                         np.uint32, np.uint64])
    def test_x_decreasing_unsigned(self, x_dtype):
        # Decreasing unsigned coordinates give correct negative gradients.
        x = np.array([3, 2, 1], dtype=x_dtype)
        f = np.array([0, 2, 4])
        dfdx = gradient(f, x)
        assert_array_equal(dfdx, [-2]*len(x))

    @pytest.mark.parametrize('x_dtype', [np.int8, np.int16,
                                         np.int32, np.int64])
    def test_x_signed_int_big_jump(self, x_dtype):
        # Large signed coordinate jumps must not overflow the spacing math.
        minint = np.iinfo(x_dtype).min
        maxint = np.iinfo(x_dtype).max
        x = np.array([-1, maxint], dtype=x_dtype)
        f = np.array([minint // 2, 0])
        dfdx = gradient(f, x)
        assert_array_equal(dfdx, [0.5, 0.5])
class TestAngle:
    """Tests for np.angle."""

    def test_basic(self):
        # A spread of complex numbers covering all four quadrants plus the
        # real and imaginary axes.
        zs = [1 + 3j, np.sqrt(2) / 2.0 + 1j * np.sqrt(2) / 2,
              1, 1j, -1, -1j, 1 - 3j, -1 + 3j]
        want_rad = [
            np.arctan(3.0 / 1.0),
            np.arctan(1.0), 0, np.pi / 2, np.pi, -np.pi / 2.0,
            -np.arctan(3.0 / 1.0), np.pi - np.arctan(3.0 / 1.0)]
        got_rad = angle(zs)
        got_deg = angle(zs, deg=True)
        want_deg = np.array(want_rad) * 180 / np.pi
        assert_array_almost_equal(got_rad, want_rad, 11)
        assert_array_almost_equal(got_deg, want_deg, 11)

    def test_subclass(self):
        # angle() preserves the masked-array subclass and the mask itself.
        data = np.ma.array([1 + 3j, 1, np.sqrt(2)/2 * (1 + 1j)])
        data[1] = np.ma.masked
        want = np.ma.array([np.arctan(3.0 / 1.0), 0, np.arctan(1.0)])
        want[1] = np.ma.masked
        got = angle(data)
        assert_equal(type(got), type(want))
        assert_equal(got.mask, want.mask)
        assert_equal(got, want)
class TestTrimZeros:
    """
    Only testing for integer splits.
    """

    def test_basic(self):
        # Zeros on both ends are stripped.
        arr = np.array([0, 0, 1, 2, 3, 4, 0])
        assert_array_equal(trim_zeros(arr), np.array([1, 2, 3, 4]))

    def test_leading_skip(self):
        # An interior zero after the first nonzero element is kept.
        arr = np.array([0, 0, 1, 0, 2, 3, 4, 0])
        assert_array_equal(trim_zeros(arr), np.array([1, 0, 2, 3, 4]))

    def test_trailing_skip(self):
        # An interior zero before the last nonzero element is kept.
        arr = np.array([0, 0, 1, 0, 2, 3, 0, 4, 0])
        assert_array_equal(trim_zeros(arr), np.array([1, 0, 2, 3, 0, 4]))
class TestExtins:
    """Tests for np.extract and np.place (mask-based read/write)."""

    def test_basic(self):
        a = np.array([1, 3, 2, 1, 2, 3, 3])
        b = extract(a > 1, a)
        assert_array_equal(b, [3, 2, 2, 3, 3])

    def test_place(self):
        # Make sure that non-np.ndarray objects
        # raise an error instead of doing nothing
        assert_raises(TypeError, place, [1, 2, 3], [True, False], [0, 1])

        a = np.array([1, 4, 3, 2, 5, 8, 7])
        place(a, [0, 1, 0, 1, 0, 1, 0], [2, 4, 6])
        assert_array_equal(a, [1, 2, 3, 4, 5, 6, 7])

        # An all-false mask with empty vals is a no-op.
        place(a, np.zeros(7), [])
        assert_array_equal(a, np.arange(1, 8))

        # When vals is shorter than the mask count, the values cycle.
        place(a, [1, 0, 1, 0, 1, 0, 1], [8, 9])
        assert_array_equal(a, [8, 2, 9, 4, 8, 6, 9])
        # Empty vals with a non-empty mask is an error.
        assert_raises_regex(ValueError, "Cannot insert from an empty array",
                            lambda: place(a, [0, 0, 0, 0, 0, 1, 0], []))

        # See Issue #6974
        a = np.array(['12', '34'])
        place(a, [0, 1], '9')
        assert_array_equal(a, ['12', '9'])

    def test_both(self):
        # extract() followed by place() of the extracted values must
        # round-trip the array exactly.
        a = rand(10)
        mask = a > 0.5
        ac = a.copy()
        c = extract(mask, a)
        place(a, mask, 0)
        place(a, mask, c)
        assert_array_equal(a, ac)
class TestVectorize:
    """Tests for np.vectorize: broadcasting element-wise wrapping of Python
    callables, including kwargs, excluded args, caching, otypes, and the
    gufunc-style ``signature`` feature."""

    def test_simple(self):
        def addsubtract(a, b):
            if a > b:
                return a - b
            else:
                return a + b

        f = vectorize(addsubtract)
        r = f([0, 3, 6, 9], [1, 3, 5, 7])
        assert_array_equal(r, [1, 6, 1, 2])

    def test_scalar(self):
        # A scalar second argument broadcasts against the array first one.
        def addsubtract(a, b):
            if a > b:
                return a - b
            else:
                return a + b

        f = vectorize(addsubtract)
        r = f([0, 3, 6, 9], 5)
        assert_array_equal(r, [5, 8, 1, 4])

    def test_large(self):
        x = np.linspace(-3, 2, 10000)
        f = vectorize(lambda x: x)
        y = f(x)
        assert_array_equal(y, x)

    def test_ufunc(self):
        # Vectorized math.cos must agree with the np.cos ufunc.
        import math
        f = vectorize(math.cos)
        args = np.array([0, 0.5 * np.pi, np.pi, 1.5 * np.pi, 2 * np.pi])
        r1 = f(args)
        r2 = np.cos(args)
        assert_array_almost_equal(r1, r2)

    def test_keywords(self):
        def foo(a, b=1):
            return a + b

        f = vectorize(foo)
        args = np.array([1, 2, 3])
        r1 = f(args)
        r2 = np.array([2, 3, 4])
        assert_array_equal(r1, r2)
        r1 = f(args, 2)
        r2 = np.array([3, 4, 5])
        assert_array_equal(r1, r2)

    def test_keywords_no_func_code(self):
        # This needs to test a function that has keywords but
        # no func_code attribute, since otherwise vectorize will
        # inspect the func_code.
        import random
        try:
            vectorize(random.randrange)  # Should succeed
        except Exception:
            raise AssertionError()

    def test_keywords2_ticket_2100(self):
        # Test kwarg support: enhancement ticket 2100
        def foo(a, b=1):
            return a + b

        f = vectorize(foo)
        args = np.array([1, 2, 3])
        r1 = f(a=args)
        r2 = np.array([2, 3, 4])
        assert_array_equal(r1, r2)
        r1 = f(b=1, a=args)
        assert_array_equal(r1, r2)
        r1 = f(args, b=2)
        r2 = np.array([3, 4, 5])
        assert_array_equal(r1, r2)

    def test_keywords3_ticket_2100(self):
        # Test excluded with mixed positional and kwargs: ticket 2100
        def mypolyval(x, p):
            _p = list(p)
            res = _p.pop(0)
            while _p:
                res = res * x + _p.pop(0)
            return res

        vpolyval = np.vectorize(mypolyval, excluded=['p', 1])
        ans = [3, 6]
        assert_array_equal(ans, vpolyval(x=[0, 1], p=[1, 2, 3]))
        assert_array_equal(ans, vpolyval([0, 1], p=[1, 2, 3]))
        assert_array_equal(ans, vpolyval([0, 1], [1, 2, 3]))

    def test_keywords4_ticket_2100(self):
        # Test vectorizing function with no positional args.
        @vectorize
        def f(**kw):
            res = 1.0
            for _k in kw:
                res *= kw[_k]
            return res

        assert_array_equal(f(a=[1, 2], b=[3, 4]), [3, 8])

    def test_keywords5_ticket_2100(self):
        # Test vectorizing function with no kwargs args.
        @vectorize
        def f(*v):
            return np.prod(v)

        assert_array_equal(f([1, 2], [3, 4]), [3, 8])

    def test_coverage1_ticket_2100(self):
        def foo():
            return 1

        f = vectorize(foo)
        assert_array_equal(f(), 1)

    def test_assigning_docstring(self):
        # The wrapped function's docstring is inherited unless `doc`
        # explicitly overrides it.
        def foo(x):
            """Original documentation"""
            return x

        f = vectorize(foo)
        assert_equal(f.__doc__, foo.__doc__)

        doc = "Provided documentation"
        f = vectorize(foo, doc=doc)
        assert_equal(f.__doc__, doc)

    def test_UnboundMethod_ticket_1156(self):
        # Regression test for issue 1156
        class Foo:
            b = 2

            def bar(self, a):
                return a ** self.b

        assert_array_equal(vectorize(Foo().bar)(np.arange(9)),
                           np.arange(9) ** 2)
        assert_array_equal(vectorize(Foo.bar)(Foo(), np.arange(9)),
                           np.arange(9) ** 2)

    def test_execution_order_ticket_1487(self):
        # Regression test for dependence on execution order: issue 1487
        f1 = vectorize(lambda x: x)
        res1a = f1(np.arange(3))
        res1b = f1(np.arange(0.1, 3))
        f2 = vectorize(lambda x: x)
        res2b = f2(np.arange(0.1, 3))
        res2a = f2(np.arange(3))
        assert_equal(res1a, res2a)
        assert_equal(res1b, res2b)

    def test_string_ticket_1892(self):
        # Test vectorization over strings: issue 1892.
        f = np.vectorize(lambda x: x)
        s = '0123456789' * 10
        assert_equal(s, f(s))

    def test_cache(self):
        # Ensure that vectorized func called exactly once per argument.
        _calls = [0]

        @vectorize
        def f(x):
            _calls[0] += 1
            return x ** 2

        f.cache = True
        x = np.arange(5)
        assert_array_equal(f(x), x * x)
        assert_equal(_calls[0], len(x))

    def test_otypes(self):
        # An explicit output type forces the result dtype.
        f = np.vectorize(lambda x: x)
        f.otypes = 'i'
        x = np.arange(5)
        assert_array_equal(f(x), x)

    def test_parse_gufunc_signature(self):
        # Valid gufunc signatures parse into (input dims, output dims).
        assert_equal(nfb._parse_gufunc_signature('(x)->()'), ([('x',)], [()]))
        assert_equal(nfb._parse_gufunc_signature('(x,y)->()'),
                     ([('x', 'y')], [()]))
        assert_equal(nfb._parse_gufunc_signature('(x),(y)->()'),
                     ([('x',), ('y',)], [()]))
        assert_equal(nfb._parse_gufunc_signature('(x)->(y)'),
                     ([('x',)], [('y',)]))
        assert_equal(nfb._parse_gufunc_signature('(x)->(y),()'),
                     ([('x',)], [('y',), ()]))
        assert_equal(nfb._parse_gufunc_signature('(),(a,b,c),(d)->(d,e)'),
                     ([(), ('a', 'b', 'c'), ('d',)], [('d', 'e')]))
        # Malformed signatures raise ValueError.
        with assert_raises(ValueError):
            nfb._parse_gufunc_signature('(x)(y)->()')
        with assert_raises(ValueError):
            nfb._parse_gufunc_signature('(x),(y)->')
        with assert_raises(ValueError):
            nfb._parse_gufunc_signature('((x))->(x)')

    def test_signature_simple(self):
        def addsubtract(a, b):
            if a > b:
                return a - b
            else:
                return a + b

        f = vectorize(addsubtract, signature='(),()->()')
        r = f([0, 3, 6, 9], [1, 3, 5, 7])
        assert_array_equal(r, [1, 6, 1, 2])

    def test_signature_mean_last(self):
        def mean(a):
            return a.mean()

        f = vectorize(mean, signature='(n)->()')
        r = f([[1, 3], [2, 4]])
        assert_array_equal(r, [2, 3])

    def test_signature_center(self):
        def center(a):
            return a - a.mean()

        f = vectorize(center, signature='(n)->(n)')
        r = f([[1, 3], [2, 4]])
        assert_array_equal(r, [[-1, 1], [-1, 1]])

    def test_signature_two_outputs(self):
        f = vectorize(lambda x: (x, x), signature='()->(),()')
        r = f([1, 2, 3])
        assert_(isinstance(r, tuple) and len(r) == 2)
        assert_array_equal(r[0], [1, 2, 3])
        assert_array_equal(r[1], [1, 2, 3])

    def test_signature_outer(self):
        f = vectorize(np.outer, signature='(a),(b)->(a,b)')
        r = f([1, 2], [1, 2, 3])
        assert_array_equal(r, [[1, 2, 3], [2, 4, 6]])

        r = f([[[1, 2]]], [1, 2, 3])
        assert_array_equal(r, [[[[1, 2, 3], [2, 4, 6]]]])

        r = f([[1, 0], [2, 0]], [1, 2, 3])
        assert_array_equal(r, [[[1, 2, 3], [0, 0, 0]],
                               [[2, 4, 6], [0, 0, 0]]])

        r = f([1, 2], [[1, 2, 3], [0, 0, 0]])
        assert_array_equal(r, [[[1, 2, 3], [2, 4, 6]],
                               [[0, 0, 0], [0, 0, 0]]])

    def test_signature_computed_size(self):
        # Output dims absent from the inputs are inferred from the result.
        f = vectorize(lambda x: x[:-1], signature='(n)->(m)')
        r = f([1, 2, 3])
        assert_array_equal(r, [1, 2])

        r = f([[1, 2, 3], [2, 3, 4]])
        assert_array_equal(r, [[1, 2], [2, 3]])

    def test_signature_excluded(self):
        def foo(a, b=1):
            return a + b

        f = vectorize(foo, signature='()->()', excluded={'b'})
        assert_array_equal(f([1, 2, 3]), [2, 3, 4])
        assert_array_equal(f([1, 2, 3], b=0), [1, 2, 3])

    def test_signature_otypes(self):
        f = vectorize(lambda x: x, signature='(n)->(n)', otypes=['float64'])
        r = f([1, 2, 3])
        assert_equal(r.dtype, np.dtype('float64'))
        assert_array_equal(r, [1, 2, 3])

    def test_signature_invalid_inputs(self):
        f = vectorize(operator.add, signature='(n),(n)->(n)')
        with assert_raises_regex(TypeError, 'wrong number of positional'):
            f([1, 2])
        with assert_raises_regex(
                ValueError, 'does not have enough dimensions'):
            f(1, 2)
        with assert_raises_regex(
                ValueError, 'inconsistent size for core dimension'):
            f([1, 2], [1, 2, 3])

        f = vectorize(operator.add, signature='()->()')
        with assert_raises_regex(TypeError, 'wrong number of positional'):
            f(1, 2)

    def test_signature_invalid_outputs(self):
        f = vectorize(lambda x: x[:-1], signature='(n)->(n)')
        with assert_raises_regex(
                ValueError, 'inconsistent size for core dimension'):
            f([1, 2, 3])

        f = vectorize(lambda x: x, signature='()->(),()')
        with assert_raises_regex(ValueError, 'wrong number of outputs'):
            f(1)

        f = vectorize(lambda x: (x, x), signature='()->()')
        with assert_raises_regex(ValueError, 'wrong number of outputs'):
            f([1, 2])

    def test_size_zero_output(self):
        # see issue 5868
        f = np.vectorize(lambda x: x)
        x = np.zeros([0, 5], dtype=int)
        with assert_raises_regex(ValueError, 'otypes'):
            f(x)

        f.otypes = 'i'
        assert_array_equal(f(x), x)

        f = np.vectorize(lambda x: x, signature='()->()')
        with assert_raises_regex(ValueError, 'otypes'):
            f(x)

        f = np.vectorize(lambda x: x, signature='()->()', otypes='i')
        assert_array_equal(f(x), x)

        f = np.vectorize(lambda x: x, signature='(n)->(n)', otypes='i')
        assert_array_equal(f(x), x)

        f = np.vectorize(lambda x: x, signature='(n)->(n)')
        assert_array_equal(f(x.T), x.T)

        f = np.vectorize(lambda x: [x], signature='()->(n)', otypes='i')
        with assert_raises_regex(ValueError, 'new output dimensions'):
            f(x)
class TestLeaks:
    """Reference-count regression tests for np.frompyfunc (gh-11867)."""
    class A:
        # number of frompyfunc-wrapping iterations run by the test below
        iters = 20
        def bound(self, *args):
            # bound method: wrapping it with frompyfunc creates a cycle
            # through the instance, so refcounts grow until gc runs
            return 0
        @staticmethod
        def unbound(*args):
            # staticmethod: no self reference, hence no cycle (incr == 0)
            return 0
    @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
    @pytest.mark.parametrize('name, incr', [
        ('bound', A.iters),
        ('unbound', 0),
    ])
    def test_frompyfunc_leaks(self, name, incr):
        # exposed in gh-11867 as np.vectorized, but the problem stems from
        # frompyfunc.
        # class.attribute = np.frompyfunc(<method>) creates a
        # reference cycle if <method> is a bound class method. It requires a
        # gc collection cycle to break the cycle (on CPython 3)
        import gc
        A_func = getattr(self.A, name)
        # disable gc so cycles accumulate deterministically during the loop
        gc.disable()
        try:
            refcount = sys.getrefcount(A_func)
            for i in range(self.A.iters):
                a = self.A()
                a.f = np.frompyfunc(getattr(a, name), 1, 1)
                out = a.f(np.arange(10))
            a = None
            # A.func is part of a reference cycle if incr is non-zero
            assert_equal(sys.getrefcount(A_func), refcount + incr)
            for i in range(5):
                gc.collect()
            # after collection the cycles are gone and the count is restored
            assert_equal(sys.getrefcount(A_func), refcount)
        finally:
            # always re-enable gc, even if an assertion failed
            gc.enable()
class TestDigitize:
    """Tests for np.digitize: bin-index lookup against monotonic bins."""
    def test_forward(self):
        # increasing bins: each successive x lands one bin further along
        x = np.arange(-6, 5)
        bins = np.arange(-5, 5)
        assert_array_equal(digitize(x, bins), np.arange(11))
    def test_reverse(self):
        # monotonically decreasing bins are also supported
        x = np.arange(5, -6, -1)
        bins = np.arange(5, -5, -1)
        assert_array_equal(digitize(x, bins), np.arange(11))
    def test_random(self):
        # bins start at x.min(), so no value can fall below the first edge
        x = rand(10)
        bin = np.linspace(x.min(), x.max(), 10)
        assert_(np.all(digitize(x, bin) != 0))
    def test_right_basic(self):
        # right=True flips which side of each bin edge is inclusive
        x = [1, 5, 4, 10, 8, 11, 0]
        bins = [1, 5, 10]
        default_answer = [1, 2, 1, 3, 2, 3, 0]
        assert_array_equal(digitize(x, bins), default_answer)
        right_answer = [0, 1, 1, 2, 2, 3, 0]
        assert_array_equal(digitize(x, bins, True), right_answer)
    def test_right_open(self):
        x = np.arange(-6, 5)
        bins = np.arange(-6, 4)
        assert_array_equal(digitize(x, bins, True), np.arange(11))
    def test_right_open_reverse(self):
        x = np.arange(5, -6, -1)
        bins = np.arange(4, -6, -1)
        assert_array_equal(digitize(x, bins, True), np.arange(11))
    def test_right_open_random(self):
        # with right=True nothing can land past the last edge
        x = rand(10)
        bins = np.linspace(x.min(), x.max(), 10)
        assert_(np.all(digitize(x, bins, True) != 10))
    def test_monotonic(self):
        # repeated edges are allowed as long as bins remain monotonic
        x = [-1, 0, 1, 2]
        bins = [0, 0, 1]
        assert_array_equal(digitize(x, bins, False), [0, 2, 3, 3])
        assert_array_equal(digitize(x, bins, True), [0, 0, 2, 3])
        bins = [1, 1, 0]
        assert_array_equal(digitize(x, bins, False), [3, 2, 0, 0])
        assert_array_equal(digitize(x, bins, True), [3, 3, 2, 0])
        bins = [1, 1, 1, 1]
        assert_array_equal(digitize(x, bins, False), [0, 0, 4, 4])
        assert_array_equal(digitize(x, bins, True), [0, 0, 0, 4])
        # non-monotonic bins must be rejected
        bins = [0, 0, 1, 0]
        assert_raises(ValueError, digitize, x, bins)
        bins = [1, 1, 0, 1]
        assert_raises(ValueError, digitize, x, bins)
    def test_casting_error(self):
        # complex values cannot be safely cast for the comparison, either
        # as data or as bins
        x = [1, 2, 3 + 1.j]
        bins = [1, 2, 3]
        assert_raises(TypeError, digitize, x, bins)
        x, bins = bins, x
        assert_raises(TypeError, digitize, x, bins)
    def test_return_type(self):
        # Functions returning indices should always return base ndarrays
        class A(np.ndarray):
            pass
        a = np.arange(5).view(A)
        b = np.arange(1, 3).view(A)
        assert_(not isinstance(digitize(b, a, False), A))
        assert_(not isinstance(digitize(b, a, True), A))
    def test_large_integers_increasing(self):
        # gh-11022
        x = 2**54  # loses precision in a float
        assert_equal(np.digitize(x, [x - 1, x + 1]), 1)
    @pytest.mark.xfail(
        reason="gh-11022: np.core.multiarray._monoticity loses precision")
    def test_large_integers_decreasing(self):
        # gh-11022
        x = 2**54  # loses precision in a float
        assert_equal(np.digitize(x, [x + 1, x - 1]), 1)
class TestUnwrap:
    """Tests for np.unwrap (phase-jump removal)."""

    def test_simple(self):
        """unwrap folds 2*pi jumps back and keeps the signal continuous."""
        # a jump of exactly 2*pi collapses onto the first sample
        assert_array_equal(unwrap([1, 1 + 2 * np.pi]), [1, 1])
        # adjacent differences of an unwrapped signal stay below pi
        assert_(np.all(diff(unwrap(rand(10) * 100)) < np.pi))
class TestFilterwindows:
    """Symmetry and known-sum checks for the standard window functions."""

    def _check(self, window, expected_sum):
        # every window must be symmetric about its midpoint
        assert_array_almost_equal(window, flipud(window), 7)
        # and its sum must match a precomputed reference value
        assert_almost_equal(np.sum(window, axis=0), expected_sum, 4)

    def test_hanning(self):
        self._check(hanning(10), 4.500)

    def test_hamming(self):
        self._check(hamming(10), 4.9400)

    def test_bartlett(self):
        self._check(bartlett(10), 4.4444)

    def test_blackman(self):
        self._check(blackman(10), 3.7800)
class TestTrapz:
    """Tests for np.trapz (composite trapezoidal integration)."""
    def test_simple(self):
        # integrate a standard normal density over [-10, 10)
        x = np.arange(-10, 10, .1)
        r = trapz(np.exp(-.5 * x ** 2) / np.sqrt(2 * np.pi), dx=0.1)
        # check integral of normal equals 1
        assert_almost_equal(r, 1, 7)
    def test_ndim(self):
        x = np.linspace(0, 1, 3)
        y = np.linspace(0, 2, 8)
        z = np.linspace(0, 3, 13)
        # build explicit trapezoidal weights per axis: interior points get a
        # full step, endpoints half a step
        wx = np.ones_like(x) * (x[1] - x[0])
        wx[0] /= 2
        wx[-1] /= 2
        wy = np.ones_like(y) * (y[1] - y[0])
        wy[0] /= 2
        wy[-1] /= 2
        wz = np.ones_like(z) * (z[1] - z[0])
        wz[0] /= 2
        wz[-1] /= 2
        # 3-d grid of samples, plus the expected reductions along each axis
        q = x[:, None, None] + y[None,:, None] + z[None, None,:]
        qx = (q * wx[:, None, None]).sum(axis=0)
        qy = (q * wy[None, :, None]).sum(axis=1)
        qz = (q * wz[None, None, :]).sum(axis=2)
        # n-d `x`
        r = trapz(q, x=x[:, None, None], axis=0)
        assert_almost_equal(r, qx)
        r = trapz(q, x=y[None,:, None], axis=1)
        assert_almost_equal(r, qy)
        r = trapz(q, x=z[None, None,:], axis=2)
        assert_almost_equal(r, qz)
        # 1-d `x`
        r = trapz(q, x=x, axis=0)
        assert_almost_equal(r, qx)
        r = trapz(q, x=y, axis=1)
        assert_almost_equal(r, qy)
        r = trapz(q, x=z, axis=2)
        assert_almost_equal(r, qz)
    def test_masked(self):
        # Testing that masked arrays behave as if the function is 0 where
        # masked
        x = np.arange(5)
        y = x * x
        mask = x == 2
        ym = np.ma.array(y, mask=mask)
        r = 13.0  # sum(0.5 * (0 + 1) * 1.0 + 0.5 * (9 + 16))
        assert_almost_equal(trapz(ym, x), r)
        # masking either the samples or the abscissae gives the same result
        xm = np.ma.array(x, mask=mask)
        assert_almost_equal(trapz(ym, xm), r)
        xm = np.ma.array(x, mask=mask)
        assert_almost_equal(trapz(y, xm), r)
class TestSinc:
    """Tests for np.sinc (normalized sinc function)."""

    def test_simple(self):
        """sinc(0) is 1 and the function is even."""
        assert_(sinc(0) == 1)
        vals = sinc(np.linspace(-1, 1, 100))
        # even function: symmetric about zero
        assert_array_almost_equal(vals, flipud(vals), 7)

    def test_array_like(self):
        """Lists and tuples produce the same result as arrays."""
        pts = [0, 0.5]
        reference = sinc(np.array(pts))
        assert_array_equal(reference, sinc(list(pts)))
        assert_array_equal(reference, sinc(tuple(pts)))
class TestUnique:
    """Tests for unique: sorted deduplication across dtypes."""

    def test_simple(self):
        """unique sorts and deduplicates ints, strings and complex values."""
        ints = np.array([4, 3, 2, 1, 1, 2, 3, 4, 0])
        assert_(np.all(unique(ints) == [0, 1, 2, 3, 4]))
        assert_(unique(np.array([1, 1, 1, 1, 1])) == np.array([1]))
        words = ['widget', 'ham', 'foo', 'bar', 'foo', 'ham']
        assert_(np.all(unique(words) == ['bar', 'foo', 'ham', 'widget']))
        # complex values sort lexicographically (real part, then imaginary)
        cplx = np.array([5 + 6j, 1 + 1j, 1 + 10j, 10, 5 + 6j])
        assert_(np.all(unique(cplx) == [1 + 1j, 1 + 10j, 5 + 6j, 10]))
class TestCheckFinite:
    """Tests for np.lib.asarray_chkfinite."""

    def test_simple(self):
        """Finite data is accepted; inf and nan raise ValueError."""
        np.lib.asarray_chkfinite([1, 2, 3])
        assert_raises(ValueError, np.lib.asarray_chkfinite, [1, 2, np.inf])
        assert_raises(ValueError, np.lib.asarray_chkfinite, [1, 2, np.nan])

    def test_dtype_order(self):
        """Regression: the dtype and order keywords are forwarded."""
        converted = np.lib.asarray_chkfinite([1, 2, 3], order='F',
                                             dtype=np.float64)
        assert_(converted.dtype == np.float64)
class TestCorrCoef:
    """Tests for np.corrcoef (normalized correlation-coefficient matrix)."""
    # two 3x3 input blocks used throughout the tests
    A = np.array(
        [[0.15391142, 0.18045767, 0.14197213],
         [0.70461506, 0.96474128, 0.27906989],
         [0.9297531, 0.32296769, 0.19267156]])
    B = np.array(
        [[0.10377691, 0.5417086, 0.49807457],
         [0.82872117, 0.77801674, 0.39226705],
         [0.9314666, 0.66800209, 0.03538394]])
    # precomputed reference: corrcoef(A)
    res1 = np.array(
        [[1., 0.9379533, -0.04931983],
         [0.9379533, 1., 0.30007991],
         [-0.04931983, 0.30007991, 1.]])
    # precomputed reference: corrcoef(A, B) -- joint 6x6 matrix
    res2 = np.array(
        [[1., 0.9379533, -0.04931983, 0.30151751, 0.66318558, 0.51532523],
         [0.9379533, 1., 0.30007991, -0.04781421, 0.88157256, 0.78052386],
         [-0.04931983, 0.30007991, 1., -0.96717111, 0.71483595, 0.83053601],
         [0.30151751, -0.04781421, -0.96717111, 1., -0.51366032, -0.66173113],
         [0.66318558, 0.88157256, 0.71483595, -0.51366032, 1., 0.98317823],
         [0.51532523, 0.78052386, 0.83053601, -0.66173113, 0.98317823, 1.]])
    def test_non_array(self):
        # plain Python lists are accepted
        assert_almost_equal(np.corrcoef([0, 1, 0], [1, 0, 1]),
                            [[1., -1.], [-1., 1.]])
    def test_simple(self):
        tgt1 = corrcoef(self.A)
        assert_almost_equal(tgt1, self.res1)
        # correlation coefficients are bounded by 1 in magnitude
        assert_(np.all(np.abs(tgt1) <= 1.0))
        tgt2 = corrcoef(self.A, self.B)
        assert_almost_equal(tgt2, self.res2)
        assert_(np.all(np.abs(tgt2) <= 1.0))
    def test_ddof(self):
        # ddof raises DeprecationWarning
        with suppress_warnings() as sup:
            warnings.simplefilter("always")
            assert_warns(DeprecationWarning, corrcoef, self.A, ddof=-1)
            sup.filter(DeprecationWarning)
            # ddof has no or negligible effect on the function
            assert_almost_equal(corrcoef(self.A, ddof=-1), self.res1)
            assert_almost_equal(corrcoef(self.A, self.B, ddof=-1), self.res2)
            assert_almost_equal(corrcoef(self.A, ddof=3), self.res1)
            assert_almost_equal(corrcoef(self.A, self.B, ddof=3), self.res2)
    def test_bias(self):
        # bias raises DeprecationWarning
        with suppress_warnings() as sup:
            warnings.simplefilter("always")
            assert_warns(DeprecationWarning, corrcoef, self.A, self.B, 1, 0)
            assert_warns(DeprecationWarning, corrcoef, self.A, bias=0)
            sup.filter(DeprecationWarning)
            # bias has no or negligible effect on the function
            assert_almost_equal(corrcoef(self.A, bias=1), self.res1)
    def test_complex(self):
        x = np.array([[1, 2, 3], [1j, 2j, 3j]])
        res = corrcoef(x)
        tgt = np.array([[1., -1.j], [1.j, 1.]])
        assert_allclose(res, tgt)
        assert_(np.all(np.abs(res) <= 1.0))
    def test_xy(self):
        # separate x and y arguments are stacked before correlating
        x = np.array([[1, 2, 3]])
        y = np.array([[1j, 2j, 3j]])
        assert_allclose(np.corrcoef(x, y), np.array([[1., -1.j], [1.j, 1.]]))
    def test_empty(self):
        # empty inputs produce nan / empty results with a RuntimeWarning
        with warnings.catch_warnings(record=True):
            warnings.simplefilter('always', RuntimeWarning)
            assert_array_equal(corrcoef(np.array([])), np.nan)
            assert_array_equal(corrcoef(np.array([]).reshape(0, 2)),
                               np.array([]).reshape(0, 0))
            assert_array_equal(corrcoef(np.array([]).reshape(2, 0)),
                               np.array([[np.nan, np.nan], [np.nan, np.nan]]))
    def test_extreme(self):
        # extreme magnitudes must not overflow/underflow (errstate='raise')
        x = [[1e-100, 1e100], [1e100, 1e-100]]
        with np.errstate(all='raise'):
            c = corrcoef(x)
        assert_array_almost_equal(c, np.array([[1., -1.], [-1., 1.]]))
        assert_(np.all(np.abs(c) <= 1.0))
class TestCov:
    """Tests for np.cov, including fweights/aweights handling."""
    # two perfectly anti-correlated variables and their covariance
    x1 = np.array([[0, 2], [1, 1], [2, 0]]).T
    res1 = np.array([[1., -1.], [-1., 1.]])
    x2 = np.array([0.0, 1.0, 2.0], ndmin=2)
    # frequency weights and the equivalent explicitly-repeated sample set
    frequencies = np.array([1, 4, 1])
    x2_repeats = np.array([[0.0], [1.0], [1.0], [1.0], [1.0], [2.0]]).T
    res2 = np.array([[0.4, -0.4], [-0.4, 0.4]])
    # NOTE(review): np.integer as a dtype alias is deprecated/removed in
    # newer numpy releases -- confirm against the pinned numpy version
    unit_frequencies = np.ones(3, dtype=np.integer)
    weights = np.array([1.0, 4.0, 1.0])
    res3 = np.array([[2. / 3., -2. / 3.], [-2. / 3., 2. / 3.]])
    unit_weights = np.ones(3)
    x3 = np.array([0.3942, 0.5969, 0.7730, 0.9918, 0.7964])
    def test_basic(self):
        assert_allclose(cov(self.x1), self.res1)
    def test_complex(self):
        x = np.array([[1, 2, 3], [1j, 2j, 3j]])
        res = np.array([[1., -1.j], [1.j, 1.]])
        assert_allclose(cov(x), res)
        assert_allclose(cov(x, aweights=np.ones(3)), res)
    def test_xy(self):
        # separate x and y arguments are stacked before computing cov
        x = np.array([[1, 2, 3]])
        y = np.array([[1j, 2j, 3j]])
        assert_allclose(cov(x, y), np.array([[1., -1.j], [1.j, 1.]]))
    def test_empty(self):
        # empty inputs give nan / empty results with a RuntimeWarning
        with warnings.catch_warnings(record=True):
            warnings.simplefilter('always', RuntimeWarning)
            assert_array_equal(cov(np.array([])), np.nan)
            assert_array_equal(cov(np.array([]).reshape(0, 2)),
                               np.array([]).reshape(0, 0))
            assert_array_equal(cov(np.array([]).reshape(2, 0)),
                               np.array([[np.nan, np.nan], [np.nan, np.nan]]))
    def test_wrong_ddof(self):
        # ddof larger than the number of observations -> division by <= 0
        with warnings.catch_warnings(record=True):
            warnings.simplefilter('always', RuntimeWarning)
            assert_array_equal(cov(self.x1, ddof=5),
                               np.array([[np.inf, -np.inf],
                                         [-np.inf, np.inf]]))
    def test_1D_rowvar(self):
        # rowvar is irrelevant for 1-D input
        assert_allclose(cov(self.x3), cov(self.x3, rowvar=False))
        y = np.array([0.0780, 0.3107, 0.2111, 0.0334, 0.8501])
        assert_allclose(cov(self.x3, y), cov(self.x3, y, rowvar=False))
    def test_1D_variance(self):
        # cov of a single variable reduces to its variance
        assert_allclose(cov(self.x3, ddof=1), np.var(self.x3, ddof=1))
    def test_fweights(self):
        # integer frequency weights are equivalent to repeating samples
        assert_allclose(cov(self.x2, fweights=self.frequencies),
                        cov(self.x2_repeats))
        assert_allclose(cov(self.x1, fweights=self.frequencies),
                        self.res2)
        assert_allclose(cov(self.x1, fweights=self.unit_frequencies),
                        self.res1)
        # non-integer, wrongly-shaped, or negative fweights are rejected
        nonint = self.frequencies + 0.5
        assert_raises(TypeError, cov, self.x1, fweights=nonint)
        f = np.ones((2, 3), dtype=np.integer)
        assert_raises(RuntimeError, cov, self.x1, fweights=f)
        f = np.ones(2, dtype=np.integer)
        assert_raises(RuntimeError, cov, self.x1, fweights=f)
        f = -1 * np.ones(3, dtype=np.integer)
        assert_raises(ValueError, cov, self.x1, fweights=f)
    def test_aweights(self):
        assert_allclose(cov(self.x1, aweights=self.weights), self.res3)
        # aweights are scale-invariant
        assert_allclose(cov(self.x1, aweights=3.0 * self.weights),
                        cov(self.x1, aweights=self.weights))
        assert_allclose(cov(self.x1, aweights=self.unit_weights), self.res1)
        # wrongly-shaped or negative aweights are rejected
        w = np.ones((2, 3))
        assert_raises(RuntimeError, cov, self.x1, aweights=w)
        w = np.ones(2)
        assert_raises(RuntimeError, cov, self.x1, aweights=w)
        w = -1.0 * np.ones(3)
        assert_raises(ValueError, cov, self.x1, aweights=w)
    def test_unit_fweights_and_aweights(self):
        # combining fweights with unit aweights (and vice versa) matches the
        # single-weight-kind results above
        assert_allclose(cov(self.x2, fweights=self.frequencies,
                            aweights=self.unit_weights),
                        cov(self.x2_repeats))
        assert_allclose(cov(self.x1, fweights=self.frequencies,
                            aweights=self.unit_weights),
                        self.res2)
        assert_allclose(cov(self.x1, fweights=self.unit_frequencies,
                            aweights=self.unit_weights),
                        self.res1)
        assert_allclose(cov(self.x1, fweights=self.unit_frequencies,
                            aweights=self.weights),
                        self.res3)
        assert_allclose(cov(self.x1, fweights=self.unit_frequencies,
                            aweights=3.0 * self.weights),
                        cov(self.x1, aweights=self.weights))
        assert_allclose(cov(self.x1, fweights=self.unit_frequencies,
                            aweights=self.unit_weights),
                        self.res1)
class Test_I0:
    """Tests for np.i0 (modified Bessel function of the first kind, order 0)."""
    def test_simple(self):
        assert_almost_equal(
            i0(0.5),
            np.array(1.0634833707413234))
        # i0 is an even function: i0(-x) == i0(x)
        A = np.array([0.49842636, 0.6969809, 0.22011976, 0.0155549])
        expected = np.array([1.06307822, 1.12518299, 1.01214991, 1.00006049])
        assert_almost_equal(i0(A), expected)
        assert_almost_equal(i0(-A), expected)
        # 2-d input preserves shape
        B = np.array([[0.827002, 0.99959078],
                      [0.89694769, 0.39298162],
                      [0.37954418, 0.05206293],
                      [0.36465447, 0.72446427],
                      [0.48164949, 0.50324519]])
        assert_almost_equal(
            i0(B),
            np.array([[1.17843223, 1.26583466],
                      [1.21147086, 1.03898290],
                      [1.03633899, 1.00067775],
                      [1.03352052, 1.13557954],
                      [1.05884290, 1.06432317]]))
        # Regression test for gh-11205
        i0_0 = np.i0([0.])
        assert_equal(i0_0.shape, (1,))
        assert_array_equal(np.i0([0.]), np.array([1.]))
    def test_non_array(self):
        a = np.arange(4)
        class array_like:
            __array_interface__ = a.__array_interface__
            def __array_wrap__(self, arr):
                # wrap back into this class so ufunc results keep the type
                return self
        # E.g. pandas series survive ufunc calls through array-wrap:
        assert isinstance(np.abs(array_like()), array_like)
        # i0 must still compute correct values for such array-likes
        exp = np.i0(a)
        res = np.i0(array_like())
        assert_array_equal(exp, res)
class TestKaiser:
    """Tests for np.kaiser (Kaiser window)."""

    def test_simple(self):
        """Degenerate sizes and reference values for several betas."""
        assert_(np.isfinite(kaiser(1, 1.0)))
        assert_almost_equal(kaiser(0, 1.0), np.array([]))
        # (M, beta) -> expected window values
        references = {
            (2, 1.0): [0.78984831, 0.78984831],
            (5, 1.0): [0.78984831, 0.94503323, 1.,
                       0.94503323, 0.78984831],
            (5, 1.56789): [0.58285404, 0.88409679, 1.,
                           0.88409679, 0.58285404],
        }
        for (m, beta), expected in references.items():
            assert_almost_equal(kaiser(m, beta), np.array(expected))

    def test_int_beta(self):
        """An integer beta must be accepted without error."""
        kaiser(3, 4)
class TestMsort:
    """Tests for np.msort (sort along the first axis)."""

    def test_simple(self):
        """msort sorts each column independently."""
        data = np.array([[0.44567325, 0.79115165, 0.54900530],
                         [0.36844147, 0.37325583, 0.96098397],
                         [0.64864341, 0.52929049, 0.39172155]])
        expected = np.array([[0.36844147, 0.37325583, 0.39172155],
                             [0.44567325, 0.52929049, 0.54900530],
                             [0.64864341, 0.79115165, 0.96098397]])
        assert_almost_equal(msort(data), expected)
class TestMeshgrid:
    """Tests for np.meshgrid (coordinate-grid construction)."""
    def test_simple(self):
        # default 'xy' indexing: first output varies along columns
        [X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7])
        assert_array_equal(X, np.array([[1, 2, 3],
                                        [1, 2, 3],
                                        [1, 2, 3],
                                        [1, 2, 3]]))
        assert_array_equal(Y, np.array([[4, 4, 4],
                                        [5, 5, 5],
                                        [6, 6, 6],
                                        [7, 7, 7]]))
    def test_single_input(self):
        # a single input is returned unchanged (as a 1-element list)
        [X] = meshgrid([1, 2, 3, 4])
        assert_array_equal(X, np.array([1, 2, 3, 4]))
    def test_no_input(self):
        # zero inputs produce an empty result
        args = []
        assert_array_equal([], meshgrid(*args))
        assert_array_equal([], meshgrid(*args, copy=False))
    def test_indexing(self):
        # 'ij' (matrix) indexing: first output varies along rows
        x = [1, 2, 3]
        y = [4, 5, 6, 7]
        [X, Y] = meshgrid(x, y, indexing='ij')
        assert_array_equal(X, np.array([[1, 1, 1, 1],
                                        [2, 2, 2, 2],
                                        [3, 3, 3, 3]]))
        assert_array_equal(Y, np.array([[4, 5, 6, 7],
                                        [4, 5, 6, 7],
                                        [4, 5, 6, 7]]))
        # Test expected shapes:
        z = [8, 9]
        assert_(meshgrid(x, y)[0].shape == (4, 3))
        assert_(meshgrid(x, y, indexing='ij')[0].shape == (3, 4))
        assert_(meshgrid(x, y, z)[0].shape == (4, 3, 2))
        assert_(meshgrid(x, y, z, indexing='ij')[0].shape == (3, 4, 2))
        assert_raises(ValueError, meshgrid, x, y, indexing='notvalid')
    def test_sparse(self):
        # sparse=True keeps each output broadcastable rather than full
        [X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7], sparse=True)
        assert_array_equal(X, np.array([[1, 2, 3]]))
        assert_array_equal(Y, np.array([[4], [5], [6], [7]]))
    def test_invalid_arguments(self):
        # Test that meshgrid complains about invalid arguments
        # Regression test for issue #4755:
        # https://github.com/numpy/numpy/issues/4755
        assert_raises(TypeError, meshgrid,
                      [1, 2, 3], [4, 5, 6, 7], indices='ij')
    def test_return_type(self):
        # Test for appropriate dtype in returned arrays.
        # Regression test for issue #5297
        # https://github.com/numpy/numpy/issues/5297
        x = np.arange(0, 10, dtype=np.float32)
        y = np.arange(10, 20, dtype=np.float64)
        X, Y = np.meshgrid(x,y)
        assert_(X.dtype == x.dtype)
        assert_(Y.dtype == y.dtype)
        # copy
        X, Y = np.meshgrid(x,y, copy=True)
        assert_(X.dtype == x.dtype)
        assert_(Y.dtype == y.dtype)
        # sparse
        X, Y = np.meshgrid(x,y, sparse=True)
        assert_(X.dtype == x.dtype)
        assert_(Y.dtype == y.dtype)
    def test_writeback(self):
        # Issue 8561: outputs must be independently writable
        X = np.array([1.1, 2.2])
        Y = np.array([3.3, 4.4])
        x, y = np.meshgrid(X, Y, sparse=False, copy=True)
        x[0, :] = 0
        assert_equal(x[0, :], 0)
        assert_equal(x[1, :], X)
class TestPiecewise:
    """Tests for np.piecewise (piecewise-defined function evaluation)."""
    def test_simple(self):
        # Condition is single bool list
        x = piecewise([0, 0], [True, False], [1])
        assert_array_equal(x, [1, 0])
        # List of conditions: single bool list
        x = piecewise([0, 0], [[True, False]], [1])
        assert_array_equal(x, [1, 0])
        # Conditions is single bool array
        x = piecewise([0, 0], np.array([True, False]), [1])
        assert_array_equal(x, [1, 0])
        # Condition is single int array
        x = piecewise([0, 0], np.array([1, 0]), [1])
        assert_array_equal(x, [1, 0])
        # List of conditions: int array
        x = piecewise([0, 0], [np.array([1, 0])], [1])
        assert_array_equal(x, [1, 0])
        # functions may be callables as well as scalars
        x = piecewise([0, 0], [[False, True]], [lambda x:-1])
        assert_array_equal(x, [0, -1])
        # number of functions must be len(condlist) or len(condlist) + 1
        assert_raises_regex(ValueError, '1 or 2 functions are expected',
            piecewise, [0, 0], [[False, True]], [])
        assert_raises_regex(ValueError, '1 or 2 functions are expected',
            piecewise, [0, 0], [[False, True]], [1, 2, 3])
    def test_two_conditions(self):
        x = piecewise([1, 2], [[True, False], [False, True]], [3, 4])
        assert_array_equal(x, [3, 4])
    def test_scalar_domains_three_conditions(self):
        x = piecewise(3, [True, False, False], [4, 2, 0])
        assert_equal(x, 4)
    def test_default(self):
        # No value specified for x[1], should be 0
        x = piecewise([1, 2], [True, False], [2])
        assert_array_equal(x, [2, 0])
        # Should set x[1] to 3
        x = piecewise([1, 2], [True, False], [2, 3])
        assert_array_equal(x, [2, 3])
    def test_0d(self):
        # 0-d array input yields a 0-d result
        x = np.array(3)
        y = piecewise(x, x > 3, [4, 0])
        assert_(y.ndim == 0)
        assert_(y == 0)
        x = 5
        y = piecewise(x, [True, False], [1, 0])
        assert_(y.ndim == 0)
        assert_(y == 1)
        # With 3 ranges (It was failing, before)
        y = piecewise(x, [False, False, True], [1, 2, 3])
        assert_array_equal(y, 3)
    def test_0d_comparison(self):
        x = 3
        y = piecewise(x, [x <= 3, x > 3], [4, 0])  # Should succeed.
        assert_equal(y, 4)
        # With 3 ranges (It was failing, before)
        x = 4
        y = piecewise(x, [x <= 3, (x > 3) * (x <= 5), x > 5], [1, 2, 3])
        assert_array_equal(y, 2)
        # function-count validation applies to scalar conditions too
        assert_raises_regex(ValueError, '2 or 3 functions are expected',
            piecewise, x, [x <= 3, x > 3], [1])
        assert_raises_regex(ValueError, '2 or 3 functions are expected',
            piecewise, x, [x <= 3, x > 3], [1, 1, 1, 1])
    def test_0d_0d_condition(self):
        # a 0-d boolean condition array selects the default function
        x = np.array(3)
        c = np.array(x > 3)
        y = piecewise(x, [c], [1, 2])
        assert_equal(y, 2)
    def test_multidimensional_extrafunc(self):
        # the extra (len(condlist)+1-th) function fills unmatched positions
        x = np.array([[-2.5, -1.5, -0.5],
                      [0.5, 1.5, 2.5]])
        y = piecewise(x, [x < 0, x >= 2], [-1, 1, 3])
        assert_array_equal(y, np.array([[-1., -1., -1.],
                                        [3., 3., 1.]]))
class TestBincount:
    """Tests for np.bincount (integer occurrence counting)."""
    def test_simple(self):
        y = np.bincount(np.arange(4))
        assert_array_equal(y, np.ones(4))
    def test_simple2(self):
        # gaps in the input leave zero counts
        y = np.bincount(np.array([1, 5, 2, 4, 1]))
        assert_array_equal(y, np.array([0, 2, 1, 0, 1, 1]))
    def test_simple_weight(self):
        # weights are summed per bin instead of counting
        x = np.arange(4)
        w = np.array([0.2, 0.3, 0.5, 0.1])
        y = np.bincount(x, w)
        assert_array_equal(y, w)
    def test_simple_weight2(self):
        # repeated values accumulate their weights
        x = np.array([1, 2, 4, 5, 2])
        w = np.array([0.2, 0.3, 0.5, 0.1, 0.2])
        y = np.bincount(x, w)
        assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1]))
    def test_with_minlength(self):
        # minlength pads the result with trailing zeros
        x = np.array([0, 1, 0, 1, 1])
        y = np.bincount(x, minlength=3)
        assert_array_equal(y, np.array([2, 3, 0]))
        x = []
        y = np.bincount(x, minlength=0)
        assert_array_equal(y, np.array([]))
    def test_with_minlength_smaller_than_maxvalue(self):
        # minlength smaller than needed is ignored
        x = np.array([0, 1, 1, 2, 2, 3, 3])
        y = np.bincount(x, minlength=2)
        assert_array_equal(y, np.array([1, 2, 2, 2]))
        y = np.bincount(x, minlength=0)
        assert_array_equal(y, np.array([1, 2, 2, 2]))
    def test_with_minlength_and_weights(self):
        x = np.array([1, 2, 4, 5, 2])
        w = np.array([0.2, 0.3, 0.5, 0.1, 0.2])
        y = np.bincount(x, w, 8)
        assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1, 0, 0]))
    def test_empty(self):
        # empty input gives an empty count array
        x = np.array([], dtype=int)
        y = np.bincount(x)
        assert_array_equal(x, y)
    def test_empty_with_minlength(self):
        x = np.array([], dtype=int)
        y = np.bincount(x, minlength=5)
        assert_array_equal(y, np.zeros(5, dtype=int))
    def test_with_incorrect_minlength(self):
        # non-integer or negative minlength is rejected
        x = np.array([], dtype=int)
        assert_raises_regex(TypeError,
                            "'str' object cannot be interpreted",
                            lambda: np.bincount(x, minlength="foobar"))
        assert_raises_regex(ValueError,
                            "must not be negative",
                            lambda: np.bincount(x, minlength=-1))
        x = np.arange(5)
        assert_raises_regex(TypeError,
                            "'str' object cannot be interpreted",
                            lambda: np.bincount(x, minlength="foobar"))
        assert_raises_regex(ValueError,
                            "must not be negative",
                            lambda: np.bincount(x, minlength=-1))
    @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
    def test_dtype_reference_leaks(self):
        # gh-6805: bincount must not leak references to the dtype singletons
        intp_refcount = sys.getrefcount(np.dtype(np.intp))
        double_refcount = sys.getrefcount(np.dtype(np.double))
        for j in range(10):
            np.bincount([1, 2, 3])
        assert_equal(sys.getrefcount(np.dtype(np.intp)), intp_refcount)
        assert_equal(sys.getrefcount(np.dtype(np.double)), double_refcount)
        for j in range(10):
            np.bincount([1, 2, 3], [4, 5, 6])
        assert_equal(sys.getrefcount(np.dtype(np.intp)), intp_refcount)
        assert_equal(sys.getrefcount(np.dtype(np.double)), double_refcount)
class TestInterp:
    """Tests for np.interp (1-d linear interpolation)."""
    def test_exceptions(self):
        # empty or mismatched xp/fp, and period=0, are rejected
        assert_raises(ValueError, interp, 0, [], [])
        assert_raises(ValueError, interp, 0, [0], [1, 2])
        assert_raises(ValueError, interp, 0, [0, 1], [1, 2], period=0)
        assert_raises(ValueError, interp, 0, [], [], period=360)
        assert_raises(ValueError, interp, 0, [0], [1, 2], period=360)
    def test_basic(self):
        # interpolating the identity returns the query points
        x = np.linspace(0, 1, 5)
        y = np.linspace(0, 1, 5)
        x0 = np.linspace(0, 1, 50)
        assert_almost_equal(np.interp(x0, x, y), x0)
    def test_right_left_behavior(self):
        # Needs range of sizes to test different code paths.
        # size ==1 is special cased, 1 < size < 5 is linear search, and
        # size >= 5 goes through local search and possibly binary search.
        for size in range(1, 10):
            xp = np.arange(size, dtype=np.double)
            yp = np.ones(size, dtype=np.double)
            incpts = np.array([-1, 0, size - 1, size], dtype=np.double)
            decpts = incpts[::-1]
            incres = interp(incpts, xp, yp)
            decres = interp(decpts, xp, yp)
            inctgt = np.array([1, 1, 1, 1], dtype=float)
            dectgt = inctgt[::-1]
            assert_equal(incres, inctgt)
            assert_equal(decres, dectgt)
            incres = interp(incpts, xp, yp, left=0)
            decres = interp(decpts, xp, yp, left=0)
            inctgt = np.array([0, 1, 1, 1], dtype=float)
            dectgt = inctgt[::-1]
            assert_equal(incres, inctgt)
            assert_equal(decres, dectgt)
            incres = interp(incpts, xp, yp, right=2)
            decres = interp(decpts, xp, yp, right=2)
            inctgt = np.array([1, 1, 1, 2], dtype=float)
            dectgt = inctgt[::-1]
            assert_equal(incres, inctgt)
            assert_equal(decres, dectgt)
            incres = interp(incpts, xp, yp, left=0, right=2)
            decres = interp(decpts, xp, yp, left=0, right=2)
            inctgt = np.array([0, 1, 1, 2], dtype=float)
            dectgt = inctgt[::-1]
            assert_equal(incres, inctgt)
            assert_equal(decres, dectgt)
    def test_scalar_interpolation_point(self):
        # scalar query points of several types, including nan
        x = np.linspace(0, 1, 5)
        y = np.linspace(0, 1, 5)
        x0 = 0
        assert_almost_equal(np.interp(x0, x, y), x0)
        x0 = .3
        assert_almost_equal(np.interp(x0, x, y), x0)
        x0 = np.float32(.3)
        assert_almost_equal(np.interp(x0, x, y), x0)
        x0 = np.float64(.3)
        assert_almost_equal(np.interp(x0, x, y), x0)
        x0 = np.nan
        assert_almost_equal(np.interp(x0, x, y), x0)
    def test_non_finite_behavior_exact_x(self):
        # inf/nan in fp propagate into adjacent intervals
        x = [1, 2, 2.5, 3, 4]
        xp = [1, 2, 3, 4]
        fp = [1, 2, np.inf, 4]
        assert_almost_equal(np.interp(x, xp, fp), [1, 2, np.inf, np.inf, 4])
        fp = [1, 2, np.nan, 4]
        assert_almost_equal(np.interp(x, xp, fp), [1, 2, np.nan, np.nan, 4])
    @pytest.fixture(params=[
        lambda x: np.float_(x),
        lambda x: _make_complex(x, 0),
        lambda x: _make_complex(0, x),
        lambda x: _make_complex(x, np.multiply(x, -2))
    ], ids=[
        'real',
        'complex-real',
        'complex-imag',
        'complex-both'
    ])
    def sc(self, request):
        """ scale function used by the below tests """
        return request.param
    def test_non_finite_any_nan(self, sc):
        """ test that nans are propagated """
        assert_equal(np.interp(0.5, [np.nan,      1], sc([     0,     10])), sc(np.nan))
        assert_equal(np.interp(0.5, [     0, np.nan], sc([     0,     10])), sc(np.nan))
        assert_equal(np.interp(0.5, [     0,      1], sc([np.nan,     10])), sc(np.nan))
        assert_equal(np.interp(0.5, [     0,      1], sc([     0, np.nan])), sc(np.nan))
    def test_non_finite_inf(self, sc):
        """ Test that interp between opposite infs gives nan """
        assert_equal(np.interp(0.5, [-np.inf, +np.inf], sc([      0,      10])), sc(np.nan))
        assert_equal(np.interp(0.5, [      0,       1], sc([-np.inf, +np.inf])), sc(np.nan))
        assert_equal(np.interp(0.5, [      0,       1], sc([+np.inf, -np.inf])), sc(np.nan))
        # unless the y values are equal
        assert_equal(np.interp(0.5, [-np.inf, +np.inf], sc([     10,      10])), sc(10))
    def test_non_finite_half_inf_xf(self, sc):
        """ Test that interp where both axes have a bound at inf gives nan """
        assert_equal(np.interp(0.5, [-np.inf,       1], sc([-np.inf,      10])), sc(np.nan))
        assert_equal(np.interp(0.5, [-np.inf,       1], sc([+np.inf,      10])), sc(np.nan))
        assert_equal(np.interp(0.5, [-np.inf,       1], sc([      0, -np.inf])), sc(np.nan))
        assert_equal(np.interp(0.5, [-np.inf,       1], sc([      0, +np.inf])), sc(np.nan))
        assert_equal(np.interp(0.5, [      0, +np.inf], sc([-np.inf,      10])), sc(np.nan))
        assert_equal(np.interp(0.5, [      0, +np.inf], sc([+np.inf,      10])), sc(np.nan))
        assert_equal(np.interp(0.5, [      0, +np.inf], sc([      0, -np.inf])), sc(np.nan))
        assert_equal(np.interp(0.5, [      0, +np.inf], sc([      0, +np.inf])), sc(np.nan))
    def test_non_finite_half_inf_x(self, sc):
        """ Test interp where the x axis has a bound at inf """
        assert_equal(np.interp(0.5, [-np.inf, -np.inf], sc([0, 10])), sc(10))
        assert_equal(np.interp(0.5, [-np.inf, 1      ], sc([0, 10])), sc(10))
        assert_equal(np.interp(0.5, [      0, +np.inf], sc([0, 10])), sc(0))
        assert_equal(np.interp(0.5, [+np.inf, +np.inf], sc([0, 10])), sc(0))
    def test_non_finite_half_inf_f(self, sc):
        """ Test interp where the f axis has a bound at inf """
        assert_equal(np.interp(0.5, [0, 1], sc([      0, -np.inf])), sc(-np.inf))
        assert_equal(np.interp(0.5, [0, 1], sc([      0, +np.inf])), sc(+np.inf))
        assert_equal(np.interp(0.5, [0, 1], sc([-np.inf,      10])), sc(-np.inf))
        assert_equal(np.interp(0.5, [0, 1], sc([+np.inf,      10])), sc(+np.inf))
        assert_equal(np.interp(0.5, [0, 1], sc([-np.inf, -np.inf])), sc(-np.inf))
        assert_equal(np.interp(0.5, [0, 1], sc([+np.inf, +np.inf])), sc(+np.inf))
    def test_complex_interp(self):
        # test complex interpolation
        x = np.linspace(0, 1, 5)
        y = np.linspace(0, 1, 5) + (1 + np.linspace(0, 1, 5))*1.0j
        x0 = 0.3
        y0 = x0 + (1+x0)*1.0j
        assert_almost_equal(np.interp(x0, x, y), y0)
        # test complex left and right
        x0 = -1
        left = 2 + 3.0j
        assert_almost_equal(np.interp(x0, x, y, left=left), left)
        x0 = 2.0
        right = 2 + 3.0j
        assert_almost_equal(np.interp(x0, x, y, right=right), right)
        # test complex non finite
        x = [1, 2, 2.5, 3, 4]
        xp = [1, 2, 3, 4]
        fp = [1, 2+1j, np.inf, 4]
        y = [1, 2+1j, np.inf+0.5j, np.inf, 4]
        assert_almost_equal(np.interp(x, xp, fp), y)
        # test complex periodic
        x = [-180, -170, -185, 185, -10, -5, 0, 365]
        xp = [190, -190, 350, -350]
        fp = [5+1.0j, 10+2j, 3+3j, 4+4j]
        y = [7.5+1.5j, 5.+1.0j, 8.75+1.75j, 6.25+1.25j, 3.+3j, 3.25+3.25j,
             3.5+3.5j, 3.75+3.75j]
        assert_almost_equal(np.interp(x, xp, fp, period=360), y)
    def test_zero_dimensional_interpolation_point(self):
        # 0-d array query points return 0-d (scalar) results
        x = np.linspace(0, 1, 5)
        y = np.linspace(0, 1, 5)
        x0 = np.array(.3)
        assert_almost_equal(np.interp(x0, x, y), x0)
        xp = np.array([0, 2, 4])
        fp = np.array([1, -1, 1])
        actual = np.interp(np.array(1), xp, fp)
        assert_equal(actual, 0)
        assert_(isinstance(actual, np.float64))
        actual = np.interp(np.array(4.5), xp, fp, period=4)
        assert_equal(actual, 0.5)
        assert_(isinstance(actual, np.float64))
    def test_if_len_x_is_small(self):
        # dense sample table: interp(pi, sin) should be ~sin(pi) == 0
        xp = np.arange(0, 10, 0.0001)
        fp = np.sin(xp)
        assert_almost_equal(np.interp(np.pi, xp, fp), 0.0)
    def test_period(self):
        # periodic interpolation normalizes x and xp into one period
        x = [-180, -170, -185, 185, -10, -5, 0, 365]
        xp = [190, -190, 350, -350]
        fp = [5, 10, 3, 4]
        y = [7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75]
        assert_almost_equal(np.interp(x, xp, fp, period=360), y)
        # memory order of the query points must not matter
        x = np.array(x, order='F').reshape(2, -1)
        y = np.array(y, order='C').reshape(2, -1)
        assert_almost_equal(np.interp(x, xp, fp, period=360), y)
def compare_results(res, desired):
    """Assert elementwise array equality of `res` against `desired`.

    Iterates over `desired`, so `res` must be at least as long.
    """
    for index, expected in enumerate(desired):
        assert_array_equal(res[index], expected)
class TestPercentile:
    """Regression tests for np.percentile: values, shapes, out=, NaN and axis handling."""

    def test_basic(self):
        x = np.arange(8) * 0.5
        assert_equal(np.percentile(x, 0), 0.)
        assert_equal(np.percentile(x, 100), 3.5)
        assert_equal(np.percentile(x, 50), 1.75)
        # any NaN in the input propagates to the result
        x[1] = np.nan
        assert_equal(np.percentile(x, 0), np.nan)
        assert_equal(np.percentile(x, 0, interpolation='nearest'), np.nan)

    def test_fraction(self):
        # exact arithmetic: Fraction inputs keep the Fraction type
        x = [Fraction(i, 2) for i in range(8)]

        p = np.percentile(x, Fraction(0))
        assert_equal(p, Fraction(0))
        assert_equal(type(p), Fraction)

        p = np.percentile(x, Fraction(100))
        assert_equal(p, Fraction(7, 2))
        assert_equal(type(p), Fraction)

        p = np.percentile(x, Fraction(50))
        assert_equal(p, Fraction(7, 4))
        assert_equal(type(p), Fraction)

    def test_api(self):
        # all positional arguments (a, q, axis, out, overwrite_input,
        # interpolation) are accepted without keywords
        d = np.ones(5)
        np.percentile(d, 5, None, None, False)
        np.percentile(d, 5, None, None, False, 'linear')
        o = np.ones((1,))
        np.percentile(d, 5, None, o, False, 'linear')

    def test_2D(self):
        x = np.array([[1, 1, 1],
                      [1, 1, 1],
                      [4, 4, 3],
                      [1, 1, 1],
                      [1, 1, 1]])
        assert_array_equal(np.percentile(x, 50, axis=0), [1, 1, 1])

    def test_linear(self):
        # Test defaults
        assert_equal(np.percentile(range(10), 50), 4.5)

        # explicitly specify interpolation_method 'linear' (the default)
        assert_equal(np.percentile(range(10), 50,
                                   interpolation='linear'), 4.5)

    def test_lower_higher(self):
        # interpolation_method 'lower'/'higher'
        assert_equal(np.percentile(range(10), 50,
                                   interpolation='lower'), 4)
        assert_equal(np.percentile(range(10), 50,
                                   interpolation='higher'), 5)

    def test_midpoint(self):
        assert_equal(np.percentile(range(10), 51,
                                   interpolation='midpoint'), 4.5)
        assert_equal(np.percentile(range(11), 51,
                                   interpolation='midpoint'), 5.5)
        assert_equal(np.percentile(range(11), 50,
                                   interpolation='midpoint'), 5)

    def test_nearest(self):
        assert_equal(np.percentile(range(10), 51,
                                   interpolation='nearest'), 5)
        assert_equal(np.percentile(range(10), 49,
                                   interpolation='nearest'), 4)

    def test_sequence(self):
        x = np.arange(8) * 0.5
        assert_equal(np.percentile(x, [0, 100, 50]), [0, 3.5, 1.75])

    def test_axis(self):
        x = np.arange(12).reshape(3, 4)

        assert_equal(np.percentile(x, (25, 50, 100)), [2.75, 5.5, 11.0])

        r0 = [[2, 3, 4, 5], [4, 5, 6, 7], [8, 9, 10, 11]]
        assert_equal(np.percentile(x, (25, 50, 100), axis=0), r0)

        r1 = [[0.75, 1.5, 3], [4.75, 5.5, 7], [8.75, 9.5, 11]]
        assert_equal(np.percentile(x, (25, 50, 100), axis=1), np.array(r1).T)

        # ensure qth axis is always first as with np.array(old_percentile(..))
        x = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
        assert_equal(np.percentile(x, (25, 50)).shape, (2,))
        assert_equal(np.percentile(x, (25, 50, 75)).shape, (3,))
        assert_equal(np.percentile(x, (25, 50), axis=0).shape, (2, 4, 5, 6))
        assert_equal(np.percentile(x, (25, 50), axis=1).shape, (2, 3, 5, 6))
        assert_equal(np.percentile(x, (25, 50), axis=2).shape, (2, 3, 4, 6))
        assert_equal(np.percentile(x, (25, 50), axis=3).shape, (2, 3, 4, 5))
        assert_equal(
            np.percentile(x, (25, 50, 75), axis=1).shape, (3, 3, 5, 6))
        assert_equal(np.percentile(x, (25, 50),
                                   interpolation="higher").shape, (2,))
        assert_equal(np.percentile(x, (25, 50, 75),
                                   interpolation="higher").shape, (3,))
        assert_equal(np.percentile(x, (25, 50), axis=0,
                                   interpolation="higher").shape, (2, 4, 5, 6))
        assert_equal(np.percentile(x, (25, 50), axis=1,
                                   interpolation="higher").shape, (2, 3, 5, 6))
        assert_equal(np.percentile(x, (25, 50), axis=2,
                                   interpolation="higher").shape, (2, 3, 4, 6))
        assert_equal(np.percentile(x, (25, 50), axis=3,
                                   interpolation="higher").shape, (2, 3, 4, 5))
        assert_equal(np.percentile(x, (25, 50, 75), axis=1,
                                   interpolation="higher").shape, (3, 3, 5, 6))

    def test_scalar_q(self):
        # test for no empty dimensions for compatibility with old percentile
        x = np.arange(12).reshape(3, 4)
        assert_equal(np.percentile(x, 50), 5.5)
        assert_(np.isscalar(np.percentile(x, 50)))
        r0 = np.array([4., 5., 6., 7.])
        assert_equal(np.percentile(x, 50, axis=0), r0)
        assert_equal(np.percentile(x, 50, axis=0).shape, r0.shape)
        r1 = np.array([1.5, 5.5, 9.5])
        assert_almost_equal(np.percentile(x, 50, axis=1), r1)
        assert_equal(np.percentile(x, 50, axis=1).shape, r1.shape)

        # scalar q with an out= array
        out = np.empty(1)
        assert_equal(np.percentile(x, 50, out=out), 5.5)
        assert_equal(out, 5.5)
        out = np.empty(4)
        assert_equal(np.percentile(x, 50, axis=0, out=out), r0)
        assert_equal(out, r0)
        out = np.empty(3)
        assert_equal(np.percentile(x, 50, axis=1, out=out), r1)
        assert_equal(out, r1)

        # test for no empty dimensions for compatibility with old percentile
        x = np.arange(12).reshape(3, 4)
        assert_equal(np.percentile(x, 50, interpolation='lower'), 5.)
        assert_(np.isscalar(np.percentile(x, 50)))
        r0 = np.array([4., 5., 6., 7.])
        c0 = np.percentile(x, 50, interpolation='lower', axis=0)
        assert_equal(c0, r0)
        assert_equal(c0.shape, r0.shape)
        r1 = np.array([1., 5., 9.])
        c1 = np.percentile(x, 50, interpolation='lower', axis=1)
        assert_almost_equal(c1, r1)
        assert_equal(c1.shape, r1.shape)

        # 'lower' interpolation preserves the integer dtype, so an
        # integer out= array is acceptable here
        out = np.empty((), dtype=x.dtype)
        c = np.percentile(x, 50, interpolation='lower', out=out)
        assert_equal(c, 5)
        assert_equal(out, 5)
        out = np.empty(4, dtype=x.dtype)
        c = np.percentile(x, 50, interpolation='lower', axis=0, out=out)
        assert_equal(c, r0)
        assert_equal(out, r0)
        out = np.empty(3, dtype=x.dtype)
        c = np.percentile(x, 50, interpolation='lower', axis=1, out=out)
        assert_equal(c, r1)
        assert_equal(out, r1)

    def test_exception(self):
        # invalid interpolation name and out-of-range q values must raise
        assert_raises(ValueError, np.percentile, [1, 2], 56,
                      interpolation='foobar')
        assert_raises(ValueError, np.percentile, [1], 101)
        assert_raises(ValueError, np.percentile, [1], -1)
        assert_raises(ValueError, np.percentile, [1], list(range(50)) + [101])
        assert_raises(ValueError, np.percentile, [1], list(range(50)) + [-0.1])

    def test_percentile_list(self):
        assert_equal(np.percentile([1, 2, 3], 0), 1)

    def test_percentile_out(self):
        # results written through out= must equal the returned values
        x = np.array([1, 2, 3])
        y = np.zeros((3,))
        p = (1, 2, 3)
        np.percentile(x, p, out=y)
        assert_equal(y, np.percentile(x, p))

        x = np.array([[1, 2, 3],
                      [4, 5, 6]])
        y = np.zeros((3, 3))
        np.percentile(x, p, axis=0, out=y)
        assert_equal(y, np.percentile(x, p, axis=0))

        y = np.zeros((3, 2))
        np.percentile(x, p, axis=1, out=y)
        assert_equal(y, np.percentile(x, p, axis=1))

        x = np.arange(12).reshape(3, 4)
        # q.dim > 1, float
        r0 = np.array([[2., 3., 4., 5.], [4., 5., 6., 7.]])
        out = np.empty((2, 4))
        assert_equal(np.percentile(x, (25, 50), axis=0, out=out), r0)
        assert_equal(out, r0)
        r1 = np.array([[0.75, 4.75, 8.75], [1.5, 5.5, 9.5]])
        out = np.empty((2, 3))
        assert_equal(np.percentile(x, (25, 50), axis=1, out=out), r1)
        assert_equal(out, r1)

        # q.dim > 1, int
        r0 = np.array([[0, 1, 2, 3], [4, 5, 6, 7]])
        out = np.empty((2, 4), dtype=x.dtype)
        c = np.percentile(x, (25, 50), interpolation='lower', axis=0, out=out)
        assert_equal(c, r0)
        assert_equal(out, r0)
        r1 = np.array([[0, 4, 8], [1, 5, 9]])
        out = np.empty((2, 3), dtype=x.dtype)
        c = np.percentile(x, (25, 50), interpolation='lower', axis=1, out=out)
        assert_equal(c, r1)
        assert_equal(out, r1)

    def test_percentile_empty_dim(self):
        # empty dims are preserved
        d = np.arange(11 * 2).reshape(11, 1, 2, 1)
        assert_array_equal(np.percentile(d, 50, axis=0).shape, (1, 2, 1))
        assert_array_equal(np.percentile(d, 50, axis=1).shape, (11, 2, 1))
        assert_array_equal(np.percentile(d, 50, axis=2).shape, (11, 1, 1))
        assert_array_equal(np.percentile(d, 50, axis=3).shape, (11, 1, 2))
        assert_array_equal(np.percentile(d, 50, axis=-1).shape, (11, 1, 2))
        assert_array_equal(np.percentile(d, 50, axis=-2).shape, (11, 1, 1))
        assert_array_equal(np.percentile(d, 50, axis=-3).shape, (11, 2, 1))
        assert_array_equal(np.percentile(d, 50, axis=-4).shape, (1, 2, 1))

        assert_array_equal(np.percentile(d, 50, axis=2,
                                         interpolation='midpoint').shape,
                           (11, 1, 1))
        assert_array_equal(np.percentile(d, 50, axis=-2,
                                         interpolation='midpoint').shape,
                           (11, 1, 1))

        # list-of-q results prepend the q axis
        assert_array_equal(np.array(np.percentile(d, [10, 50], axis=0)).shape,
                           (2, 1, 2, 1))
        assert_array_equal(np.array(np.percentile(d, [10, 50], axis=1)).shape,
                           (2, 11, 2, 1))
        assert_array_equal(np.array(np.percentile(d, [10, 50], axis=2)).shape,
                           (2, 11, 1, 1))
        assert_array_equal(np.array(np.percentile(d, [10, 50], axis=3)).shape,
                           (2, 11, 1, 2))

    def test_percentile_no_overwrite(self):
        a = np.array([2, 3, 4, 1])
        np.percentile(a, [50], overwrite_input=False)
        assert_equal(a, np.array([2, 3, 4, 1]))

        a = np.array([2, 3, 4, 1])
        np.percentile(a, [50])
        assert_equal(a, np.array([2, 3, 4, 1]))

    def test_no_p_overwrite(self):
        # the q (percentile) argument itself must never be mutated
        p = np.linspace(0., 100., num=5)
        np.percentile(np.arange(100.), p, interpolation="midpoint")
        assert_array_equal(p, np.linspace(0., 100., num=5))
        p = np.linspace(0., 100., num=5).tolist()
        np.percentile(np.arange(100.), p, interpolation="midpoint")
        assert_array_equal(p, np.linspace(0., 100., num=5).tolist())

    def test_percentile_overwrite(self):
        a = np.array([2, 3, 4, 1])
        b = np.percentile(a, [50], overwrite_input=True)
        assert_equal(b, np.array([2.5]))

        b = np.percentile([2, 3, 4, 1], [50], overwrite_input=True)
        assert_equal(b, np.array([2.5]))

    def test_extended_axis(self):
        # tuple-of-axes reduction must match reducing a flattened slice
        o = np.random.normal(size=(71, 23))
        x = np.dstack([o] * 10)
        assert_equal(np.percentile(x, 30, axis=(0, 1)), np.percentile(o, 30))
        x = np.moveaxis(x, -1, 0)
        assert_equal(np.percentile(x, 30, axis=(-2, -1)), np.percentile(o, 30))
        x = x.swapaxes(0, 1).copy()
        assert_equal(np.percentile(x, 30, axis=(0, -1)), np.percentile(o, 30))
        x = x.swapaxes(0, 1).copy()

        assert_equal(np.percentile(x, [25, 60], axis=(0, 1, 2)),
                     np.percentile(x, [25, 60], axis=None))
        assert_equal(np.percentile(x, [25, 60], axis=(0,)),
                     np.percentile(x, [25, 60], axis=0))

        d = np.arange(3 * 5 * 7 * 11).reshape((3, 5, 7, 11))
        np.random.shuffle(d.ravel())
        assert_equal(np.percentile(d, 25, axis=(0, 1, 2))[0],
                     np.percentile(d[:,:,:, 0].flatten(), 25))
        assert_equal(np.percentile(d, [10, 90], axis=(0, 1, 3))[:, 1],
                     np.percentile(d[:,:, 1,:].flatten(), [10, 90]))
        assert_equal(np.percentile(d, 25, axis=(3, 1, -4))[2],
                     np.percentile(d[:,:, 2,:].flatten(), 25))
        assert_equal(np.percentile(d, 25, axis=(3, 1, 2))[2],
                     np.percentile(d[2,:,:,:].flatten(), 25))
        assert_equal(np.percentile(d, 25, axis=(3, 2))[2, 1],
                     np.percentile(d[2, 1,:,:].flatten(), 25))
        assert_equal(np.percentile(d, 25, axis=(1, -2))[2, 1],
                     np.percentile(d[2,:,:, 1].flatten(), 25))
        assert_equal(np.percentile(d, 25, axis=(1, 3))[2, 2],
                     np.percentile(d[2,:, 2,:].flatten(), 25))

    def test_extended_axis_invalid(self):
        d = np.ones((3, 5, 7, 11))
        assert_raises(np.AxisError, np.percentile, d, axis=-5, q=25)
        assert_raises(np.AxisError, np.percentile, d, axis=(0, -5), q=25)
        assert_raises(np.AxisError, np.percentile, d, axis=4, q=25)
        assert_raises(np.AxisError, np.percentile, d, axis=(0, 4), q=25)
        # each of these refers to the same axis twice
        assert_raises(ValueError, np.percentile, d, axis=(1, 1), q=25)
        assert_raises(ValueError, np.percentile, d, axis=(-1, -1), q=25)
        assert_raises(ValueError, np.percentile, d, axis=(3, -1), q=25)

    def test_keepdims(self):
        d = np.ones((3, 5, 7, 11))
        assert_equal(np.percentile(d, 7, axis=None, keepdims=True).shape,
                     (1, 1, 1, 1))
        assert_equal(np.percentile(d, 7, axis=(0, 1), keepdims=True).shape,
                     (1, 1, 7, 11))
        assert_equal(np.percentile(d, 7, axis=(0, 3), keepdims=True).shape,
                     (1, 5, 7, 1))
        assert_equal(np.percentile(d, 7, axis=(1,), keepdims=True).shape,
                     (3, 1, 7, 11))
        assert_equal(np.percentile(d, 7, (0, 1, 2, 3), keepdims=True).shape,
                     (1, 1, 1, 1))
        assert_equal(np.percentile(d, 7, axis=(0, 1, 3), keepdims=True).shape,
                     (1, 1, 7, 1))

        # with a list of q the q axis is prepended even with keepdims
        assert_equal(np.percentile(d, [1, 7], axis=(0, 1, 3),
                                   keepdims=True).shape, (2, 1, 1, 7, 1))
        assert_equal(np.percentile(d, [1, 7], axis=(0, 3),
                                   keepdims=True).shape, (2, 1, 5, 7, 1))

    def test_out(self):
        o = np.zeros((4,))
        d = np.ones((3, 4))
        assert_equal(np.percentile(d, 0, 0, out=o), o)
        assert_equal(np.percentile(d, 0, 0, interpolation='nearest', out=o), o)
        o = np.zeros((3,))
        assert_equal(np.percentile(d, 1, 1, out=o), o)
        assert_equal(np.percentile(d, 1, 1, interpolation='nearest', out=o), o)
        o = np.zeros(())
        assert_equal(np.percentile(d, 2, out=o), o)
        assert_equal(np.percentile(d, 2, interpolation='nearest', out=o), o)

    def test_out_nan(self):
        # out= must also work when the data contains NaN (which triggers
        # a RuntimeWarning internally)
        with warnings.catch_warnings(record=True):
            warnings.filterwarnings('always', '', RuntimeWarning)
            o = np.zeros((4,))
            d = np.ones((3, 4))
            d[2, 1] = np.nan
            assert_equal(np.percentile(d, 0, 0, out=o), o)
            assert_equal(
                np.percentile(d, 0, 0, interpolation='nearest', out=o), o)
            o = np.zeros((3,))
            assert_equal(np.percentile(d, 1, 1, out=o), o)
            assert_equal(
                np.percentile(d, 1, 1, interpolation='nearest', out=o), o)
            o = np.zeros(())
            assert_equal(np.percentile(d, 1, out=o), o)
            assert_equal(
                np.percentile(d, 1, interpolation='nearest', out=o), o)

    def test_nan_behavior(self):
        # NaN anywhere along the reduced axis makes that result NaN
        a = np.arange(24, dtype=float)
        a[2] = np.nan
        assert_equal(np.percentile(a, 0.3), np.nan)
        assert_equal(np.percentile(a, 0.3, axis=0), np.nan)
        assert_equal(np.percentile(a, [0.3, 0.6], axis=0),
                     np.array([np.nan] * 2))

        a = np.arange(24, dtype=float).reshape(2, 3, 4)
        a[1, 2, 3] = np.nan
        a[1, 1, 2] = np.nan

        # no axis
        assert_equal(np.percentile(a, 0.3), np.nan)
        assert_equal(np.percentile(a, 0.3).ndim, 0)

        # axis0 zerod
        b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, 0)
        b[2, 3] = np.nan
        b[1, 2] = np.nan
        assert_equal(np.percentile(a, 0.3, 0), b)

        # axis0 not zerod
        b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4),
                          [0.3, 0.6], 0)
        b[:, 2, 3] = np.nan
        b[:, 1, 2] = np.nan
        assert_equal(np.percentile(a, [0.3, 0.6], 0), b)

        # axis1 zerod
        b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, 1)
        b[1, 3] = np.nan
        b[1, 2] = np.nan
        assert_equal(np.percentile(a, 0.3, 1), b)

        # axis1 not zerod
        b = np.percentile(
            np.arange(24, dtype=float).reshape(2, 3, 4), [0.3, 0.6], 1)
        b[:, 1, 3] = np.nan
        b[:, 1, 2] = np.nan
        assert_equal(np.percentile(a, [0.3, 0.6], 1), b)

        # axis02 zerod
        b = np.percentile(
            np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, (0, 2))
        b[1] = np.nan
        b[2] = np.nan
        assert_equal(np.percentile(a, 0.3, (0, 2)), b)

        # axis02 not zerod
        b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4),
                          [0.3, 0.6], (0, 2))
        b[:, 1] = np.nan
        b[:, 2] = np.nan
        assert_equal(np.percentile(a, [0.3, 0.6], (0, 2)), b)

        # axis02 not zerod with nearest interpolation
        b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4),
                          [0.3, 0.6], (0, 2), interpolation='nearest')
        b[:, 1] = np.nan
        b[:, 2] = np.nan
        assert_equal(np.percentile(
            a, [0.3, 0.6], (0, 2), interpolation='nearest'), b)
class TestQuantile:
    # most of this is already tested by TestPercentile

    def test_basic(self):
        x = np.arange(8) * 0.5
        assert_equal(np.quantile(x, 0), 0.)
        assert_equal(np.quantile(x, 1), 3.5)
        assert_equal(np.quantile(x, 0.5), 1.75)

    def test_fraction(self):
        # fractional input, integral quantile
        x = [Fraction(i, 2) for i in range(8)]
        q = np.quantile(x, 0)
        assert_equal(q, 0)
        assert_equal(type(q), Fraction)

        q = np.quantile(x, 1)
        assert_equal(q, Fraction(7, 2))
        assert_equal(type(q), Fraction)

        q = np.quantile(x, Fraction(1, 2))
        assert_equal(q, Fraction(7, 4))
        assert_equal(type(q), Fraction)

        # repeat with integral input but fractional quantile
        x = np.arange(8)
        assert_equal(np.quantile(x, Fraction(1, 2)), Fraction(7, 2))

    def test_no_p_overwrite(self):
        # this is worth retesting, because quantile does not make a copy
        p0 = np.array([0, 0.75, 0.25, 0.5, 1.0])
        p = p0.copy()
        np.quantile(np.arange(100.), p, interpolation="midpoint")
        assert_array_equal(p, p0)

        # same guarantee for a plain Python list of quantiles
        p0 = p0.tolist()
        p = p.tolist()
        np.quantile(np.arange(100.), p, interpolation="midpoint")
        assert_array_equal(p, p0)
class TestMedian:
    """Regression tests for np.median: values, axis tuples, out=, NaN and empty input."""

    def test_basic(self):
        a0 = np.array(1)
        a1 = np.arange(2)
        a2 = np.arange(6).reshape(2, 3)
        assert_equal(np.median(a0), 1)
        assert_allclose(np.median(a1), 0.5)
        assert_allclose(np.median(a2), 2.5)
        assert_allclose(np.median(a2, axis=0), [1.5, 2.5, 3.5])
        assert_equal(np.median(a2, axis=1), [1, 4])
        assert_allclose(np.median(a2, axis=None), 2.5)

        a = np.array([0.0444502, 0.0463301, 0.141249, 0.0606775])
        assert_almost_equal((a[1] + a[3]) / 2., np.median(a))
        a = np.array([0.0463301, 0.0444502, 0.141249])
        assert_equal(a[0], np.median(a))
        a = np.array([0.0444502, 0.141249, 0.0463301])
        assert_equal(a[-1], np.median(a))
        # check array scalar result
        assert_equal(np.median(a).ndim, 0)
        a[1] = np.nan
        assert_equal(np.median(a).ndim, 0)

    def test_axis_keyword(self):
        a3 = np.array([[2, 3],
                       [0, 1],
                       [6, 7],
                       [4, 5]])
        # median with any axis must not modify its input
        for a in [a3, np.random.randint(0, 100, size=(2, 3, 4))]:
            orig = a.copy()
            np.median(a, axis=None)
            for ax in range(a.ndim):
                np.median(a, axis=ax)
            assert_array_equal(a, orig)

        assert_allclose(np.median(a3, axis=0), [3, 4])
        assert_allclose(np.median(a3.T, axis=1), [3, 4])
        assert_allclose(np.median(a3), 3.5)
        assert_allclose(np.median(a3, axis=None), 3.5)
        assert_allclose(np.median(a3.T), 3.5)

    def test_overwrite_keyword(self):
        a3 = np.array([[2, 3],
                       [0, 1],
                       [6, 7],
                       [4, 5]])
        a0 = np.array(1)
        a1 = np.arange(2)
        a2 = np.arange(6).reshape(2, 3)
        assert_allclose(np.median(a0.copy(), overwrite_input=True), 1)
        assert_allclose(np.median(a1.copy(), overwrite_input=True), 0.5)
        assert_allclose(np.median(a2.copy(), overwrite_input=True), 2.5)
        assert_allclose(np.median(a2.copy(), overwrite_input=True, axis=0),
                        [1.5, 2.5, 3.5])
        assert_allclose(
            np.median(a2.copy(), overwrite_input=True, axis=1), [1, 4])
        assert_allclose(
            np.median(a2.copy(), overwrite_input=True, axis=None), 2.5)
        assert_allclose(
            np.median(a3.copy(), overwrite_input=True, axis=0), [3, 4])
        assert_allclose(np.median(a3.T.copy(), overwrite_input=True, axis=1),
                        [3, 4])

        # overwrite_input must give the same values as the copy-based path
        a4 = np.arange(3 * 4 * 5, dtype=np.float32).reshape((3, 4, 5))
        np.random.shuffle(a4.ravel())
        assert_allclose(np.median(a4, axis=None),
                        np.median(a4.copy(), axis=None, overwrite_input=True))
        assert_allclose(np.median(a4, axis=0),
                        np.median(a4.copy(), axis=0, overwrite_input=True))
        assert_allclose(np.median(a4, axis=1),
                        np.median(a4.copy(), axis=1, overwrite_input=True))
        assert_allclose(np.median(a4, axis=2),
                        np.median(a4.copy(), axis=2, overwrite_input=True))

    def test_array_like(self):
        x = [1, 2, 3]
        assert_almost_equal(np.median(x), 2)
        x2 = [x]
        assert_almost_equal(np.median(x2), 2)
        assert_allclose(np.median(x2, axis=0), x)

    def test_subclass(self):
        # gh-3846
        class MySubClass(np.ndarray):

            def __new__(cls, input_array, info=None):
                obj = np.asarray(input_array).view(cls)
                obj.info = info
                return obj

            def mean(self, axis=None, dtype=None, out=None):
                return -7

        # median dispatches through the subclass's mean()
        a = MySubClass([1, 2, 3])
        assert_equal(np.median(a), -7)

    def test_out(self):
        o = np.zeros((4,))
        d = np.ones((3, 4))
        assert_equal(np.median(d, 0, out=o), o)
        o = np.zeros((3,))
        assert_equal(np.median(d, 1, out=o), o)
        o = np.zeros(())
        assert_equal(np.median(d, out=o), o)

    def test_out_nan(self):
        # out= must also work when the input contains NaN
        with warnings.catch_warnings(record=True):
            warnings.filterwarnings('always', '', RuntimeWarning)
            o = np.zeros((4,))
            d = np.ones((3, 4))
            d[2, 1] = np.nan
            assert_equal(np.median(d, 0, out=o), o)
            o = np.zeros((3,))
            assert_equal(np.median(d, 1, out=o), o)
            o = np.zeros(())
            assert_equal(np.median(d, out=o), o)

    def test_nan_behavior(self):
        a = np.arange(24, dtype=float)
        a[2] = np.nan
        assert_equal(np.median(a), np.nan)
        assert_equal(np.median(a, axis=0), np.nan)

        a = np.arange(24, dtype=float).reshape(2, 3, 4)
        a[1, 2, 3] = np.nan
        a[1, 1, 2] = np.nan

        # no axis
        assert_equal(np.median(a), np.nan)
        assert_equal(np.median(a).ndim, 0)

        # axis0
        b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), 0)
        b[2, 3] = np.nan
        b[1, 2] = np.nan
        assert_equal(np.median(a, 0), b)

        # axis1
        b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), 1)
        b[1, 3] = np.nan
        b[1, 2] = np.nan
        assert_equal(np.median(a, 1), b)

        # axis02
        b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), (0, 2))
        b[1] = np.nan
        b[2] = np.nan
        assert_equal(np.median(a, (0, 2)), b)

    def test_empty(self):
        # mean(empty array) emits two warnings: empty slice and divide by 0
        a = np.array([], dtype=float)
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', RuntimeWarning)
            assert_equal(np.median(a), np.nan)
            assert_(w[0].category is RuntimeWarning)
            assert_equal(len(w), 2)

        # multiple dimensions
        a = np.array([], dtype=float, ndmin=3)
        # no axis
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', RuntimeWarning)
            assert_equal(np.median(a), np.nan)
            assert_(w[0].category is RuntimeWarning)

        # axis 0 and 1
        b = np.array([], dtype=float, ndmin=2)
        assert_equal(np.median(a, axis=0), b)
        assert_equal(np.median(a, axis=1), b)

        # axis 2
        b = np.array(np.nan, dtype=float, ndmin=2)
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', RuntimeWarning)
            assert_equal(np.median(a, axis=2), b)
            assert_(w[0].category is RuntimeWarning)

    def test_object(self):
        o = np.arange(7.)
        assert_(type(np.median(o.astype(object))), float)
        o[2] = np.nan
        assert_(type(np.median(o.astype(object))), float)

    def test_extended_axis(self):
        # tuple-of-axes reduction must match reducing a flattened slice
        o = np.random.normal(size=(71, 23))
        x = np.dstack([o] * 10)
        assert_equal(np.median(x, axis=(0, 1)), np.median(o))
        x = np.moveaxis(x, -1, 0)
        assert_equal(np.median(x, axis=(-2, -1)), np.median(o))
        x = x.swapaxes(0, 1).copy()
        assert_equal(np.median(x, axis=(0, -1)), np.median(o))

        assert_equal(np.median(x, axis=(0, 1, 2)), np.median(x, axis=None))
        assert_equal(np.median(x, axis=(0, )), np.median(x, axis=0))
        assert_equal(np.median(x, axis=(-1, )), np.median(x, axis=-1))

        d = np.arange(3 * 5 * 7 * 11).reshape((3, 5, 7, 11))
        np.random.shuffle(d.ravel())
        assert_equal(np.median(d, axis=(0, 1, 2))[0],
                     np.median(d[:,:,:, 0].flatten()))
        assert_equal(np.median(d, axis=(0, 1, 3))[1],
                     np.median(d[:,:, 1,:].flatten()))
        assert_equal(np.median(d, axis=(3, 1, -4))[2],
                     np.median(d[:,:, 2,:].flatten()))
        assert_equal(np.median(d, axis=(3, 1, 2))[2],
                     np.median(d[2,:,:,:].flatten()))
        assert_equal(np.median(d, axis=(3, 2))[2, 1],
                     np.median(d[2, 1,:,:].flatten()))
        assert_equal(np.median(d, axis=(1, -2))[2, 1],
                     np.median(d[2,:,:, 1].flatten()))
        assert_equal(np.median(d, axis=(1, 3))[2, 2],
                     np.median(d[2,:, 2,:].flatten()))

    def test_extended_axis_invalid(self):
        d = np.ones((3, 5, 7, 11))
        assert_raises(np.AxisError, np.median, d, axis=-5)
        assert_raises(np.AxisError, np.median, d, axis=(0, -5))
        assert_raises(np.AxisError, np.median, d, axis=4)
        assert_raises(np.AxisError, np.median, d, axis=(0, 4))
        assert_raises(ValueError, np.median, d, axis=(1, 1))

    def test_keepdims(self):
        d = np.ones((3, 5, 7, 11))
        assert_equal(np.median(d, axis=None, keepdims=True).shape,
                     (1, 1, 1, 1))
        assert_equal(np.median(d, axis=(0, 1), keepdims=True).shape,
                     (1, 1, 7, 11))
        assert_equal(np.median(d, axis=(0, 3), keepdims=True).shape,
                     (1, 5, 7, 1))
        assert_equal(np.median(d, axis=(1,), keepdims=True).shape,
                     (3, 1, 7, 11))
        assert_equal(np.median(d, axis=(0, 1, 2, 3), keepdims=True).shape,
                     (1, 1, 1, 1))
        assert_equal(np.median(d, axis=(0, 1, 3), keepdims=True).shape,
                     (1, 1, 7, 1))
class TestAdd_newdoc_ufunc:
    """Argument validation for the private add_newdoc_ufunc helper."""

    def test_ufunc_arg(self):
        # first argument must be a ufunc; docstrings cannot be replaced
        assert_raises(TypeError, add_newdoc_ufunc, 2, "blah")
        assert_raises(ValueError, add_newdoc_ufunc, np.add, "blah")

    def test_string_arg(self):
        # second argument must be a string
        assert_raises(TypeError, add_newdoc_ufunc, np.add, 3)
class TestAdd_newdoc:
    """Check that np.add_newdoc actually attached docstrings to C-level objects."""

    # -OO strips docstrings, and PyPy does not support modifying tp_doc
    @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO")
    @pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc")
    def test_add_doc(self):
        # test np.add_newdoc
        tgt = "Current flat index into the array."
        assert_equal(np.core.flatiter.index.__doc__[:len(tgt)], tgt)
        assert_(len(np.core.ufunc.identity.__doc__) > 300)
        assert_(len(np.lib.index_tricks.mgrid.__doc__) > 300)
class TestSortComplex:
    """Tests for np.sort_complex: output dtype promotion and sort order."""

    # real input dtypes are promoted to a complex dtype wide enough to
    # hold them losslessly
    @pytest.mark.parametrize("type_in, type_out", [
        ('l', 'D'),
        ('h', 'F'),
        ('H', 'F'),
        ('b', 'F'),
        ('B', 'F'),
        ('g', 'G'),
        ])
    def test_sort_real(self, type_in, type_out):
        # sort_complex() type casting for real input types
        a = np.array([5, 3, 6, 2, 1], dtype=type_in)
        actual = np.sort_complex(a)
        expected = np.sort(a).astype(type_out)
        assert_equal(actual, expected)
        assert_equal(actual.dtype, expected.dtype)

    def test_sort_complex(self):
        # sort_complex() handling of complex input
        a = np.array([2 + 3j, 1 - 2j, 1 - 3j, 2 + 1j], dtype='D')
        expected = np.array([1 - 3j, 1 - 2j, 2 + 1j, 2 + 3j], dtype='D')
        actual = np.sort_complex(a)
        assert_equal(actual, expected)
        assert_equal(actual.dtype, expected.dtype)
| 37.295316 | 102 | 0.523115 |
7953de221e02bf9fa1a466555a945a2263e32b34 | 8,965 | py | Python | suunto_exercise_data.py | Snijderfrey/suunto2python | 704bc046221d6809dc3bdd2118b34927a192600f | [
"MIT"
] | 7 | 2020-09-10T06:41:24.000Z | 2022-01-15T22:25:53.000Z | suunto_exercise_data.py | Snijderfrey/suunto2python | 704bc046221d6809dc3bdd2118b34927a192600f | [
"MIT"
] | null | null | null | suunto_exercise_data.py | Snijderfrey/suunto2python | 704bc046221d6809dc3bdd2118b34927a192600f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import zipfile
import glob
import numpy as np
import pandas as pd
from tqdm import tqdm
class exercise_data:
    """
    Imports data recorded by a Suunto Ambit 3 Peak into a Pandas DataFrame.

    Currently, the data is imported best from the zip file found in the Suunto
    App folder or from the json file found in the zip file. Generally, an
    import of json files exported from quantified-self.io is also possible,
    however implemented only very rudimentary (might be expanded in the
    future). The data is stored in the Pandas DataDrame self.exercise_data.
    """

    def __init__(self, file, mode='suunto_json'):
        """
        Initialize instance of exercise_data.

        Some calculation on the data are performed directly after import.

        Parameters
        ----------
        file : str
            Path to the file to be imported.
        mode : str, optional
            Gives the file origin of the imported json file. Allowed values are
            'suunto_json' for json files from the Suunto App folder extracted
            from the corresponding zip files, 'suunto_zip' for zip files found
            in the Suunto App folder and 'qs_json' for files exported from
            quantified-self.io. The default is 'suunto_json'.

        Returns
        -------
        None.

        """
        self.file = file
        self.mode = mode
        self.import_modes = np.array(['suunto_json', 'suunto_zip', 'qs_json'])

        if self.mode in self.import_modes[[0, 2]]:  # the json file modes
            self.summary_raw_data = None
            with open(self.file, 'r') as exercise_file:
                self.exercise_raw_data = exercise_file.read()
        elif self.mode == 'suunto_zip':
            # the Suunto zip contains 'summary.json' and 'samples.json'
            zip_data = zipfile.ZipFile(self.file, 'r')
            self.summary_raw_data = json.loads(zip_data.read('summary.json'))[
                'Samples']
            self.exercise_raw_data = zip_data.read('samples.json')
        else:
            raise ValueError('No valid mode given. Allowed values must be in '
                             '{}.'.format(self.import_modes))

        self.parse_sample_data()
        self.parse_summary()

        # Some values are calculated from the raw data.
        if ('baro', 'Speed') in self.exercise_data.columns:
            # pace derived as the reciprocal of speed; conversion factor
            # suggests speed in m/s -> pace in min/km (TODO confirm units)
            self.exercise_data[('gps', 'Pace')] = 1/self.exercise_data[
                ('baro', 'Speed')]*1000/60
        if ('baro', 'Cadence') in self.exercise_data.columns:
            # factor 60: per-second cadence -> per-minute (TODO confirm)
            self.exercise_data[('baro', 'Cadence')] *= 60

    def parse_sample_data(self):
        """
        Import the json file and parse it into a Pandas DataFrame.

        (currently only for self.mode=='suunto_json' and 'suunto_zip', for
        'qs_json', basically only the raw data is imported). The data is
        stored in self.exercise_data. Unparsed data is stored in
        self.unparsed_data and can be inspected for possibly disregarded data.

        Returns
        -------
        None.

        """
        if self.mode in self.import_modes[0:2]:  # 'suunto_json', 'suunto_zip'
            self.exercise_raw_data = np.array(
                json.loads(self.exercise_raw_data)['Samples'])

            # interbeat interval (ibi) is collected in lists together with
            # timestamp
            ibi_time = []
            ibi_values = []
            baro_time = []
            baro_data = []
            gps_time = []
            gps_data = []
            # indices of samples that were recognized and parsed; used at
            # the end to expose the remaining samples as unparsed data
            processed_samples = []
            for curr_index, curr_sample in enumerate(self.exercise_raw_data):
                if 'R-R' in curr_sample['Attributes']['suunto/sml']:
                    ibi_values.append(
                        curr_sample['Attributes']['suunto/sml']['R-R']['IBI'])
                    ibi_time.append(curr_sample['TimeISO8601'])
                    processed_samples.append(curr_index)
                elif 'Sample' in curr_sample['Attributes']['suunto/sml']:
                    # barometric samples are identified by 'AbsPressure',
                    # GPS samples by 'Latitude'; a sample may contain both
                    if 'AbsPressure' in curr_sample['Attributes'][
                            'suunto/sml']['Sample']:
                        baro_data.append(
                            curr_sample['Attributes']['suunto/sml']['Sample'])
                        baro_time.append(curr_sample['TimeISO8601'])
                        processed_samples.append(curr_index)
                    if 'Latitude' in curr_sample['Attributes']['suunto/sml'][
                            'Sample']:
                        gps_data.append(
                            curr_sample['Attributes']['suunto/sml']['Sample'])
                        gps_time.append(curr_sample['TimeISO8601'])
                        processed_samples.append(curr_index)

            ibi = pd.DataFrame(ibi_values, index=pd.to_datetime(ibi_time))
            if ibi_values:
                # reconstruct a per-beat time axis: the cumulative sum of
                # the intervals, anchored so the last beat of the first
                # list coincides with its recorded timestamp
                ibi_cumsum = pd.to_timedelta(ibi.stack().cumsum(), unit='ms')
                ibi_timeindex = pd.to_datetime(
                    ibi.index[0] -
                    pd.Timedelta(ibi.iloc[0].sum(), unit='ms') +
                    pd.to_timedelta(ibi_cumsum, unit='ms'))
                self.ibi_1d = pd.Series(
                    ibi.stack().values, index=ibi_timeindex.round('S'))

                # rounding to seconds creates duplicate timestamps; number
                # the beats within each second via a second index level,
                # incrementing until the MultiIndex is unique
                index_array = np.ones_like(self.ibi_1d)
                multi_index = pd.MultiIndex.from_arrays(
                    [self.ibi_1d.index, index_array],
                    names=('time', 'data_point'))
                duplicate_indices = multi_index.duplicated(keep='first')
                while True in duplicate_indices:
                    index_array += duplicate_indices
                    multi_index = pd.MultiIndex.from_arrays(
                        [self.ibi_1d.index, index_array.astype(int)],
                        names=('time', 'data_point'))
                    duplicate_indices = multi_index.duplicated(keep='first')

                # one row per second, one column per beat within that second
                ibi = self.ibi_1d
                ibi.index = multi_index
                ibi = ibi.unstack()
                ibi.columns = pd.MultiIndex.from_product(
                    [['IBI_raw'], ibi.columns])

            baro = pd.DataFrame(
                baro_data, index=pd.to_datetime(baro_time).round(freq='S'))
            baro.columns = pd.MultiIndex.from_product([['baro'], baro.columns])

            gps = pd.DataFrame(
                gps_data, index=pd.to_datetime(gps_time).round(freq='S'))
            gps.columns = pd.MultiIndex.from_product([['gps'], gps.columns])

            # join everything on the (second-resolution) time index and
            # drop rows with duplicated timestamps
            self.exercise_data = baro
            for ii in [gps, ibi]:
                if len(ii) > 0:
                    self.exercise_data = self.exercise_data.join(ii)
            self.exercise_data = self.exercise_data[
                ~self.exercise_data.index.duplicated(keep='first')]

            self.unparsed_lines = len(self.exercise_raw_data) - len(
                processed_samples)
            self.processed_samples_mask = np.ones_like(
                self.exercise_raw_data, dtype=bool)
            self.processed_samples_mask[processed_samples] = False
            self.unparsed_data = self.exercise_raw_data[
                self.processed_samples_mask]

        elif self.mode == self.import_modes[2]:  # 'qs_json'
            # currently very rudimentary
            # NOTE(review): stream index 6 is assumed to hold the IBI data
            # in quantified-self.io exports -- verify against sample files
            self.exercise_raw_data = json.loads(self.exercise_raw_data)
            self.ibi_values = np.array(
                self.exercise_raw_data['activities'][0]['streams'][6]['data'])
        else:
            raise ValueError(
                'No valid mode entered. Allowed modes are {}'.format(
                    self.import_modes))

    def parse_summary(self):
        """
        Generate a summary of the exercise data.

        Currently only done if the mode is 'suunto_zip' because in this case,
        the summary data generated by the Suunto App/watch is used.

        Returns
        -------
        None.

        """
        if self.mode == 'suunto_zip':
            # self.exercise_summary = []
            # for curr_data in self.summary_raw_data[0:-1]:
            #     self.exercise_summary.append(
            #         pd.Series(curr_data['Attributes']['suunto/sml']['Windows'][0]))
            # self.exercise_summary.append(
            #     pd.Series(self.summary_raw_data[-1]['Attributes']['suunto/sml']['Header']))
            # self.exercise_summary = pd.concat(self.exercise_summary, axis=1)
            # only the header of the last summary sample is used
            self.exercise_summary = pd.Series(
                self.summary_raw_data[-1]['Attributes']['suunto/sml']['Header']
                )
        else:
            self.exercise_summary = None
class training_diary:
    """Collection of all exercises found as zip files in a folder.

    Each zip file in *folder* is parsed into an exercise_data instance;
    the instances are collected in self.training_data.
    """

    def __init__(self, folder, mode='suunto_zip'):
        """
        Import all exercise zip files found in *folder*.

        Parameters
        ----------
        folder : str
            Path to the folder containing the exercise files. Must end
            with a path separator, because the glob pattern is built by
            plain string concatenation.
        mode : str, optional
            File origin passed through to exercise_data for every file.
            The default is 'suunto_zip'.

        Returns
        -------
        None.

        """
        self.folder = folder
        self.training_data_files = glob.glob(self.folder + '*.zip')
        self.training_data = []
        for curr_file in tqdm(self.training_data_files):
            # Bug fix: the mode parameter was previously ignored and
            # 'suunto_zip' was hard-coded here; pass it through instead
            # (backward-compatible, default unchanged).
            self.training_data.append(
                exercise_data(curr_file, mode=mode))
| 41.123853 | 93 | 0.565533 |
7953de8d729715e06067b76f035c0d56660d53cb | 2,125 | py | Python | contrib/runners/noop_runner/noop_runner/noop_runner.py | nickbaum/st2 | 21c01c7c8c0f511ee75e3b2a3a03502472281058 | [
"Apache-2.0"
] | 1 | 2020-11-09T21:05:33.000Z | 2020-11-09T21:05:33.000Z | contrib/runners/noop_runner/noop_runner/noop_runner.py | ellerbrock/st2 | b3a0d9f82053c1fd5adb616dc8331bad427cd11f | [
"Apache-2.0"
] | 3 | 2021-03-26T00:29:52.000Z | 2021-03-26T00:34:45.000Z | contrib/runners/noop_runner/noop_runner/noop_runner.py | ellerbrock/st2 | b3a0d9f82053c1fd5adb616dc8331bad427cd11f | [
"Apache-2.0"
] | null | null | null | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import uuid
from st2common import log as logging
from st2common.runners.base import ActionRunner
from st2common.runners.base import get_metadata as get_runner_metadata
from st2common.constants.action import LIVEACTION_STATUS_SUCCEEDED
import st2common.util.jsonify as jsonify
__all__ = [
'NoopRunner',
'get_runner',
'get_metadata'
]
LOG = logging.getLogger(__name__)
class NoopRunner(ActionRunner):
    """Runner which does absolutely nothing. No-op action."""

    # stdout/stderr values are deserialized from JSON strings by st2's jsonify.
    KEYS_TO_TRANSFORM = ['stdout', 'stderr']

    def __init__(self, runner_id):
        super(NoopRunner, self).__init__(runner_id=runner_id)

    def pre_run(self):
        super(NoopRunner, self).pre_run()

    def run(self, action_parameters):
        LOG.info('Executing action via NoopRunner: %s', self.runner_id)
        LOG.info('[Action info] name: %s, Id: %s',
                 self.action_name, str(self.execution_id))
        # Always report a trivially successful execution.
        outcome = {'failed': False, 'succeeded': True, 'return_code': 0}
        transformed = jsonify.json_loads(outcome, NoopRunner.KEYS_TO_TRANSFORM)
        return (LIVEACTION_STATUS_SUCCEEDED, transformed, None)
def get_runner():
    """Return a new NoopRunner keyed by a random UUID4 identifier."""
    runner_id = str(uuid.uuid4())
    return NoopRunner(runner_id)
def get_metadata():
    """Return the runner metadata entry for the 'noop_runner' package."""
    metadata = get_runner_metadata('noop_runner')
    return metadata[0]
| 30.797101 | 87 | 0.716235 |
7953de9ad5ee488b2fa6d14acdc5b5ba07fc7c1a | 4,772 | py | Python | src/offline_RL/dcql_roundabout.py | hougiebear/Deepdrive-Autonomous-Vehicles | 6b952c9e5d01893dc4319bbd74b9fa951719fcf9 | [
"MIT"
] | 1 | 2021-12-27T02:22:27.000Z | 2021-12-27T02:22:27.000Z | src/offline_RL/dcql_roundabout.py | hougiebear/Deepdrive-Autonomous-Vehicles | 6b952c9e5d01893dc4319bbd74b9fa951719fcf9 | [
"MIT"
] | null | null | null | src/offline_RL/dcql_roundabout.py | hougiebear/Deepdrive-Autonomous-Vehicles | 6b952c9e5d01893dc4319bbd74b9fa951719fcf9 | [
"MIT"
] | null | null | null | import gym
import highway_env
import numpy as np
import gym
from d3rlpy.algos import DQN
from d3rlpy.online.buffers import ReplayBuffer
from d3rlpy.online.explorers import LinearDecayEpsilonGreedy
import d3rlpy
from d3rlpy.wrappers.sb3 import to_mdp_dataset
import torch.nn as nn
import torch
from d3rlpy.algos import DiscreteCQL
from d3rlpy.metrics.scorer import evaluate_on_environment
from d3rlpy.metrics.scorer import td_error_scorer
from d3rlpy.metrics.scorer import discounted_sum_of_advantage_scorer
from d3rlpy.metrics.scorer import average_value_estimation_scorer
from sklearn.model_selection import train_test_split
from d3rlpy.dataset import MDPDataset
from d3rlpy.models.q_functions import QRQFunctionFactory
import gym.spaces as spaces
from gym import ObservationWrapper
class FlattenObservation(ObservationWrapper):
    r"""Observation wrapper that flattens (possibly nested) observations."""

    def __init__(self, env):
        super(FlattenObservation, self).__init__(env)
        # Advertise the flattened space so downstream algorithms see a 1-D box.
        self.observation_space = spaces.flatten_space(env.observation_space)

    def observation(self, observation):
        source_space = self.env.observation_space
        return spaces.flatten(source_space, observation)
# Experiment hyper-parameters: environment id, model tag, online QR-DQN
# training settings (buffer, exploration schedule, learning rate) and the
# number of quantiles for the QR-DQN value head.
params = {
    "environment": "roundabout-v0",
    "model_name": "CQL-QRDQN",
    "train_steps": 200000,
    "buffer_size": 100000,
    "batch_size": 32,
    "gamma": 0.99,
    "target_update_interval": 15000,
    "train_freq": 256,
    "gradient_steps": -1,
    "exploration_fraction": 0.0943543691594673,
    "exploration_final_eps": 0.170467871171168,
    "learning_rate": 0.000403307327179189,
    "learning_starts": 10000,
    "policy": "MlpPolicy",
    "test_episodes": 10000,
    "n_quantiles": 192
}
env = gym.make(params.get("environment"))
eval_env = gym.make(params.get("environment"))
# Flatten dict/nested observations into 1-D vectors for the MLP networks.
env = FlattenObservation(env)
# Fix: wrap the evaluation environment itself. The original wrapped ``env``
# a second time, which left ``eval_env`` unwrapped and made evaluation share
# (and step) the training environment's state.
eval_env = FlattenObservation(eval_env)
exp_name = params.get("model_name") + "_online_" + params.get("environment")
log_dir = '../../../logs/' + exp_name
# Toggle for the online QR-DQN pre-training phase inside ``train``.
pretrain = False
def train(params):
    """Optionally pre-train a QR-DQN agent online, then fit discrete CQL offline.

    When the module-level ``pretrain`` flag is True, an online QR-DQN agent is
    trained with an epsilon-greedy explorer, its replay buffer is dumped as an
    MDPDataset; the offline phase then loads that dataset and trains DiscreteCQL.

    Args:
        params (dict): experiment hyper-parameters (see module-level ``params``)
    """
    # setup algorithm (online pre-training phase, gated by module-level ``pretrain``)
    if pretrain:
        dqn = DQN(batch_size=params.get("batch_size"),
                  learning_rate=params.get("learning_rate"),
                  target_update_interval=params.get("target_update_interval"),
                  q_func_factory=QRQFunctionFactory(n_quantiles=params.get("n_quantiles")),
                  n_steps=params.get("train_freq"),
                  gamma=params.get("gamma"),
                  n_critics=1,
                  target_reduction_type="min",
                  use_gpu=True)
        # setup replay buffer
        buffer = ReplayBuffer(maxlen=params.get("buffer_size"), env=env)
        # setup explorers (linear epsilon decay over the first 100k steps)
        explorer = LinearDecayEpsilonGreedy(start_epsilon=1.0,
                                            end_epsilon=params.get("exploration_final_eps"),
                                            duration=100000)
        # start training
        dqn.fit_online(env,
                       buffer,
                       n_steps=params.get("train_steps"),
                       explorer=explorer, # you don't need this with probablistic policy algorithms
                       tensorboard_dir=log_dir,
                       eval_env=eval_env)
        print("Saving Model")
        dqn.save_model(exp_name)
        print("convert buffer to dataset")
        dataset = buffer.to_mdp_dataset()
        # save MDPDataset for the offline phase below
        dataset.dump('{0}.h5'.format(exp_name))
    print("Loading Dataset for Offline Training")
    dataset = d3rlpy.dataset.MDPDataset.load('{0}.h5'.format(exp_name))
    train_episodes, test_episodes = train_test_split(dataset, test_size=0.2)
    # The dataset can then be used to train a d3rlpy model
    cql = DiscreteCQL(
        learning_rate=6.25e-05,
        encoder_factory='default',
        q_func_factory='mean',
        batch_size=32,
        n_frames=1,
        n_steps=1,
        gamma=0.99,
        n_critics=1,
        bootstrap=False,
        share_encoder=False,
        target_reduction_type='min',
        target_update_interval=8000,
        use_gpu=True,
        scaler=None,
        augmentation=None,
        generator=None,
        impl=None)
    cql_exp = params.get("model_name") + "_offline_" + params.get("environment")
    cql_log = '../../../logs/' + cql_exp
    # NOTE(review): evaluation rolls out in ``env`` (training env), not
    # ``eval_env`` — confirm this is intentional.
    cql.fit(dataset.episodes,
            eval_episodes=test_episodes,
            n_epochs=1000,
            scorers={
                'environment': evaluate_on_environment(env, epsilon=0.05),
                'td_error': td_error_scorer,
                'discounted_advantage': discounted_sum_of_advantage_scorer,
                'value_scale': average_value_estimation_scorer,
            },
            tensorboard_dir=cql_log)
    cql.save_model(cql_exp)
# Guard the entry point so importing this module does not start training.
if __name__ == "__main__":
    train(params)
| 33.370629 | 100 | 0.654443 |
7953deafebb2b13f637d904391c56571e300e640 | 7,345 | py | Python | src/utils/train_utils.py | sniafas/photography-style-analysis | b5d828055cf40b127ac69e86af173a77bada3b32 | [
"Apache-2.0"
] | 3 | 2022-01-03T17:57:32.000Z | 2022-02-10T12:32:58.000Z | src/utils/train_utils.py | sniafas/photography-style-analysis | b5d828055cf40b127ac69e86af173a77bada3b32 | [
"Apache-2.0"
] | null | null | null | src/utils/train_utils.py | sniafas/photography-style-analysis | b5d828055cf40b127ac69e86af173a77bada3b32 | [
"Apache-2.0"
] | null | null | null | import json
import numpy as np
import pandas as pd
import tensorflow as tf
import importlib
import datetime
import tensorflow_addons as tfa
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.metrics import CategoricalAccuracy, Mean, Precision, Recall, AUC
import matplotlib.pyplot as plt
from matplotlib import rc
from tensorflow.keras.optimizers import Adam, RMSprop, Adagrad, SGD, Adadelta, Nadam
from tensorflow.keras.models import load_model
from src.configuration.config import Configuration
from src.transformations.basic import normalization, decode_img, oh_label
from src.architectures.model_architectures import Architecture
def dataset_to_tensor(data, batch_size, shuffle=False, batch=True, mode="sl"):
    """Convert a dataframe into a ``tf.data.Dataset``.

    Args:
        data (pd.DataFrame): must contain a ``photo_id`` column, plus a
            ``label`` column when ``mode == "sl"``
        batch_size (int): batch size used when ``batch`` is True
        shuffle (bool): shuffle with a buffer 3x the dataset cardinality
        batch (bool): whether to batch (and prefetch) the dataset
        mode (str): "sl" yields (image, one-hot label) pairs for supervised
            training; "al" yields (image, path) pairs for inference

    Returns:
        tf.data.Dataset: the mapped (and optionally shuffled/batched) dataset
    """
    # convert csv records to tensors
    image_path = tf.convert_to_tensor(data["photo_id"], dtype=tf.string)
    if mode == "sl": # supervised train(baseline dataset)
        labels = tf.convert_to_tensor(data["label"])
        # create a tensor dataset
        dataset = tf.data.Dataset.from_tensor_slices((image_path, labels))
        # num_parallel_calls=8
        dataset = dataset.map(map_dataset, num_parallel_calls=1)
    elif mode == "al": # inference
        dataset = tf.data.Dataset.from_tensor_slices((image_path))
        dataset = dataset.map(map_inference_dataset, num_parallel_calls=1)
    if shuffle:
        # reshuffle_each_iteration=False keeps the shuffle order stable
        # across epochs (important for reproducible active-learning runs).
        dataset = dataset.shuffle(
            tf.data.experimental.cardinality(dataset).numpy() * 3,
            reshuffle_each_iteration=False,
        )
    if batch:
        dataset = dataset.batch(batch_size).prefetch(1000)
    return dataset
def read_dataset_from_csv(data_type, path):
    """Load the first csv whose name matches ``path + data_type + '*'``.

    Args:
        data_type (str): train/valid/test
        path (str): directory prefix searched for matching csv files

    Returns:
        pd.DataFrame: the parsed csv content
    """
    pattern = path + data_type + "*"
    matches = tf.io.gfile.glob(pattern)
    return pd.read_csv(matches[0])
def map_dataset(img_path, label):
    """Load one image from disk and one-hot encode its label (via ``oh_label``).

    Args:
        img_path: string tensor, image path relative to the configured
            image directory
        label: class label tensor

    Returns:
        (image, label): decoded image tensor and encoded label tensor
    """
    config = Configuration().get_configuration()
    prefix = config["dataset"]["img_path"]
    # path/label represent values for a single example
    image_file = tf.io.read_file(prefix + img_path)
    image = decode_img(image_file)
    label = oh_label(label)
    return image, label
def map_inference_dataset(path):
    """Load one image for inference, keeping its path as the identifier.

    The label is unknown at inference time; the path is returned instead so
    the caller knows which image to annotate.

    Returns:
        (image, path): decoded image tensor and the original path tensor
    """
    config = Configuration().get_configuration()
    prefix = config["dataset"]["img_path"]
    # path/label represent values for a single example
    image_file = tf.io.read_file(prefix + path)
    image = decode_img(image_file)
    return image, path
def active_data_splits(train, valid, pool_dataset, split_ratio, mode):
    """Grow the train/valid splits with a slice of the pool (80/20 split).

    Args:
        train (pd.DataFrame): current training split (not modified in place)
        valid (pd.DataFrame): current validation split (not modified in place)
        pool_dataset (pd.DataFrame): pool of candidate samples
        split_ratio (int): number of pool rows to transfer
        mode (str): "random" shuffles the pool deterministically
            (``random_state=0``) before slicing; any other value takes the
            pool head as-is

    Returns:
        tuple: (augmented train, augmented valid) dataframes
    """
    if mode == "random":
        pool_dataset = pool_dataset.sample(pool_dataset.shape[0], random_state=0)
    selected = pool_dataset[:split_ratio]
    # 80% of the transferred slice goes to training, the rest to validation.
    cut = int(len(selected) * 80 / 100)
    augmented_train = pd.concat([train.copy(), selected[:cut]])
    augmented_valid = pd.concat([valid.copy(), selected[cut:]])
    return augmented_train, augmented_valid
def model_initialise():
    """Build the network selected by the project configuration.

    Returns:
        tf.keras.Model: the instantiated architecture
    """
    factory = Architecture()
    return factory()
def get_optimizer(learning_rate, opt_name):
    """Select an optimizer method by name.

    Arguments:
        learning_rate: float, learning value
        opt_name: str, optimizer name (adam, nadam, rms, adagrad, sgd, adadelta)

    Returns:
        optimizer object

    Raises:
        ValueError: if ``opt_name`` is not a supported optimizer name.
            (Previously an unknown name crashed with UnboundLocalError.)
    """
    builders = {
        "adam": Adam,
        "nadam": Nadam,
        "rms": RMSprop,
        "adagrad": Adagrad,
        # SGD is the only optimizer built with an extra flag (Nesterov momentum).
        "sgd": lambda lr: SGD(lr, nesterov=True),
        "adadelta": Adadelta,
    }
    if opt_name not in builders:
        raise ValueError(f"Unknown optimizer name: {opt_name!r}")
    return builders[opt_name](learning_rate)
def get_true_labels(dataset, dataset_len, batch_size):
    """Collect the argmax class index of every one-hot label batch.

    Args:
        dataset: batched dataset yielding (image, one-hot label) pairs and
            supporting ``.take(n)``
        dataset_len (int): total number of samples
        batch_size (int): batch size the dataset was built with

    Returns:
        np.ndarray: 1-D array of integer class labels
    """
    n_batches = dataset_len // batch_size + 1
    batch_labels = [
        np.argmax(labels, axis=1)
        for _, labels in dataset.take(n_batches)
    ]
    return np.concatenate(batch_labels)
def losses_and_metrics(num_classes):
    """Build the loss and the train/validation metric objects.

    Loss: categorical cross-entropy (from logits).
    Metrics: train/validation mean loss and accuracy, precision, recall,
    AUC, and two macro-averaged F1 scores (one tracked with the training
    metrics, one with the validation metrics).

    Args:
        num_classes (int): number of classes for the F1 scores

    Returns:
        tuple: (loss_fn, train loss, train acc, valid loss, valid acc,
        precision, recall, auc, f1 train, f1 valid)
    """
    loss_fn = CategoricalCrossentropy(from_logits=True)
    train_loss = Mean(name="train_loss")
    train_accuracy = CategoricalAccuracy("train_accuracy")
    valid_loss = Mean(name="valid_loss")
    valid_accuracy = CategoricalAccuracy("valid_accuracy")
    precision = Precision(name="precision")
    recall = Recall(name="recall")
    auc = AUC(name="auc")
    f1_train = tfa.metrics.F1Score(num_classes=num_classes, average="macro")
    f1_loss = tfa.metrics.F1Score(num_classes=num_classes, average="macro")
    return (
        loss_fn,
        train_loss,
        train_accuracy,
        valid_loss,
        valid_accuracy,
        precision,
        recall,
        auc,
        f1_train,
        f1_loss,
    )
def plot_training(results, path_to_save):
    """Plot accuracy, loss and F1 training curves and save them as png files.

    Args:
        results (dict): training history containing 'acc', 'val_acc', 'loss',
            'val_loss', 'f1_score' and 'val_f1_score' series
        path_to_save (str): path prefix for the generated ``*_acc.png``,
            ``*_loss.png`` and ``*_f1.png`` files
    """
    config = Configuration().get_configuration()
    epochs = config["training"]["epochs"]

    # Global matplotlib styling shared by all three figures.
    plt.style.use(["dark_background", "bmh"])
    rc("figure", figsize=(5, 8), max_open_warning=False)
    rc("axes", facecolor="none")

    _plot_metric(results["acc"], results["val_acc"], "Accuracy", epochs,
                 f"{path_to_save}_acc.png")
    _plot_metric(results["loss"], results["val_loss"], "Loss", epochs,
                 f"{path_to_save}_loss.png")
    _plot_metric(results["f1_score"], results["val_f1_score"], "F1", epochs,
                 f"{path_to_save}_f1.png")


def _plot_metric(train_series, valid_series, ylabel, epochs, filename):
    """Render one train/validation curve pair and save it to ``filename``."""
    plt.figure(figsize=(8, 5), facecolor="white")
    plt.title("Training History", {"fontname": "Roboto", "fontsize": 15})
    plt.xlabel("Epochs", {"fontname": "Roboto", "fontsize": 12})
    plt.ylabel(ylabel, {"fontname": "Roboto", "fontsize": 12})
    plt.plot(train_series)
    plt.plot(valid_series)
    plt.xlim((0, epochs - 1))
    plt.grid(axis="x", linestyle="--")
    plt.legend(["Training", "Validation"])
    plt.savefig(filename)
7953dfc6118a4604304384d8c4b0110cf6fb3d0b | 2,749 | py | Python | _build/jupyter_execute/Module3/quiz3.py | liuzhengqi1996/math452_Spring2022 | b01d1d9bee4778b3069e314c775a54f16dd44053 | [
"MIT"
] | null | null | null | _build/jupyter_execute/Module3/quiz3.py | liuzhengqi1996/math452_Spring2022 | b01d1d9bee4778b3069e314c775a54f16dd44053 | [
"MIT"
] | null | null | null | _build/jupyter_execute/Module3/quiz3.py | liuzhengqi1996/math452_Spring2022 | b01d1d9bee4778b3069e314c775a54f16dd44053 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# # Quiz 3
# For Penn State student, access quiz [here](https://psu.instructure.com/courses/2177217)
# In[1]:
import ipywidgets as widgets
# ## Question 1
# Is $f(x)=e^x$ a convex function?
# ```{dropdown} Show answer
# Answer: Yes
# ```
# ## Question 2
# Consider the uniform distribution $\mathcal X$ on $[-a,a]$ for some number $a>0$. What are the expectation and variance of $\mathcal X$
# ```{dropdown} Show answer
# Answer:$0, \frac{a^2}{3}.$
# ```
# ## Question 3
# Suppose you flip a fair icon 3 times. Let $\chi$ be the number of heads. Calculate the expectation of $\chi ^2 $
# ```{dropdown} Show answer
# Answer: 3
# ```
# ## Question 4
# Consider the function $f(x,y,z)=yz+e^{xyz}$. At the point
#
# $
# \begin{pmatrix}
# x\\
# y\\
# z
# \end{pmatrix}
# =
# \begin{pmatrix}
# 0\\
# 1\\
# 2
# \end{pmatrix}
# $
#
# find the direction along which the function decreases most rapidly.
#
# ```{dropdown} Show answer
# Answer: $\begin{pmatrix} -2\\-2\\-1\end{pmatrix}$
# ```
# ## Question 5
# Consider $f(x,y)=2x^2+2y^2.$ Given initial guess
#
# $
# \begin{pmatrix}
# x^0\\
# y^0
# \end{pmatrix}
# =
# \begin{pmatrix}
# 2\\
# 3
# \end{pmatrix}
# $
#
# $\eta =1/8$
#
# compute two steps of the gradient descent method for $f(x,y)$
# ```{dropdown} Show answer
# Answer:
#
# $
# \begin{pmatrix}
# x^2\\
# y^2
# \end{pmatrix}
# =
# \begin{pmatrix}
# \frac {1}{2}\\
# \frac {3}{4}
# \end{pmatrix}
# $
# ```
# ## Question 6
# What is output of the following code?
# In[2]:
class test:
    def __init__(self, a):
self.a=a
def display(self):
print(self.a)
obj = test()
obj.display()
# ```{dropdown} Show answer
# Answer: Error as one argument is required while creating the object
# ```
# ## Question 7
# If we use "import Course'' in Python, what is "Course"?
# ```{dropdown} Show answer
# Answer: A module
# ```
# ## Question 8
# What is the output of the following code:
# In[ ]:
print('{}\n/{}'.format(1,2))
# ```{dropdown} Show answer
# Answer: 1
#
# /2
# ```
# ## Question 9
# How to define stochastic gradient descent method with learing rate=1 after:
# In[ ]:
import torch.optim
import torch.nn as nn
my_model=nn.Linear(784,10)
# ```{dropdown} Show answer
# Answer: optimizer = torch.optim.SGD(my_model.parameters(), lr=1)
# ```
# ## Queation 10
# For MNIST dataset, if we would like to use full gradient descent method, how should we define the trainloader?
# ```{dropdown} Show answer
# Answer:trainloader = torch.utils.data.DataLoader(trainset, batch_size=60000)
# ```
# In[ ]:
| 17.074534 | 137 | 0.582757 |
7953dfe73af24a3990b3c7bdab2e6a272012b368 | 6,208 | py | Python | training/cb_vae.py | aseembits93/avoiding-side-effects | f6cbe803749df93c07cdd0a8dbc68d18bb8c11b2 | [
"Apache-2.0"
] | 8 | 2020-11-22T15:34:35.000Z | 2022-02-24T10:05:32.000Z | training/cb_vae.py | neale/avoiding-side-effects | 72ebc8ce5d66c846780aa9e710ae56ff12bd23af | [
"Apache-2.0"
] | null | null | null | training/cb_vae.py | neale/avoiding-side-effects | 72ebc8ce5d66c846780aa9e710ae56ff12bd23af | [
"Apache-2.0"
] | 1 | 2022-02-24T10:05:47.000Z | 2022-02-24T10:05:47.000Z | ###############################################################################
# CB-VAE code adapted from https://github.com/Robert-Aduviri/Continuous-Bernoulli-VAE
###############################################################################
#MIT License
#Copyright (c) 2019 Robert Aduviri
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.a
###############################################################################
import torch
from torch import nn
from torch.nn import functional as F
from torch import optim
from torchvision.utils import save_image
class VAE(nn.Module):
    """Fully-connected VAE (architecture from the CB-VAE paper appendix,
    https://arxiv.org/src/1907.06845v4/anc/cont_bern_aux.pdf).

    Encoder: flattened image -> 512 -> 512 -> 256 -> two 128-wide heads
    producing the latent mean and a softplus-positive std of size ``z_dim``.
    The decoder mirrors the encoder and emits a sigmoid mean over pixels.
    """

    def __init__(self, z_dim, input_dim):
        super(VAE, self).__init__()
        self.z_dim = z_dim
        self.input_dim = input_dim
        # Encoder layers
        self.fc1 = nn.Linear(input_dim*input_dim, 512)
        self.fc2 = nn.Linear(512, 512)
        self.fc3 = nn.Linear(512, 256)
        self.fc3a1 = nn.Linear(256, 128)
        self.fc3b1 = nn.Linear(256, 128)
        self.fc3a2 = nn.Linear(128, z_dim)
        self.fc3b2 = nn.Linear(128, z_dim)
        # Decoder layers
        self.fc4 = nn.Linear(z_dim, 128)
        self.fc5 = nn.Linear(128, 256)
        self.fc6 = nn.Linear(256, 512)
        self.fc7 = nn.Linear(512, 512)
        self.fc8 = nn.Linear(512, input_dim*input_dim)

    def encode(self, x):
        """Recognition network: return (mean, std) of q(z|x)."""
        trunk = F.elu(self.fc3(F.elu(self.fc2(F.elu(self.fc1(x))))))
        mean_head = F.elu(self.fc3a1(trunk))
        std_head = F.elu(self.fc3b1(trunk))
        return self.fc3a2(mean_head), F.softplus(self.fc3b2(std_head))

    def reparameterize(self, mu, std):
        """Sample z = mu + std * eps with eps ~ N(0, I)."""
        noise = torch.randn_like(std)
        return noise.mul(std).add_(mu)

    def decode(self, z):
        """Likelihood network: return (sigmoid pixel mean, None)."""
        trunk = F.elu(self.fc7(F.elu(self.fc6(F.elu(self.fc5(F.elu(self.fc4(z))))))))
        # The second element (sigma) is unused for the Bernoulli likelihoods.
        return torch.sigmoid(self.fc8(trunk)), None

    def forward(self, x):
        flat = x.view(-1, self.input_dim*self.input_dim)
        mu, std = self.encode(flat)
        z = self.reparameterize(mu, std)
        # Return decoding, mean and log-variance (logvar = 2 * log(std)).
        return self.decode(z), mu, 2.*torch.log(std)
def sumlogC( x , eps = 1e-5):
    '''
    Numerically stable sum of the logarithm of the Continuous Bernoulli
    normalizing constant C(x).

    The closed form log C = log[(log(1-x) - log(x)) / (1 - 2x)] is used away
    from x = 0.5; inside the unstable band |x - 0.5| < eps a 3rd-degree
    Taylor approximation log 2 + log(1 + (1-2x)^2 / 3) is used instead.

    Parameter
    ----------
    x : Tensor of dimensions (batch_size, dim), values in (0, 1)
    '''
    x = torch.clamp(x, eps, 1.-eps)
    stable = torch.abs(x - 0.5).ge(eps)
    x_far = torch.masked_select(x, stable)
    x_near = torch.masked_select(x, ~stable)
    log_c_far = torch.log( (torch.log(1. - x_far) - torch.log(x_far)).div(1. - 2. * x_far) )
    log_c_near = torch.log(torch.tensor((2.))) + torch.log(1. + torch.pow( 1. - 2. * x_near, 2)/3. )
    return log_c_far.sum() + log_c_near.sum()
def loss_vae(recon_x, x, mu, logvar):
    """Standard VAE ELBO loss: reconstruction BCE + KL divergence to N(0, I).

    ``recon_x`` is the tuple returned by ``VAE.decode``; only its first
    element (the pixel mean) is used. ``x`` is assumed square in its last
    two dimensions and is flattened accordingly.
    """
    side = x.size(-1)
    flat_target = x.view(-1, side * side)
    reconstruction = F.binary_cross_entropy(recon_x[0], flat_target, reduction='sum')
    kl_divergence = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    return reconstruction + kl_divergence
def loss_cbvae(recon_x, x, mu, logvar):
    """Continuous-Bernoulli VAE loss: BCE + KL + log normalizing constant.

    Identical to ``loss_vae`` plus the (negated) sum of log C terms from
    ``sumlogC`` evaluated on the reconstruction mean.
    """
    side = x.size(-1)
    flat_target = x.view(-1, side * side)
    reconstruction = F.binary_cross_entropy(recon_x[0], flat_target, reduction='sum')
    kl_divergence = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    log_constant = -sumlogC(recon_x[0])
    return reconstruction + kl_divergence + log_constant
def encode_state(model, state, device):
    """Encode a batch of square images into latent samples via ``model``.

    The state tensor is moved to ``device``, flattened to
    (batch, side * side) and passed through the model's encoder and
    reparameterization step.
    """
    side = state.size(-1)
    flat = state.to(device).view(-1, side * side)
    mu, std = model.encode(flat)
    return model.reparameterize(mu, std)
def load_state_encoder(z_dim, path, input_dim, device):
    """Restore a trained VAE from a checkpoint and return it.

    Args:
        z_dim: latent dimensionality the checkpoint was trained with
        path: filesystem path of the saved ``state_dict``
        input_dim: image side length (images are input_dim x input_dim)
        device: torch device the model is placed on

    Returns:
        VAE: the restored model.
    """
    model = VAE(z_dim, input_dim).to(device)
    ckpt = torch.load(path)
    model.load_state_dict(ckpt)
    # Fix: the loaded model was previously discarded — the function ended
    # without a return statement, so callers always received None.
    return model
def train_encoder(device, data, z_dim=16, training_epochs=200, exp='test', batch_size=128, log_interval=10):
    """Train a continuous-Bernoulli VAE on a tensor of square images.

    Args:
        device: torch device used for the model and batches
        data: tensor of images, shape (N, input_dim, input_dim)
        z_dim: latent dimensionality
        training_epochs: number of passes over ``data``
        exp: experiment tag (currently unused; kept for API compatibility)
        batch_size: minibatch size for the dataloader
        log_interval: print progress every this many processed batches

    Returns:
        VAE: the trained model.
    """
    input_dim = data.size(-1)
    model = VAE(z_dim, input_dim).to(device)
    loss_fn = loss_cbvae
    # Fix: honour the ``batch_size`` argument — it was previously shadowed
    # by a hard-coded ``batch_size = 64`` immediately after the signature.
    dataset = torch.utils.data.TensorDataset(data)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size, drop_last=True, shuffle=True)
    optimizer = optim.Adam(model.parameters(), lr=1e-4)
    total_batches = 0
    for epoch in range(1, training_epochs + 1):
        model.train()
        train_loss = 0
        for batch_idx, batch in enumerate(dataloader):
            # TensorDataset yields 1-tuples; stack+squeeze recovers the batch.
            batch = torch.stack(batch).float().squeeze(0)
            batch = batch.to(device)
            optimizer.zero_grad()
            recon_batch, mu, logvar = model(batch)
            loss = loss_fn(recon_batch, batch, mu, logvar)
            loss.backward()
            train_loss += loss.item()
            optimizer.step()
            total_batches += 1
            if total_batches % log_interval == 0:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]'.format(
                    epoch, batch_idx * len(batch), len(dataloader.dataset),
                    100. * batch_idx / len(dataloader)))
    return model
7953e077c12a5d8de475c3816a171cb9e69431d8 | 8,854 | py | Python | src/bio2bel_kegg/client.py | jmarinllao/kegg | 321a1a5157f66922d84e8f8dce11f105c6fd52a7 | [
"MIT"
] | null | null | null | src/bio2bel_kegg/client.py | jmarinllao/kegg | 321a1a5157f66922d84e8f8dce11f105c6fd52a7 | [
"MIT"
] | null | null | null | src/bio2bel_kegg/client.py | jmarinllao/kegg | 321a1a5157f66922d84e8f8dce11f105c6fd52a7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""This module parsers the description files -> http://rest.kegg.jp/get/ in KEGG RESTful API."""
import itertools as itt
import logging
import os
from multiprocessing.pool import ThreadPool
from operator import itemgetter
from typing import Any, Collection, Iterable, List, Mapping, Optional, Tuple
from urllib.request import urlretrieve
from protmapper.api import hgnc_name_to_id
from protmapper.uniprot_client import get_entrez_id, um
from tqdm import tqdm
from .constants import ENTITY_DIRECTORY, XREF_MAPPING
__all__ = [
'get_entities_lines',
'parse_protein_lines',
'parse_pathway_lines',
]
logger = logging.getLogger(__name__)
# Map each HGNC id to its Entrez gene id, via protmapper's UniProt->HGNC table.
HGNC_ID_TO_ENTREZ_ID = {
    hgnc: get_entrez_id(uniprot)
    for uniprot, hgnc in um.uniprot_hgnc.items()
}
# Reverse lookup: Entrez gene id -> HGNC id.
ENTREZ_ID_TO_HGNC_ID = {v: k for k, v in HGNC_ID_TO_ENTREZ_ID.items()}
# Reverse of protmapper's symbol->id table: HGNC id -> gene symbol.
HGNC_ID_TO_SYMBOL = {v: k for k, v in hgnc_name_to_id.items()}
def get_entities_lines(
    entity_ids: Collection[str],
    thread_pool_size: Optional[int] = None,
) -> List[Tuple[str, List[str]]]:
    """Fetch (and disk-cache) the KEGG flat-file records for several entities.

    :param entity_ids: Can be KEGG pathway identifiers or KEGG protein identifiers
    :param thread_pool_size: number of download threads (default 3)
    :return: list of (entity id, record lines) pairs, in completion order
    """
    # Multi-thread processing of protein description requests
    if thread_pool_size is None:
        thread_pool_size = 3
    with ThreadPool(processes=thread_pool_size) as pool:
        results: Iterable[Tuple[str, List[str]]] = pool.imap_unordered(ensure_kegg_entity, entity_ids)
        # make sure it gets the whole way through this before doing the next step
        results: Iterable[Tuple[str, List[str]]] = tqdm(
            results,
            total=len(entity_ids),
            desc=f'Fetching protein information ({thread_pool_size} threads)',
        )
        return list(results)
def ensure_kegg_entity(entity_id: str) -> Tuple[str, List[str]]:
    """Download (and cache on disk) the KEGG record for one entity.

    The record is fetched from the KEGG REST API only on the first call;
    subsequent calls read the cached text file under
    ``ENTITY_DIRECTORY/<prefix>/<identifier>.txt``.

    :param entity_id: A KEGG entity identifier (with prefix), e.g. ``hsa:10458``
    :return: the entity identifier and the stripped lines of its record
    """
    prefix, identifier = entity_id.split(':', 1)
    entity_type_directory = os.path.join(ENTITY_DIRECTORY, prefix)
    os.makedirs(entity_type_directory, exist_ok=True)
    entity_text_path = os.path.join(entity_type_directory, f'{identifier}.txt')
    if not os.path.exists(entity_text_path):
        urlretrieve(f'http://rest.kegg.jp/get/{entity_id}', entity_text_path)  # noqa:S310
    with open(entity_text_path) as file:
        lines = [line.strip() for line in file]
    return entity_id, lines
def iterate_groups(lines):
    """Yield (key, value) or ((key, subkey), value) pairs from a KEGG record.

    KEGG "get" responses put the field name in the first 12 columns and the
    value after column 12. Continuation lines leave the key columns blank
    and inherit the current key; indented keys mark sub-fields of the
    current field. The ``///`` terminator and blank lines are skipped.
    """
    active_key = None
    active_subkey = None
    for raw in lines:
        if raw.startswith('///') or not raw.strip():
            continue
        field, value = raw[:12].rstrip(), raw[12:].rstrip()
        if field:
            if field.startswith(' '):
                active_subkey = field.strip()
            else:
                active_key = field
                active_subkey = None
        if active_subkey is None:
            yield active_key, value
        else:
            yield (active_key, active_subkey), value
def get_line(lines: Iterable[str]) -> str:
    """Return the first line of an iterable of lines (consumes the iterable)."""
    materialized = list(lines)
    return materialized[0]
def parse_protein_lines(lines: Iterable[str]) -> Mapping[str, Any]:
    """Parse the lines of a KEGG protein info file.

    Groups the flat-file fields with :func:`iterate_groups` and extracts the
    identifier, definition, orthology, species, pathways, motifs and
    cross-references. Unhandled groups are ignored.
    """
    rv = {'xrefs': []}
    # groupby relies on iterate_groups yielding same-key lines contiguously.
    for group, group_lines in itt.groupby(iterate_groups(lines), key=itemgetter(0)):
        group_lines = (line for _, line in group_lines)
        if group == 'ENTRY':
            line = get_line(group_lines)
            kegg_id, _, _ = [x.strip() for x in line.split() if x.strip()]  # not sure what the other two are
            rv['identifier'] = kegg_id.strip()
        elif group == 'DEFINITION':
            rv['definition'] = get_line(group_lines)
        elif group == 'ORTHOLOGY':
            rv['orthology'] = _get_xref_names(group_lines, prefix='kegg.orthology')
        elif group == 'ORGANISM':
            line: str = get_line(group_lines)
            # Species name precedes the parenthesized short code.
            p_index = line.index('(')
            rv['species'] = {'name': line[:p_index].rstrip()}
        elif group == 'PATHWAY':
            rv['pathway'] = _get_xref_names(group_lines, prefix='kegg.pathway')
        elif group == 'BRITE':
            pass
        elif group == 'POSITION':
            pass  # rv['position'] = int(get_line(lines))
        elif group == 'MOTIF':
            rv['motif'] = _get_xrefs(group_lines)
        elif group == 'DBLINKS':
            rv['xrefs'] = _get_xrefs(group_lines)
        else:
            pass  # logger.warning(f'unhandled group: {group}')
    return rv
def parse_pathway_lines(lines: Iterable[str]) -> Mapping[str, Any]:
    """Parse the lines of a KEGG pathway info file.

    Groups the flat-file fields with :func:`iterate_groups` and extracts the
    identifier, name, definition, drugs, diseases, genes (with orthologies
    and EC numbers), compounds, the first PubMed reference and related
    pathways. Structural groups (CLASS, BRITE, MODULE, ...) are skipped.
    """
    rv = {}
    for group, group_lines in itt.groupby(iterate_groups(lines), key=itemgetter(0)):
        group_lines = (line for _, line in group_lines)
        if group == 'ENTRY':
            line = get_line(group_lines)
            kegg_id = line.split()[0]
            rv['identifier'] = kegg_id
        elif group == 'NAME':
            rv['name'] = get_line(group_lines)
        elif group == 'DESCRIPTION':
            rv['definition'] = get_line(group_lines)
        elif group == 'CLASS':
            pass
        elif group == 'PATHWAY_MAP':
            pass
        elif group == 'MODULE':
            pass
        elif group == 'NETWORK':
            pass
        elif group == 'DRUG':
            # Lines look like "D00001  Name (note1/note2)".
            drugs = []
            for line in group_lines:
                xref, name = line.split('  ')
                try:
                    p_index = name.rindex('(')
                except ValueError:
                    logger.warning(f'could not parse line: {line}')
                    continue
                name, note = name[:p_index - 1], name[1 + p_index:].rstrip().rstrip(')')
                drugs.append({'identifier': xref, 'name': name, 'note': note.split('/')})
            rv['drugs'] = drugs
        elif group == 'DISEASE':
            rv['diseases'] = _get_xref_names(group_lines, prefix='kegg.disease')
        elif group == 'DBLINKS':
            rv['xrefs'] = _get_xrefs(group_lines)
        elif group == 'ORGANISM':
            line: str = get_line(group_lines)
            p_index = line.index('(')
            rv['species'] = {'name': line[:p_index].rstrip()}
        elif group == 'GENE':
            # Lines look like "100  SYMBOL; description [KO:K00001] [EC:1.1.1.1]".
            genes = []
            for line in group_lines:
                xref, info = line.split('  ', 1)
                symbol, info = info.split(';')
                name, info = info.lstrip().split(' [', 1)
                orthology, ec = info.split(']', 1)
                orthology_codes = orthology[len('KO:'):].split(' ')
                if ec.strip().lstrip('[').rstrip(']'):
                    ec_codes = ec[len('EC:'):].split(' ')
                else:
                    ec_codes = []
                genes.append({
                    'prefix': 'ncbigene',
                    'identifier': xref,
                    'name': symbol,
                    'definition': name,
                    'orthologies': [
                        {
                            'prefix': 'kegg.orthology',
                            'identifier': orthology_code,
                        }
                        for orthology_code in orthology_codes
                    ],
                    'enzyme_classes': [
                        {'prefix': 'ec-code', 'identifier': ec_code}
                        for ec_code in ec_codes
                    ],
                })
            rv['genes'] = genes
        elif group == 'COMPOUND':
            rv['compounds'] = _get_xref_names(group_lines, prefix='kegg.compound')
        elif group == 'REFERENCE':
            # Only the first PMID-bearing reference is kept.
            line = get_line(group_lines)
            if line.startswith('PMID'):
                pubmed_id = line[len('PMID:'):]
                rv['reference'] = {'pubmed_id': pubmed_id}
            else:
                continue
        elif group == 'REL_PATHWAY':
            rv['related'] = _get_xref_names(group_lines, prefix='kegg.pathway')
        elif group == 'KO_PATHWAY':
            pass
        else:
            pass  # logger.warning(f'unhandled group: {group}')
    return rv
def _get_xrefs(group_lines: Iterable[str]) -> List[Mapping[str, Any]]:
    """Parse DBLINKS/MOTIF-style "Prefix: id1 id2" lines into xref dicts.

    Prefixes are normalized through ``XREF_MAPPING`` when a mapping exists.
    """
    parsed = []
    for line in group_lines:
        raw_prefix, identifiers = line.strip().split(':')
        normalized = XREF_MAPPING.get(raw_prefix, raw_prefix)
        parsed.extend(
            {'prefix': normalized, 'identifier': identifier}
            for identifier in identifiers.strip().split(' ')
        )
    return parsed
rv = []
for line in group_lines:
try:
xref, name = line.split(' ')
except ValueError:
logger.warning(f'Could not split line: {line}')
continue
rv.append({'prefix': prefix, 'identifier': xref, 'name': name})
return rv
| 36.138776 | 109 | 0.563022 |
7953e1665e649283a433665f96f6d869a9e0211f | 5,214 | py | Python | tests/measurement/editors/test_sequence_editor.py | rassouly/exopy_pulses | bf71a3899c9b04434ee928ede08a21a0dd62f0a5 | [
"BSD-3-Clause"
] | null | null | null | tests/measurement/editors/test_sequence_editor.py | rassouly/exopy_pulses | bf71a3899c9b04434ee928ede08a21a0dd62f0a5 | [
"BSD-3-Clause"
] | null | null | null | tests/measurement/editors/test_sequence_editor.py | rassouly/exopy_pulses | bf71a3899c9b04434ee928ede08a21a0dd62f0a5 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2015-2018 by ExopyPulses Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Check that the manifest does register the editor.
"""
import os
from collections import OrderedDict
import enaml
import pytest
from exopy.tasks.api import RootTask
from exopy.testing.util import show_widget
from exopy.tasks.tasks.instr_task import (PROFILE_DEPENDENCY_ID,
DRIVER_DEPENDENCY_ID)
with enaml.imports():
from exopy_pulses.pulses.manifest import PulsesManagerManifest
from exopy_pulses.measurement.manifest import PulsesMeasurementManifest
from exopy_pulses.pulses.pulse import Pulse
from exopy_pulses.pulses.utils.sequences_io import save_sequence_prefs
from exopy_pulses.pulses.sequences.base_sequences\
import RootSequence
from exopy_pulses.testing.context import TestContext
from exopy_pulses.tasks.tasks.instrs.transfer_sequence_task\
import TransferPulseSequenceTask
with enaml.imports():
from .contributions import PulsesContributions
pytest_plugins = (str('exopy.testing.measurement.fixtures'),)
class FalseStarter(object):
    """Fake instrument starter used for testing.

    ``check_infos`` reports pass/fail based on ``should_pass``; ``start``
    hands back a dummy driver object and ``stop`` records on the class that
    it was invoked (``FalseStarter.stop_called``).
    """

    # NOTE(review): this flag is never updated anywhere — ``stop`` sets
    # ``stop_called`` instead. Confirm which name the assertions rely on.
    finalize_called = False

    def __init__(self, should_pass=True):
        self.should_pass = should_pass

    def check_infos(self, driver_cls, connection, settings):
        return (self.should_pass, 'Message')

    def start(self, driver_cls, connection, settings):
        driver = object()
        return driver

    def stop(self, driver):
        FalseStarter.stop_called = True
@pytest.fixture
def sequence():
    """Create a root pulse sequence with three pulses for testing.

    The sequence uses the dummy TestContext, declares one external variable
    ('a') and one local variable ('b' = 2*{a}); the pulses reference those
    variables in their start/stop definitions.
    """
    root = RootSequence()
    context = TestContext(sampling=0.5)
    root.context = context
    root.external_vars = OrderedDict({'a': None})
    root.local_vars = OrderedDict({'b': '2*{a}'})
    pulse1 = Pulse(def_1='1.0', def_2='{a}', def_mode=str('Start/Stop'))
    pulse2 = Pulse(def_1='{a} + 1.0', def_2='3.0')
    # pulse3 depends on pulse2's stop time and the local variable b.
    pulse3 = Pulse(def_1='{2_stop} + 0.5', def_2='10 + {b}')
    for i, c in enumerate((pulse1, pulse2, pulse3)):
        root.add_child_item(i, c)
    return root
@pytest.fixture
def task(sequence, tmpdir):
    """Transfer sequence task for testing.

    Builds a RootTask whose run-time dependencies expose a fake driver 'd'
    (started by FalseStarter) and a fake profile 'p', saves the ``sequence``
    fixture to a temporary .pulse.ini file, and attaches a
    TransferPulseSequenceTask bound to that file and instrument.
    """
    p_id = PROFILE_DEPENDENCY_ID
    d_id = DRIVER_DEPENDENCY_ID
    root = RootTask()
    root.run_time = {d_id: {'d': (object, FalseStarter())},
                     p_id: {'p': {'connections': {'c': {}, 'c2': {}},
                                  'settings': {'s': {}}}}}
    path = os.path.join(str(tmpdir), 'test.pulse.ini')
    save_sequence_prefs(path, sequence.preferences_from_members())
    task = TransferPulseSequenceTask(sequence=sequence, sequence_path=path,
                                     sequence_timestamp=os.path.getmtime(path),
                                     sequence_vars=OrderedDict({'a': '1.5'}),
                                     name='Test',
                                     selected_instrument=('p', 'd', 'c', 's'))
    root.add_child_task(0, task)
    return task
@pytest.fixture
def editor(measurement_workbench):
    """Create a pulse sequence editor from the measurement workbench.

    Registers the pulses manifests plus the test contributions, then asks
    the measurement plugin for the pulse-sequence editor declaration and
    instantiates it.
    """
    measurement_workbench.register(PulsesManagerManifest())
    measurement_workbench.register(PulsesMeasurementManifest())
    measurement_workbench.register(PulsesContributions())
    pl = measurement_workbench.get_plugin('exopy.measurement')

    # Look up the registered editor declaration and build a new editor.
    decls = pl.get_declarations('editor', ['exopy_pulses.pulse_sequence'])
    decl = decls['exopy_pulses.pulse_sequence']
    ed = decl.new(measurement_workbench)
    return ed
def test_sequence_vars_update(measurement_workbench, editor, task,
                              exopy_qtbot, dialog_sleep):
    """Test that when unselecting the editor we properly synchronize the vars.

    The external-variables field is edited to 'a, c'; on unselection the
    task should keep 'a' with its previous value and add 'c' empty
    (dropping 'b', which was removed from the field).
    """
    task.sequence_vars = OrderedDict({'a': '1.5', 'b': '2'})
    win = show_widget(exopy_qtbot, editor)
    editor.selected_task = task
    exopy_qtbot.wait(10 + dialog_sleep)

    # Dig down to the variables editor page of the root sequence view.
    root_view = editor.page_widget().widgets()[0].scroll_widget().widgets()[0]
    vars_editor = root_view.additional_pages[0]
    # Simulate the user retyping the external variables list.
    vars_editor.page_widget().widgets()[0].text = 'a, c'

    editor.react_to_unselection(measurement_workbench)
    assert task.sequence_vars == OrderedDict([('a', '1.5'), ('c', '')])
    win.close()
def test_sequence_replacement(editor, task, exopy_qtbot, dialog_sleep):
    """Test replacing the sequence (a priori not possible).

    Swaps the task's sequence out and back and checks that the variables
    page object was rebuilt (old is not new).
    """
    editor.selected_task = task
    show_widget(exopy_qtbot, editor)
    root_view = editor.page_widget().widgets()[0].scroll_widget().widgets()[0]
    old = root_view.additional_pages[0]

    seq = task.sequence
    # NOTE(review): calling the fixture function sequence() directly is
    # deprecated in modern pytest — confirm this still runs on the pinned
    # pytest version.
    task.sequence = sequence()
    exopy_qtbot.wait(10 + dialog_sleep)

    # Restore the original sequence; the view should have been recreated.
    task.sequence = seq
    exopy_qtbot.wait(10 + dialog_sleep)

    root_view = editor.page_widget().widgets()[0].scroll_widget().widgets()[0]
    exopy_qtbot.wait(10 + dialog_sleep)
    new = root_view.additional_pages[0]

    assert old is not new
| 32.792453 | 79 | 0.654392 |
7953e26750be7e9bc6b941552f7cfa1196d359bf | 3,044 | py | Python | stanCode_projects/anagram/anagram.py | lindcrj/stanCode_projects | 2ce2f7b51fb1e6940ab2b62b05edccfef66739b4 | [
"MIT"
] | null | null | null | stanCode_projects/anagram/anagram.py | lindcrj/stanCode_projects | 2ce2f7b51fb1e6940ab2b62b05edccfef66739b4 | [
"MIT"
] | null | null | null | stanCode_projects/anagram/anagram.py | lindcrj/stanCode_projects | 2ce2f7b51fb1e6940ab2b62b05edccfef66739b4 | [
"MIT"
] | null | null | null | """
File: anagram.py
Name:
----------------------------------
This program recursively finds all the anagram(s)
for the word input by user and terminates when the
input string matches the EXIT constant defined
at line 19
If you correctly implement this program, you should see the
number of anagrams for each word listed below:
* arm -> 3 anagrams
* contains -> 5 anagrams
* stop -> 6 anagrams
* tesla -> 10 anagrams
* spear -> 12 anagrams
"""
import time # This file allows you to calculate the speed of your algorithm
# Constants
FILE = 'dictionary.txt' # This is the filename of an English dictionary
EXIT = '-1' # Controls when to stop the loop
names = []
ana = 0
ana_lst = []
prefix_ind = []
def main():
    """Interactive loop: read a word, print its anagrams, repeat until EXIT."""
    global ana, ana_lst
    """
    TODO:
    """
    # start = time.time()
    ####################
    #                  #
    #       TODO:      #
    #                  #
    ####################
    # end = time.time()
    print("Welcome to stanCode '"'Anagram Generator'"'(or -1 to quit)")
    # Load the word list once; the return value is unused afterwards
    # (read_dictionary also fills the module-level `names` list).
    dictxt = read_dictionary(FILE)
    while True:
        print('----------------------------------')
        s = input('Find anagrams for: ')
        if s == EXIT:
            break
        else:
            print("Searching...")
            start = time.time()
            find_anagrams(s, [], [])
            end = time.time()
            print(ana, "anagrams: ", ana_lst)
            # Reset the global accumulators for the next query.
            ana = 0
            ana_lst = []
            print('----------------------------------')
    # NOTE(review): if the user enters EXIT before any search, `end` and
    # `start` are undefined here and this line raises NameError — confirm
    # and guard if needed.
    print(f'The speed of your anagram algorithm: {end-start} seconds.')
def read_dictionary(filename):
    """Load the word list (one word per line) into the global ``names`` list.

    The populated list is also returned so callers can use it directly.
    """
    global names
    with open(filename, 'r') as f:
        # Strip the trailing newline/whitespace from every dictionary entry.
        names.extend(word.strip() for word in f)
    return names
def find_anagrams(s, new_lst, index_lst):
    """Recursively enumerate permutations of ``s`` and record dictionary hits.

    ``index_lst`` holds the character indices chosen so far; once it covers
    every character the candidate word is checked against the dictionary
    (global ``names``) and recorded in the global ``ana``/``ana_lst``.

    Bug fixes versus the original:
    - the prefix test previously did nothing (``if has_prefix(...) is
      False: pass``); branches whose prefix matches no dictionary word are
      now actually skipped,
    - the old global ``prefix_ind`` accumulator was never reset between
      searches, so the tested "prefix" grew without bound; the prefix is
      now rebuilt from ``index_lst`` at every step.

    :param s: the word to anagram
    :param new_lst: scratch list kept for interface compatibility (unused)
    :param index_lst: indices of the characters chosen so far
    :return: (s, ana, ana_lst), as before
    """
    global ana, ana_lst
    s_lst = list(str(s))
    if len(index_lst) == len(s_lst):
        # Base case: a full permutation — check it against the dictionary.
        candidate = "".join(s_lst[ind] for ind in index_lst)
        if candidate in names and candidate not in ana_lst:
            ana += 1
            ana_lst.append(candidate)
            print("Found: ", candidate)
            print("Searching...")
    else:
        for i in range(len(s_lst)):
            if i in index_lst:
                continue  # each character position may be used only once
            index_lst.append(i)
            # Prune: only recurse while some dictionary word starts this way.
            prefix = "".join(s_lst[ind] for ind in index_lst)
            if has_prefix(prefix):
                find_anagrams(s, new_lst, index_lst)
            index_lst.pop()
    return s, ana, ana_lst
def has_prefix(sub_s):
    """Return True when at least one dictionary word starts with ``sub_s``.

    :param sub_s: candidate prefix string
    :return: bool
    """
    # Short-circuits on the first matching dictionary entry.
    return any(str(name).startswith(sub_s) for name in names)
# Run the interactive anagram finder when executed as a script.
if __name__ == '__main__':
    main()
| 24.747967 | 93 | 0.486859 |
7953e2c043c82a67cfdc084b9a81de2bc3b5121b | 10,019 | py | Python | FacebookPostsScraper.py | hackingbutlegal/FacebookPostsScraper | aa873eb97f7b1ad00ca0dbb4da1d63f38bcbbdb9 | [
"MIT"
] | 73 | 2020-05-14T04:12:06.000Z | 2021-09-02T14:36:20.000Z | FacebookPostsScraper.py | hackingbutlegal/FacebookPostsScraper | aa873eb97f7b1ad00ca0dbb4da1d63f38bcbbdb9 | [
"MIT"
] | 20 | 2020-06-08T10:19:18.000Z | 2021-04-29T21:22:27.000Z | FacebookPostsScraper.py | hackingbutlegal/FacebookPostsScraper | aa873eb97f7b1ad00ca0dbb4da1d63f38bcbbdb9 | [
"MIT"
] | 40 | 2020-05-14T04:11:56.000Z | 2021-08-21T04:32:35.000Z | import requests
from bs4 import BeautifulSoup
import pickle
import os
from urllib.parse import urlparse, unquote
from urllib.parse import parse_qs
import pandas as pd
import json
class FacebookPostsScraper:
    """Scrape posts from Facebook profiles, pages and groups via the mobile site.

    Logs in once with email/password (session cookies are cached on disk in
    ``session_facebook.cki``) and parses the mobile HTML with BeautifulSoup.
    Scraped posts accumulate in ``self.posts`` and can be exported to
    CSV/Excel/JSON.
    """

    # We need the email and password to access Facebook, and optionally the text in the Url that identifies the "view full post".
    def __init__(self, email, password, post_url_text='Full Story'):
        self.email = email
        self.password = password
        self.headers = {  # This is the important part: Nokia C3 User Agent
            'User-Agent': 'NokiaC3-00/5.0 (07.20) Profile/MIDP-2.1 Configuration/CLDC-1.1 Mozilla/5.0 AppleWebKit/420+ (KHTML, like Gecko) Safari/420+'
        }
        self.session = requests.session()  # Create the session for the next requests
        self.cookies_path = 'session_facebook.cki'  # Give a name to store the session in a cookie file.

        # At certain point, we need find the text in the Url to point the url post, in my case, my Facebook is in
        # English, this is why it says 'Full Story', so, you need to change this for your language.
        # Some translations:
        # - English: 'Full Story'
        # - Spanish: 'Historia completa'
        self.post_url_text = post_url_text

        # Evaluate if NOT exists a cookie file, if NOT exists the we make the Login request to Facebook,
        # else we just load the current cookie to maintain the older session.
        if self.new_session():
            self.login()
        self.posts = []  # Store the scraped posts

    # We need to check if we already have a session saved or need to log to Facebook
    def new_session(self):
        """Return True when no cached cookie file exists (a fresh login is needed)."""
        if not os.path.exists(self.cookies_path):
            return True

        # Reuse the cookies saved by a previous login.
        # Bug fix: the file handle was previously never closed.
        with open(self.cookies_path, 'rb') as f:
            cookies = pickle.load(f)
        self.session.cookies = cookies
        return False

    # Utility function to make the requests and convert to soup object if necessary
    def make_request(self, url, method='GET', data=None, is_soup=True):
        """GET or POST ``url`` with the scraper session; optionally parse to soup.

        Raises Exception on empty url, unsupported method or non-200 status.
        """
        if len(url) == 0:
            raise Exception(f'Empty Url')
        if method == 'GET':
            resp = self.session.get(url, headers=self.headers)
        elif method == 'POST':
            resp = self.session.post(url, headers=self.headers, data=data)
        else:
            raise Exception(f'Method [{method}] Not Supported')
        if resp.status_code != 200:
            raise Exception(f'Error [{resp.status_code}] > {url}')
        if is_soup:
            return BeautifulSoup(resp.text, 'lxml')
        return resp

    # The first time we login
    def login(self):
        """Perform the mobile-site login flow and persist the session cookies."""
        # Get the content of HTML of mobile Login Facebook page
        url_home = "https://m.facebook.com/"
        soup = self.make_request(url_home)
        if soup is None:
            raise Exception("Couldn't load the Login Page")

        # Here we need to extract this tokens from the Login Page
        lsd = soup.find("input", {"name": "lsd"}).get("value")
        jazoest = soup.find("input", {"name": "jazoest"}).get("value")
        m_ts = soup.find("input", {"name": "m_ts"}).get("value")
        li = soup.find("input", {"name": "li"}).get("value")
        try_number = soup.find("input", {"name": "try_number"}).get("value")
        unrecognized_tries = soup.find("input", {"name": "unrecognized_tries"}).get("value")

        # This is the url to send the login params to Facebook
        url_login = "https://m.facebook.com/login/device-based/regular/login/?refsrc=https%3A%2F%2Fm.facebook.com%2F&lwv=100&refid=8"
        payload = {
            "lsd": lsd,
            "jazoest": jazoest,
            "m_ts": m_ts,
            "li": li,
            "try_number": try_number,
            "unrecognized_tries": unrecognized_tries,
            "email": self.email,
            "pass": self.password,
            "login": "Iniciar sesión",
            "prefill_contact_point": "",
            "prefill_source": "",
            "prefill_type": "",
            "first_prefill_source": "",
            "first_prefill_type": "",
            "had_cp_prefilled": "false",
            "had_password_prefilled": "false",
            "is_smart_lock": "false",
            "_fb_noscript": "true"
        }

        soup = self.make_request(url_login, method='POST', data=payload, is_soup=True)
        if soup is None:
            raise Exception(f"The login request couldn't be made: {url_login}")

        redirect = soup.select_one('a')
        if not redirect:
            raise Exception("Please log in desktop/mobile Facebook and change your password")
        url_redirect = redirect.get('href', '')
        resp = self.make_request(url_redirect)
        if resp is None:
            raise Exception(f"The login request couldn't be made: {url_redirect}")

        # Finally we get the cookies from the session and save it in a file for future usage
        # Bug fix: the cookie file handle was previously never closed.
        cookies = self.session.cookies
        with open(self.cookies_path, 'wb') as f:
            pickle.dump(cookies, f)

        return {'code': 200}

    # Scrap a list of profiles
    def get_posts_from_list(self, profiles):
        """Scrape every profile url in ``profiles``; return a list of post-lists."""
        data = []

        n = len(profiles)
        for idx in range(n):
            profile = profiles[idx]
            print(f'{idx + 1}/{n}. {profile}')
            posts = self.get_posts_from_profile(profile)
            data.append(posts)

        return data

    # This is the extraction point!
    def get_posts_from_profile(self, url_profile):
        """Scrape the timeline of one profile/page/group url.

        Returns the list of post dicts scraped from the page (also appended
        to ``self.posts``).
        """
        # Prepare the Url to point to the posts feed
        if "www." in url_profile: url_profile = url_profile.replace('www.', 'm.')
        if 'v=timeline' not in url_profile:
            if '?' in url_profile:
                url_profile = f'{url_profile}&v=timeline'
            else:
                url_profile = f'{url_profile}?v=timeline'

        is_group = '/groups/' in url_profile

        # Make a simple GET request
        soup = self.make_request(url_profile)
        if soup is None:
            print(f"Couldn't load the Page: {url_profile}")
            return []

        # Now the extraction...
        css_profile = '.storyStream > div'  # Select the posts from a user profile
        css_page = '#recent > div > div > div'  # Select the posts from a Facebook page
        css_group = '#m_group_stories_container > div > div'  # Select the posts from a Facebook group
        raw_data = soup.select(f'{css_profile} , {css_page} , {css_group}')  # Now join and scrape it

        posts = []

        for item in raw_data:  # Now, for every post...
            published = item.select_one('abbr')  # Get the formatted datetime of published
            description = item.select('p')  # Get list of all p tag, they compose the description
            images = item.select('a > img')  # Get list of all images
            _external_links = item.select('p a')  # Get list of any link in the description, this are external links
            post_url = item.find('a', text=self.post_url_text)  # Get the url to point this post.
            like_url = item.find('a', text='Like')  # Get the Like url.

            # Clean the publish date
            if published is not None:
                published = published.get_text()
            else:
                published = ''

            # Join all the text in p tags, else set empty string
            if len(description) > 0:
                description = '\n'.join([d.get_text() for d in description])
            else:
                description = ''

            # Get all the images links
            images = [image.get('src', '') for image in images]

            # Clean the post link
            if post_url is not None:
                post_url = post_url.get('href', '')
                if len(post_url) > 0:
                    post_url = f'https://www.facebook.com{post_url}'
                    p_url = urlparse(post_url)
                    qs = parse_qs(p_url.query)
                    if not is_group:
                        post_url = f'{p_url.scheme}://{p_url.hostname}{p_url.path}?story_fbid={qs["story_fbid"][0]}&id={qs["id"][0]}'
                    else:
                        post_url = f'{p_url.scheme}://{p_url.hostname}{p_url.path}/permalink/{qs["id"][0]}/'
            else:
                post_url = ''

            # Clean the Like link
            if like_url is not None:
                like_url = like_url.get('href', '')
                if len(like_url) > 0:
                    like_url = f'https://m.facebook.com{like_url}'
            else:
                like_url = ''

            # Get list of external links in post description, if any inside
            external_links = []
            for link in _external_links:
                link = link.get('href', '')
                try:
                    a = link.index("u=") + 2
                    z = link.index("&h=")
                    link = unquote(link[a:z])
                    link = link.split("?fbclid=")[0]
                    external_links.append(link)
                except ValueError as e:
                    continue

            post = {'published': published, 'description': description, 'images': images,
                    'post_url': post_url, 'external_links': external_links, 'like_url': like_url}
            posts.append(post)
            self.posts.append(post)

        return posts

    def posts_to_csv(self, filename):
        """Dump the scraped posts to a CSV file, appending '.csv' if missing."""
        # Bug fix: the original tested filename[:-4] (everything *except* the
        # extension) instead of the suffix, so the check never worked.
        if not filename.endswith('.csv'):
            filename = f'{filename}.csv'
        df = pd.DataFrame(self.posts)
        df.to_csv(filename)

    def posts_to_excel(self, filename):
        """Dump the scraped posts to an Excel file, appending '.xlsx' if missing."""
        # Bug fix: suffix check previously sliced the wrong end of the name.
        if not filename.endswith('.xlsx'):
            filename = f'{filename}.xlsx'
        df = pd.DataFrame(self.posts)
        df.to_excel(filename)

    def posts_to_json(self, filename):
        """Dump the scraped posts to a JSON file, appending '.json' if missing."""
        # Bug fix: suffix check previously sliced the wrong end of the name.
        if not filename.endswith('.json'):
            filename = f'{filename}.json'
        with open(filename, 'w') as f:
            # Bug fix: the old manual writer emitted a trailing comma before
            # ']', producing invalid JSON; json.dump writes valid output.
            json.dump(self.posts, f)
| 40.236948 | 151 | 0.566524 |
7953e2dc552b67874fc880f5bf9399057ead3146 | 6,270 | py | Python | sgespawner/spawner.py | willfurnass/sgespawner | b294c6194c511bdbc870e13c377ffb3233e3f406 | [
"BSD-2-Clause"
] | 1 | 2019-04-24T14:35:02.000Z | 2019-04-24T14:35:02.000Z | sgespawner/spawner.py | willfurnass/sgespawner | b294c6194c511bdbc870e13c377ffb3233e3f406 | [
"BSD-2-Clause"
] | 7 | 2017-05-08T14:12:36.000Z | 2020-01-14T16:36:07.000Z | sgespawner/spawner.py | willfurnass/sgespawner | b294c6194c511bdbc870e13c377ffb3233e3f406 | [
"BSD-2-Clause"
] | null | null | null | from subprocess import Popen, PIPE, STDOUT, run
import jinja2
from defusedxml import ElementTree as ET
from jupyterhub.utils import random_port
from jupyterhub.spawner import Spawner
from tornado import gen
from traitlets import List, Unicode
__all__ = ['SGESpawner']
class SGESpawner(Spawner):
    """JupyterHub Spawner that runs each single-user server as an SGE batch job.

    Jobs are submitted with ``qsub`` (executed via ``sudo -u <user>``) from a
    Jinja2-templated submission script, and monitored through
    ``qstat -t -xml``.
    """

    # Path of the Jinja2 template rendered into the qsub submission script.
    sge_template = Unicode('', config=True,
                           help="Filename of Jinja 2 template for a SGE batch job script")

    # Environment variables forwarded to the job (via ``qsub -v``) so the
    # single-user server can talk back to the Hub.
    jh_env_vars_for_job = List([
        "JPY_API_TOKEN",
        "JUPYTERHUB_API_TOKEN",
        "JUPYTERHUB_CLIENT_ID",
        "JUPYTERHUB_HOST",
        "JUPYTERHUB_OAUTH_CALLBACK_URL",
        "JUPYTERHUB_USER",
        "JUPYTERHUB_API_URL",
        "JUPYTERHUB_BASE_URL",
        "JUPYTERHUB_SERVICE_PREFIX"
    ],
        config=False,
        help="JupyterHub-related environment variables to pass to SGE job")

    def __init__(self, *args, **kwargs):
        super(SGESpawner, self).__init__(*args, **kwargs)
        # All SGE commands are executed as the spawned user through sudo.
        self.cmd_prefix = ['sudo', '-u', self.user.name]
        # Numeric SGE job id of the running single-user server (None if none).
        self.jobid = None

    def qstat_t(self, jobid, column):
        """
        Call qstat -t and extract information about a job.

        Parameters
        ----------
        jobid : `int`
            The numeric ID of the job to search for.
        column : `string`
            The name of the column to extract the information about, can be
            "host" or "state".

        Returns
        -------
        result : `string`
            The value of the column, or None if the job can not be found
        """
        cmd = self.cmd_prefix + ['qstat', '-t', '-xml', '-u', self.user.name]
        qstat_output = run(cmd, stdout=PIPE, env=self.get_env())
        jobinfo = qstat_output.stdout.decode('utf-8')
        # defusedxml is used so untrusted qstat XML cannot trigger XML attacks.
        root = ET.fromstring(jobinfo)
        # Locate the <job_list> element whose JB_job_number matches jobid.
        xpath_template = "queue_info/job_list[JB_job_number='{}']"
        job_node = root.find(xpath_template.format(jobid))
        ret = None
        if job_node is not None and len(job_node) >= 1:
            if column == 'host':
                ret = job_node.find('queue_name').text  # .split('@')[1]
            else:  # column == 'state'
                ret = job_node.find('state').text
        return ret

    def load_state(self, state):
        """Load state from the database."""
        super(SGESpawner, self).load_state(state)
        if 'jobid' in state:
            self.jobid = state['jobid']

    def get_state(self):
        """Get the current state."""
        state = super(SGESpawner, self).get_state()
        if self.jobid:
            state['jobid'] = self.jobid
        return state

    def clear_state(self):
        """Clear any state (called after shutdown)."""
        super(SGESpawner, self).clear_state()
        self.jobid = None

    @gen.coroutine
    def start(self):
        """
        Submit the (single-user Jupyter server) job to SGE and wait for it to start.

        Also stores the IP and port of the single-user server in self.user.server.

        NB you can relax the Spawner.start_timeout config value as necessary to
        ensure that the SGE job is given enough time to start.
        """
        self.port = random_port()
        # Open a (Jinja2) template for a batch job
        with open(self.sge_template, 'r') as f:
            # Instantiate the template using the username and
            # some arguments for the single-user Jupyter server process
            batch_job_submission_script = jinja2.Template(f.read()).render(
                working_dir='/home/{}'.format(self.user.name),
                jh_args=' '.join(self.get_args()),
                user_options=self.user_options)
        self.log.info("SGE: batch job sub script: '{}'".format(
            batch_job_submission_script))
        # Ensure command for submitting job run as correct user
        # by prefixing command with sudo -u <username>
        cmd = self.cmd_prefix.copy()
        # Ensure the JupyterHub API token is defined in
        # the worker session
        cmd.extend(['qsub', '-v', ','.join(self.jh_env_vars_for_job)])
        self.log.info("SGE: CMD: {}".format(cmd))
        self.proc = Popen(cmd,
                          stdout=PIPE,
                          stdin=PIPE,
                          stderr=STDOUT,
                          env=self.get_env())
        # Pipe the batch job submission script (filled-in Jinja2 template)
        # to the job submission script (saves having to create a temporary
        # file and deal with permissions)
        r = self.proc.communicate(
            input=batch_job_submission_script.encode('utf-8'))[0].decode('utf-8')
        self.log.info("SGE: {}".format(r))
        # Get the ID of the job submitted to SGE by parsing qsub's
        # "Your job <id> ..." acknowledgement line.
        jid = int(r.split('Your job ')[1].split()[0])
        self.jobid = jid
        # Wait until the worker session has started (state 'r' = running).
        state = self.qstat_t(jid, 'state')
        while state != 'r':
            yield gen.sleep(2.0)
            state = self.qstat_t(jid, 'state')
            self.log.info("SGE: Job State: {}".format(state))
        # Get and store the IP of the host of the worker session.
        # queue_name looks like "queue@host.domain"; keep the short hostname.
        host = self.qstat_t(jid, 'host')
        host = host.split('@')[1].split('.')[0]
        self.log.info("SGE: The single user server"
                      " is running on: {}".format(host))
        self.host = host
        return (host, self.port)

    @gen.coroutine
    def stop(self, now=False):
        """Stop the SGESpawner session.

        A Tornado coroutine that returns when the process has finished exiting.
        """
        if self.jobid:
            # qdel the job as the user; the Popen handle is only logged.
            ret = Popen(self.cmd_prefix +
                        ['qdel', '{}'.format(self.jobid)],
                        env=self.get_env())
            self.log.info("SGE: {}".format(ret))

    @gen.coroutine
    def poll(self):
        """Checks if the SGESpawner session is still running.

        Returns None if it is still running and an integer exit status otherwise
        """
        if self.jobid is None:
            return 0
        state = self.qstat_t(self.jobid, 'state')
        if state:
            if state == 'r':
                # The session is running
                return None
            else:  # qw is not an option here.
                return 1
        else:
            return 0
| 34.450549 | 84 | 0.572249 |
7953e33b03cf8d39cf9a5ab477477544cb178be4 | 12,160 | py | Python | qa/complete_moderated_with_timeout.py | anchaj/openbazaar-go | ce88cba2f1ee2edec356f908cc4acbbf28c9a3de | [
"MIT"
] | 2 | 2019-08-04T01:15:39.000Z | 2019-10-30T23:08:37.000Z | qa/complete_moderated_with_timeout.py | anchaj/openbazaar-go | ce88cba2f1ee2edec356f908cc4acbbf28c9a3de | [
"MIT"
] | 24 | 2017-12-08T22:34:36.000Z | 2019-08-16T14:29:57.000Z | qa/complete_moderated_with_timeout.py | anchaj/openbazaar-go | ce88cba2f1ee2edec356f908cc4acbbf28c9a3de | [
"MIT"
] | 12 | 2017-12-07T05:41:55.000Z | 2018-12-18T12:00:26.000Z | import requests
import json
import time
from collections import OrderedDict
from test_framework.test_framework import OpenBazaarTestFramework, TestFailure
class CompleteModeratedWithTimeout(OpenBazaarTestFramework):
    """End-to-end regtest of a moderated order with an escrow timeout.

    Walks the full happy path across four nodes: Bob funds a moderated
    purchase of Alice's listing (moderated by Charlie), Alice fulfills,
    Bob completes, and the multisig payout is verified in Alice's wallet.
    """

    def __init__(self):
        super().__init__()
        # Node 0 is the wallet/miner; 1 = Alice, 2 = Bob, 3 = Charlie.
        self.num_nodes = 4

    def run_test(self):
        """Drive the complete moderated-order flow and raise TestFailure on any deviation."""
        alice = self.nodes[1]
        bob = self.nodes[2]
        charlie = self.nodes[3]

        # generate some coins and send them to bob
        time.sleep(4)
        api_url = bob["gateway_url"] + "wallet/address/" + self.cointype
        r = requests.get(api_url)
        if r.status_code == 200:
            resp = json.loads(r.text)
            address = resp["address"]
        elif r.status_code == 404:
            raise TestFailure("CompleteModeratedWithTimeout - FAIL: Address endpoint not found")
        else:
            raise TestFailure("CompleteModeratedWithTimeout - FAIL: Unknown response")
        self.send_bitcoin_cmd("sendtoaddress", address, 10)
        time.sleep(20)

        # create a profile for charlie
        pro = {"name": "Charlie"}
        api_url = charlie["gateway_url"] + "ob/profile"
        r = requests.post(api_url, data=json.dumps(pro, indent=4))
        if r.status_code == 404:
            raise TestFailure("CompleteModeratedWithTimeout - FAIL: Profile post endpoint not found")
        elif r.status_code != 200:
            resp = json.loads(r.text)
            raise TestFailure("CompleteModeratedWithTimeout - FAIL: Profile POST failed. Reason: %s", resp["reason"])
        time.sleep(4)

        # make charlie a moderator
        with open('testdata/moderation.json') as listing_file:
            moderation_json = json.load(listing_file, object_pairs_hook=OrderedDict)
        api_url = charlie["gateway_url"] + "ob/moderator"
        r = requests.put(api_url, data=json.dumps(moderation_json, indent=4))
        if r.status_code == 404:
            raise TestFailure("CompleteModeratedWithTimeout - FAIL: Moderator post endpoint not found")
        elif r.status_code != 200:
            resp = json.loads(r.text)
            raise TestFailure("CompleteModeratedWithTimeout - FAIL: Moderator POST failed. Reason: %s", resp["reason"])
        moderatorId = charlie["peerId"]
        time.sleep(4)

        # post profile for alice
        with open('testdata/profile.json') as profile_file:
            profile_json = json.load(profile_file, object_pairs_hook=OrderedDict)
        api_url = alice["gateway_url"] + "ob/profile"
        requests.post(api_url, data=json.dumps(profile_json, indent=4))

        # post listing to alice (moderated by charlie, with an escrow timeout)
        with open('testdata/listing.json') as listing_file:
            listing_json = json.load(listing_file, object_pairs_hook=OrderedDict)
            listing_json["metadata"]["pricingCurrency"] = "t" + self.cointype
        slug = listing_json["slug"]
        listing_json["moderators"] = [moderatorId]
        listing_json["metadata"]["escrowTimeoutHours"] = 1000
        api_url = alice["gateway_url"] + "ob/listing"
        r = requests.post(api_url, data=json.dumps(listing_json, indent=4))
        if r.status_code == 404:
            raise TestFailure("CompleteModeratedWithTimeout - FAIL: Listing post endpoint not found")
        elif r.status_code != 200:
            resp = json.loads(r.text)
            raise TestFailure("CompleteModeratedWithTimeout - FAIL: Listing POST failed. Reason: %s", resp["reason"])
        resp = json.loads(r.text)
        slug = resp["slug"]
        time.sleep(4)

        # get listing hash
        api_url = alice["gateway_url"] + "ob/listings/" + alice["peerId"]
        r = requests.get(api_url)
        if r.status_code != 200:
            raise TestFailure("CompleteModeratedWithTimeout - FAIL: Couldn't get listing index")
        resp = json.loads(r.text)
        listingId = resp[0]["hash"]

        # bob send order
        with open('testdata/order_direct.json') as order_file:
            order_json = json.load(order_file, object_pairs_hook=OrderedDict)
        order_json["items"][0]["listingHash"] = listingId
        order_json["moderator"] = moderatorId
        order_json["paymentCoin"] = "t" + self.cointype
        api_url = bob["gateway_url"] + "ob/purchase"
        r = requests.post(api_url, data=json.dumps(order_json, indent=4))
        if r.status_code == 404:
            raise TestFailure("CompleteModeratedWithTimeout - FAIL: Purchase post endpoint not found")
        elif r.status_code != 200:
            resp = json.loads(r.text)
            self.print_logs(alice, "ob.log")
            raise TestFailure("CompleteModeratedWithTimeout - FAIL: Purchase POST failed. Reason: %s", resp["reason"])
        resp = json.loads(r.text)
        orderId = resp["orderId"]
        payment_address = resp["paymentAddress"]
        payment_amount = resp["amount"]

        # check the purchase saved correctly
        api_url = bob["gateway_url"] + "ob/order/" + orderId
        r = requests.get(api_url)
        if r.status_code != 200:
            raise TestFailure("CompleteModeratedWithTimeout - FAIL: Couldn't load order from Bob")
        resp = json.loads(r.text)
        if resp["state"] != "AWAITING_PAYMENT":
            raise TestFailure("CompleteModeratedWithTimeout - FAIL: Bob purchase saved in incorrect state")
        if resp["funded"] == True:
            raise TestFailure("CompleteModeratedWithTimeout - FAIL: Bob incorrectly saved as funded")

        # check the sale saved correctly
        api_url = alice["gateway_url"] + "ob/order/" + orderId
        r = requests.get(api_url)
        if r.status_code != 200:
            raise TestFailure("CompleteModeratedWithTimeout - FAIL: Couldn't load order from Alice")
        resp = json.loads(r.text)
        if resp["state"] != "AWAITING_PAYMENT":
            raise TestFailure("CompleteModeratedWithTimeout - FAIL: Alice purchase saved in incorrect state")
        if resp["funded"] == True:
            raise TestFailure("CompleteModeratedWithTimeout - FAIL: Alice incorrectly saved as funded")

        # fund order
        spend = {
            "wallet": self.cointype,
            "address": payment_address,
            "amount": payment_amount,
            "feeLevel": "NORMAL"
        }
        api_url = bob["gateway_url"] + "wallet/spend"
        r = requests.post(api_url, data=json.dumps(spend, indent=4))
        if r.status_code == 404:
            raise TestFailure("CompleteModeratedWithTimeout - FAIL: Spend post endpoint not found")
        elif r.status_code != 200:
            resp = json.loads(r.text)
            raise TestFailure("CompleteModeratedWithTimeout - FAIL: Spend POST failed. Reason: %s", resp["reason"])
        time.sleep(20)

        # check bob detected payment
        api_url = bob["gateway_url"] + "ob/order/" + orderId
        r = requests.get(api_url)
        if r.status_code != 200:
            raise TestFailure("CompleteModeratedWithTimeout - FAIL: Couldn't load order from Bob")
        resp = json.loads(r.text)
        if resp["state"] != "AWAITING_FULFILLMENT":
            raise TestFailure("CompleteModeratedWithTimeout - FAIL: Bob failed to detect his payment")
        if resp["funded"] == False:
            raise TestFailure("CompleteModeratedWithTimeout - FAIL: Bob incorrectly saved as unfunded")

        # check alice detected payment
        api_url = alice["gateway_url"] + "ob/order/" + orderId
        r = requests.get(api_url)
        if r.status_code != 200:
            raise TestFailure("CompleteModeratedWithTimeout - FAIL: Couldn't load order from Alice")
        resp = json.loads(r.text)
        if resp["state"] != "AWAITING_FULFILLMENT":
            raise TestFailure("CompleteModeratedWithTimeout - FAIL: Alice failed to detect payment")
        if resp["funded"] == False:
            raise TestFailure("CompleteModeratedWithTimeout - FAIL: Alice incorrectly saved as unfunded")

        # alice send order fulfillment
        with open('testdata/fulfillment.json') as fulfillment_file:
            fulfillment_json = json.load(fulfillment_file, object_pairs_hook=OrderedDict)
        fulfillment_json["slug"] = slug
        fulfillment_json["orderId"] = orderId
        api_url = alice["gateway_url"] + "ob/orderfulfillment"
        r = requests.post(api_url, data=json.dumps(fulfillment_json, indent=4))
        if r.status_code == 404:
            raise TestFailure("CompleteModeratedWithTimeout - FAIL: Fulfillment post endpoint not found")
        elif r.status_code != 200:
            resp = json.loads(r.text)
            raise TestFailure("CompleteModeratedWithTimeout - FAIL: Fulfillment POST failed. Reason: %s", resp["reason"])
        time.sleep(4)

        # check bob received fulfillment
        api_url = bob["gateway_url"] + "ob/order/" + orderId
        r = requests.get(api_url)
        if r.status_code != 200:
            raise TestFailure("CompleteModeratedWithTimeout - FAIL: Couldn't load order from Bob")
        resp = json.loads(r.text)
        if resp["state"] != "FULFILLED":
            raise TestFailure("CompleteModeratedWithTimeout - FAIL: Bob failed to detect order fulfillment")

        # check alice set fulfillment correctly
        api_url = alice["gateway_url"] + "ob/order/" + orderId
        r = requests.get(api_url)
        if r.status_code != 200:
            raise TestFailure("CompleteModeratedWithTimeout - FAIL: Couldn't load order from Bob")
        resp = json.loads(r.text)
        if resp["state"] != "FULFILLED":
            raise TestFailure("CompleteModeratedWithTimeout - FAIL: Alice failed to order fulfillment")

        # bob send order completion
        with open('testdata/completion.json') as completion_file:
            completion_json = json.load(completion_file, object_pairs_hook=OrderedDict)
        completion_json["orderId"] = orderId
        completion_json["ratings"][0]["slug"] = slug
        api_url = bob["gateway_url"] + "ob/ordercompletion"
        r = requests.post(api_url, data=json.dumps(completion_json, indent=4))
        if r.status_code == 404:
            raise TestFailure("CompleteModeratedWithTimeout - FAIL: Completion post endpoint not found")
        elif r.status_code != 200:
            resp = json.loads(r.text)
            raise TestFailure("CompleteModeratedWithTimeout - FAIL: Completion POST failed. Reason: %s", resp["reason"])
        time.sleep(4)

        # check alice received completion
        api_url = alice["gateway_url"] + "ob/order/" + orderId
        r = requests.get(api_url)
        if r.status_code != 200:
            raise TestFailure("CompleteModeratedWithTimeout - FAIL: Couldn't load order from Alice")
        resp = json.loads(r.text)
        if resp["state"] != "COMPLETED":
            raise TestFailure("CompleteModeratedWithTimeout - FAIL: Alice failed to detect order completion")

        # check bob set completion correctly
        api_url = bob["gateway_url"] + "ob/order/" + orderId
        r = requests.get(api_url)
        if r.status_code != 200:
            raise TestFailure("CompleteModeratedWithTimeout - FAIL: Couldn't load order from Bob")
        resp = json.loads(r.text)
        if resp["state"] != "COMPLETED":
            raise TestFailure("CompleteModeratedWithTimeout - FAIL: Bob failed to order completion")

        # mine a block so the payout confirms
        self.send_bitcoin_cmd("generate", 1)
        time.sleep(2)

        # Check the funds moved into alice's wallet
        api_url = alice["gateway_url"] + "wallet/balance/" + self.cointype
        r = requests.get(api_url)
        if r.status_code == 200:
            resp = json.loads(r.text)
            confirmed = int(resp["confirmed"])
            #unconfirmed = int(resp["unconfirmed"])
            if confirmed <= 0:
                raise TestFailure("CompleteModeratedWithTimeout - FAIL: Alice failed to receive the multisig payout")
        else:
            raise TestFailure("CompleteModeratedWithTimeout - FAIL: Failed to query Alice's balance")

        print("CompleteModeratedWithTimeout - PASS")
# Run the integration test on regtest with exchange rates disabled.
if __name__ == '__main__':
    print("Running CompleteModeratedWithTimeout")
    CompleteModeratedWithTimeout().main(["--regtest", "--disableexchangerates"])
| 48.253968 | 121 | 0.639967 |
7953e4f271e97bda7fd65bd269537afd0d10ab45 | 726 | py | Python | code/obtuse_angled_triangles/sol_210.py | bhavinjawade/project-euler-solutions | 56bf6a282730ed4b9b875fa081cf4509d9939d98 | [
"Apache-2.0"
] | 2 | 2020-07-16T08:16:32.000Z | 2020-10-01T07:16:48.000Z | code/obtuse_angled_triangles/sol_210.py | Psingh12354/project-euler-solutions | 56bf6a282730ed4b9b875fa081cf4509d9939d98 | [
"Apache-2.0"
] | null | null | null | code/obtuse_angled_triangles/sol_210.py | Psingh12354/project-euler-solutions | 56bf6a282730ed4b9b875fa081cf4509d9939d98 | [
"Apache-2.0"
] | 1 | 2021-05-07T18:06:08.000Z | 2021-05-07T18:06:08.000Z |
# -*- coding: utf-8 -*-
'''
File name: code\obtuse_angled_triangles\sol_210.py
Author: Vaidic Joshi
Date created: Oct 20, 2018
Python Version: 3.x
'''
# Solution to Project Euler Problem #210 :: Obtuse Angled Triangles
#
# For more information see:
# https://projecteuler.net/problem=210
# Problem Statement
'''
Consider the set S(r) of points (x,y) with integer coordinates satisfying |x| + |y| ≤ r.
Let O be the point (0,0) and C the point (r/4,r/4).
Let N(r) be the number of points B in S(r), so that the triangle OBC has an obtuse angle, i.e. the largest angle α satisfies 90°<α<180°.
So, for example, N(4)=24 and N(8)=100.
What is N(1,000,000,000)?
'''
# Solution
# Solution Approach
'''
'''
| 24.2 | 136 | 0.663912 |
7953e624e2d9ef81b41d2faeb390839cecb77735 | 3,280 | py | Python | src/AoC_2016/d16_dragon_checksum/dragon_checksum.py | derailed-dash/Advent-of-Code | 12378baf33ef4a59958e84eb60e795b6530c22ba | [
"MIT"
] | 9 | 2021-12-31T20:13:03.000Z | 2022-03-05T07:05:06.000Z | src/AoC_2016/d16_dragon_checksum/dragon_checksum.py | derailed-dash/Advent-of-Code | 12378baf33ef4a59958e84eb60e795b6530c22ba | [
"MIT"
] | 1 | 2022-01-25T08:35:04.000Z | 2022-01-29T00:07:00.000Z | src/AoC_2016/d16_dragon_checksum/dragon_checksum.py | derailed-dash/Advent-of-Code | 12378baf33ef4a59958e84eb60e795b6530c22ba | [
"MIT"
] | null | null | null | """
Author: Darren
Date: 07/08/2021
Solving https://adventofcode.com/2016/day/16
Part 1:
So trivial, doesn't really require explaining.
Part 2:
Same as part 1, but with a much longer target. No changes required.
"""
import logging
import os
import time
SCRIPT_DIR = os.path.dirname(__file__)
INPUT_FILE = "input/input.txt"
SAMPLE_INPUT_FILE = "input/sample_input.txt"
PART_1_TARGET_DATA_SIZE = 272
PART_2_TARGET_DATA_SIZE = 35651584
def main():
    """Read the puzzle input, fill the disk for both parts and log each checksum."""
    logging.basicConfig(level=logging.DEBUG, format="%(asctime)s:%(levelname)s:\t%(message)s")

    # input_file = os.path.join(SCRIPT_DIR, SAMPLE_INPUT_FILE)
    input_file = os.path.join(SCRIPT_DIR, INPUT_FILE)
    with open(input_file, mode="rt") as f:
        data = f.read()

    # Part 1: small disk
    dragon_data = generate_dragon_data(data, PART_1_TARGET_DATA_SIZE)
    checksum = compute_checksum(dragon_data)
    logging.info(f"Part 1 Checksum: {checksum}")

    # Part 2: much larger disk — same algorithm, no changes needed
    dragon_data = generate_dragon_data(data, PART_2_TARGET_DATA_SIZE)
    checksum = compute_checksum(dragon_data)
    logging.info(f"Part 2 Checksum: {checksum}")
def compute_checksum(dragon_data: str) -> str:
    """Reduce the data to its checksum.

    Each pair of adjacent characters collapses to "1" when they match and
    "0" when they differ.  The reduction is repeated on its own output
    until the resulting checksum has an odd length.
    """
    current = dragon_data
    while True:
        reduced = "".join(
            "1" if current[i] == current[i + 1] else "0"
            for i in range(0, len(current), 2)
        )
        if len(reduced) % 2 == 1:  # odd length -> this is the final checksum
            return reduced
        current = reduced
def generate_dragon_data(data, target_size):
    """ Dragon-encode the source repeatedly until we hold at least
    target_size characters, then truncate to exactly that many.

    Args:
        data ([str]): Source data to encode
        target_size ([int]): Target data size required

    Returns:
        [str]: Encoded data, exactly target_size characters long
    """
    encoded = dragon_encode(data)
    while len(encoded) < target_size:
        encoded = dragon_encode(encoded)
    return encoded[:target_size]
def dragon_encode(input_data) -> str:
    """ Takes an initial state (the input_data) and then applies these transformations:
        - Call the data you have at this point "a".
        - Make a copy of "a"; call this copy "b".
        - Reverse the order of the characters in "b".
        - In "b", replace all instances of 0 with 1 and all 1s with 0.
        - The resulting data is "a", then a single 0, then "b".
    As a result, each transformation returns a str that is 2n+1 in length """
    # str.translate flips both bit characters in one pass.  This replaces the
    # previous replace("0","x").replace("1","0").replace("x","1") chain, whose
    # sentinel also (accidentally) mapped any literal 'x' in the input to '1'.
    part_b = input_data[::-1].translate(str.maketrans("01", "10"))
    return input_data + "0" + part_b
if __name__ == "__main__":
    # Report wall-clock runtime alongside the answers
    t1 = time.perf_counter()
    main()
    t2 = time.perf_counter()
    print(f"Execution time: {t2 - t1:0.4f} seconds")
| 32.8 | 113 | 0.664939 |
7953e632472f52f8038aef97305b6c4d9b0596c1 | 4,701 | py | Python | ontask/settings.py | LucasFranciscoCorreia/ontask_b | 5473e9faa24c71a2a1102d47ebc2cbf27608e42a | [
"MIT"
] | null | null | null | ontask/settings.py | LucasFranciscoCorreia/ontask_b | 5473e9faa24c71a2a1102d47ebc2cbf27608e42a | [
"MIT"
] | null | null | null | ontask/settings.py | LucasFranciscoCorreia/ontask_b | 5473e9faa24c71a2a1102d47ebc2cbf27608e42a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Action configuration options."""
import os
import sys
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
############
#
# NOTIFICATION EMAILS
#
############
# Every value below is read from Django settings when the deployment defines
# it; otherwise the hard-coded default after the setting name is used.
NOTIFICATION_TEMPLATE = getattr(
    settings,
    'EMAIL_ACTION_NOTIFICATION_TEMPLATE',
    """<html>
<head/>
<body>
<p>Dear {{ user.name }}</p>
<p>This message is to inform you that on {{ email_sent_datetime }}
{{ num_messages }} email{% if num_messages > 1 %}s{% endif %} were sent
resulting from the execution of the action with name "{{ action.name }}".</p>
{% if filter_present %}
<p>The action had a filter that reduced the number of messages from
{{ num_rows }} to {{ num_selected }}.</p>
{% else %}
<p>All the data rows stored in the workflow table were used.</p>
{% endif %}
Regards.
The OnTask Support Team
</body></html>""")
# Subject line of the post-execution notification email.
NOTIFICATION_SUBJECT = getattr(
    settings,
    'EMAIL_ACTION_NOTIFICATION_SUBJECT',
    _('OnTask: Action executed'))
# "From:" address used for those notification emails.
NOTIFICATION_SENDER = getattr(
    settings,
    'EMAIL_ACTION_NOTIFICATION_SENDER',
    'ontask@ontasklearning.org')
############
#
# UPLOADS
#
############
# JSON-encoded list of MIME types accepted for data uploads.
CONTENT_TYPES = getattr(
    settings,
    'DATAOPS_CONTENT_TYPES',
    '["text/csv", "application/json", "application/gzip"]')
# Upload size cap in bytes (default is 200 MiB).
MAX_UPLOAD_SIZE = getattr(settings, 'DATAOPS_MAX_UPLOAD_SIZE', 209715200)
############
#
# TRANSFORMATIONS AND MODELS
#
############
PLUGIN_DIRECTORY = getattr(
    settings,
    'DATAOPS_PLUGIN_DIRECTORY',
    os.path.join(settings.BASE_DIR, 'plugins'))
# Get the plugin path in the sys.path: resolve a relative directory against
# BASE_DIR and prepend it so plugin modules become importable.
plugin_folder = PLUGIN_DIRECTORY
if not os.path.isabs(plugin_folder):
    plugin_folder = os.path.join(settings.BASE_DIR, plugin_folder)
if plugin_folder not in sys.path:
    sys.path.insert(0, plugin_folder)
############
#
# LOGS
#
############
# Maximum number of log records rendered in the log list view.
MAX_LOG_LIST_SIZE = getattr(settings, 'LOGS_MAX_LIST_SIZE', 200)
############
#
# MISCELLANEOUS
#
############
# Optional URL prefix pointing at the documentation; empty string disables it.
HELP_URL = getattr(settings, 'ONTASK_HELP_URL', '')
if 'siteprefs' in settings.INSTALLED_APPS:
    # Respect users who don't have siteprefs installed: the preferences UI
    # below is only registered when the app is enabled.
    from siteprefs.toolbox import (
        patch_locals, register_prefs, pref, pref_group)
    patch_locals()  # siteprefs bootstrap: capture the module-level values above.
    register_prefs(
        pref_group(
            _('Notification Emails'),
            (
                pref(
                    NOTIFICATION_TEMPLATE,
                    verbose_name=_('Template to send email notification'),
                    static=False,
                    field=models.TextField()),
                pref(
                    NOTIFICATION_SUBJECT,
                    verbose_name=_('Subject line for notification messages'),
                    static=False,
                    field=models.CharField(max_length=1024)),
                pref(
                    NOTIFICATION_SENDER,
                    verbose_name=_('"From:" field in notification emails'),
                    static=False,
                    field=models.CharField(max_length=1024)),
            ),
            static=False),
        pref_group(
            _('Uploads'),
            (
                pref(
                    CONTENT_TYPES,
                    verbose_name=_('Content types allowed in uploads'),
                    static=False,
                    field=models.TextField(blank=True)),
                pref(
                    MAX_UPLOAD_SIZE,
                    # NOTE(review): "uplaods" typo below is a user-visible,
                    # translatable string; left untouched here on purpose.
                    verbose_name=_('Maximum size allowed in file uplaods'),
                    static=False,
                    field=models.IntegerField(blank=True)),
            ),
            static=False),
        pref_group(
            _('Transformations and Models'),
            (
                pref(
                    PLUGIN_DIRECTORY,
                    verbose_name=_('Folder where code packages are stored'),
                    static=False,
                    field=models.CharField(max_length=2048, blank=True)),
            ),
            static=False),
        pref_group(
            _('Logs'),
            (
                pref(
                    MAX_LOG_LIST_SIZE,
                    verbose_name=_('Maximum number of logs shown to the user'),
                    static=False,
                    field=models.IntegerField(blank=True)),
            ),
            static=False),
        pref_group(
            _('Miscellaneous'),
            (
                # NOTE(review): odd line break between `pref` and its argument
                # list -- still a valid call, but worth reformatting separately.
                pref
                (HELP_URL,
                 verbose_name=_(
                     'URL prefix to access the documentation'),
                 static=False,
                 field=models.CharField(max_length=256, blank=True)),
            ),
            static=False),
    )
| 27.982143 | 79 | 0.549458 |
7953e7b0a79fdeb43241ffa0ff1fe16376b25f8e | 4,062 | py | Python | digits/download_data/mnist.py | ojmakhura/DIGITS | f34e62c245054b51ea51fcb8949d2ca777f162d1 | [
"BSD-3-Clause"
] | null | null | null | digits/download_data/mnist.py | ojmakhura/DIGITS | f34e62c245054b51ea51fcb8949d2ca777f162d1 | [
"BSD-3-Clause"
] | null | null | null | digits/download_data/mnist.py | ojmakhura/DIGITS | f34e62c245054b51ea51fcb8949d2ca777f162d1 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
import gzip
import os
import struct
import numpy as np
import PIL.Image
from .downloader import DataDownloader
class MnistDownloader(DataDownloader):
    """
    See details about the MNIST dataset here:
    http://yann.lecun.com/exdb/mnist/
    """

    def urlList(self):
        """Return the URLs of the four gzipped MNIST archives."""
        return [
            'http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz',
            'http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz',
            'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz',
            'http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz',
        ]

    def uncompressData(self):
        """Gunzip every downloaded archive into self.outdir.

        Archives already uncompressed on a previous run are skipped.
        """
        for zipped, unzipped in [
            ('train-images-idx3-ubyte.gz', 'train-images.bin'),
            ('train-labels-idx1-ubyte.gz', 'train-labels.bin'),
            ('t10k-images-idx3-ubyte.gz', 'test-images.bin'),
            ('t10k-labels-idx1-ubyte.gz', 'test-labels.bin'),
        ]:
            zipped_path = os.path.join(self.outdir, zipped)
            assert os.path.exists(zipped_path), 'Expected "%s" to exist' % zipped
            unzipped_path = os.path.join(self.outdir, unzipped)
            if not os.path.exists(unzipped_path):
                print("Uncompressing file=%s ..." % zipped)
                with gzip.open(zipped_path) as infile, open(unzipped_path, 'wb') as outfile:
                    outfile.write(infile.read())

    def processData(self):
        """Convert both binary image/label pairs into image files on disk."""
        self.__extract_images('train-images.bin', 'train-labels.bin', 'train')
        self.__extract_images('test-images.bin', 'test-labels.bin', 'test')

    def __extract_images(self, images_file, labels_file, phase):
        """
        Extract information from binary files and store them as images
        """
        labels = self.__readLabels(os.path.join(self.outdir, labels_file))
        images = self.__readImages(os.path.join(self.outdir, images_file))
        assert len(labels) == len(images), '%d != %d' % (len(labels), len(images))
        output_dir = os.path.join(self.outdir, phase)
        self.mkdir(output_dir, clean=True)
        # labels.txt lists the ten class names (digits 0-9), one per line
        with open(os.path.join(output_dir, 'labels.txt'), 'w') as outfile:
            for label in range(10):
                outfile.write('%s\n' % label)
        # <phase>.txt maps each written image path to its class label
        with open(os.path.join(output_dir, '%s.txt' % phase), 'w') as outfile:
            for index, image in enumerate(images):
                dirname = os.path.join(output_dir, labels[index])
                self.mkdir(dirname)
                filename = os.path.join(dirname, '%05d.%s' % (index, self.file_extension))
                image.save(filename)
                outfile.write('%s %s\n' % (filename, labels[index]))

    def __readLabels(self, filename):
        """
        Returns a list of label strings (each "0".."9")
        """
        print('Reading labels from %s ...' % filename)
        labels = []
        with open(filename, 'rb') as infile:
            infile.read(4)  # ignore magic number
            count = struct.unpack('>i', infile.read(4))[0]
            data = infile.read(count)
            # Iterate via bytearray so each element is an int on both Python 2
            # and Python 3.  Iterating a py3 `bytes` directly yields ints,
            # which crashed the old per-byte struct.unpack('>B', byte) call.
            for byte in bytearray(data):
                labels.append(str(byte))
        return labels

    def __readImages(self, filename):
        """
        Returns a list of PIL.Image objects
        """
        print('Reading images from %s ...' % filename)
        images = []
        with open(filename, 'rb') as infile:
            infile.read(4)  # ignore magic number
            count = struct.unpack('>i', infile.read(4))[0]
            rows = struct.unpack('>i', infile.read(4))[0]
            columns = struct.unpack('>i', infile.read(4))[0]
            for i in range(count):
                data = infile.read(rows * columns)
                # np.frombuffer replaces the long-deprecated np.fromstring;
                # the read-only view is fine because `255 - image` allocates
                # a fresh writable array.
                image = np.frombuffer(data, dtype=np.uint8)
                image = image.reshape((rows, columns))
                image = 255 - image  # now black digit on white background
                images.append(PIL.Image.fromarray(image))
        return images
| 40.217822 | 92 | 0.573363 |
7953e831915c6628cb019c3adbc19ade324a7de9 | 1,247 | py | Python | 100-200q/116.py | rampup01/Leetcode | 8450a95a966ef83b24ffe6450f06ce8de92b3efb | [
"MIT"
] | 990 | 2018-06-05T11:49:22.000Z | 2022-03-31T08:59:17.000Z | 100-200q/116.py | rampup01/Leetcode | 8450a95a966ef83b24ffe6450f06ce8de92b3efb | [
"MIT"
] | 1 | 2021-11-01T01:29:38.000Z | 2021-11-01T01:29:38.000Z | 100-200q/116.py | rampup01/Leetcode | 8450a95a966ef83b24ffe6450f06ce8de92b3efb | [
"MIT"
] | 482 | 2018-06-12T22:16:53.000Z | 2022-03-29T00:23:29.000Z | '''
Given a binary tree
struct TreeLinkNode {
TreeLinkNode *left;
TreeLinkNode *right;
TreeLinkNode *next;
}
Populate each next pointer to point to its next right node. If there is no next right node, the next pointer should be set to NULL.
Initially, all next pointers are set to NULL
Example:
Given the following perfect binary tree,
1
/ \
2 3
/ \ / \
4 5 6 7
After calling your function, the tree should look like:
1 -> NULL
/ \
2 -> 3 -> NULL
/ \ / \
4->5->6->7 -> NULL
'''
# Definition for binary tree with next pointer.
# class TreeLinkNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# self.next = None
class Solution:
    # @param root, a tree link node
    # @return nothing
    def connect(self, root):
        """Populate each node's `next` pointer with its right-hand
        neighbour on the same level (perfect binary tree assumed);
        the rightmost node of each level gets None."""
        if root is None:
            return
        root.next = None
        self._wire(root)

    def _wire(self, node):
        """Pre-order walk: the parent's `next` is already set, so the
        right child's neighbour is reachable via node.next.left."""
        if node is None:
            return
        left, right = node.left, node.right
        if left:
            left.next = right
        if right:
            right.next = node.next.left if node.next else None
        self._wire(left)
        self._wire(right)
7953eacf220d07c08b680d83faf5dbad33525914 | 15,567 | py | Python | jobs/signing/sign-rhacs/umb_producer.py | brenton/aos-cd-jobs | 34e427bb7091c52791bc93a34f062e57dc005082 | [
"Apache-2.0"
] | null | null | null | jobs/signing/sign-rhacs/umb_producer.py | brenton/aos-cd-jobs | 34e427bb7091c52791bc93a34f062e57dc005082 | [
"Apache-2.0"
] | null | null | null | jobs/signing/sign-rhacs/umb_producer.py | brenton/aos-cd-jobs | 34e427bb7091c52791bc93a34f062e57dc005082 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python2
import base64
import json
import logging
import ssl
import subprocess
import sys
import threading
import click
import requests
from rhmsg.activemq.producer import AMQProducer
from rhmsg.activemq.consumer import AMQConsumer
# Expose errors during signing for debugging
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
######################################################################
# Broker pairs per UMB environment; both producer and consumer are given
# the pair for the selected environment.
URLS = {
    'dev': (
        'amqps://messaging-devops-broker03.dev1.ext.devlab.redhat.com:5671',
        'amqps://messaging-devops-broker04.dev1.ext.devlab.redhat.com:5671',
    ),
    'qa': (
        'amqps://messaging-devops-broker03.web.qa.ext.phx1.redhat.com:5671',
        'amqps://messaging-devops-broker04.web.qa.ext.phx1.redhat.com:5671',
    ),
    'stage': (
        'amqps://messaging-devops-broker03.web.stage.ext.phx2.redhat.com:5671',
        'amqps://messaging-devops-broker04.web.stage.ext.phx2.redhat.com:5671',
    ),
    'prod': (
        'amqps://messaging-devops-broker03.web.prod.ext.phx2.redhat.com:5671',
        'amqps://messaging-devops-broker04.web.prod.ext.phx2.redhat.com:5671',
    ),
}
# Topic that signing requests are published to.
TOPIC = 'VirtualTopic.eng.art.artifact.sign'
# TODO: In the future we need to handle 'rhcos' having '4.1'
# hard-coded into the URL path.
# URL templates of the sha256sum.txt files to download and sign.
MESSAGE_DIGESTS = {
    'openshift': 'https://mirror.openshift.com/pub/openshift-v4/{arch}/clients/{release_stage}/{release_name}/sha256sum.txt',
    'rhcos': 'https://mirror.openshift.com/pub/openshift-v4/{arch}/dependencies/rhcos/{release_name_xy}/{release_name}/sha256sum.txt'
}
DEFAULT_CA_CHAIN = "/etc/pki/ca-trust/source/anchors/RH-IT-Root-CA.crt"
# This is the JSON we send OVER the bus when requesting signatures
SIGN_REQUEST_MESSAGE_FIELDS = [
    "artifact",
    # Added by ART
    "artifact_meta",
    "request_id",
    "requestor",
    "sig_keyname",
]
# Queue pattern our consumer listens on for robosignatory's reply.
ART_CONSUMER = 'Consumer.openshift-art-signatory.{env}.VirtualTopic.eng.robosignatory.art.sign'
def get_release_tag(release_name, arch):
    """Determine the quay destination tag where a release image lives, based on the
    release name and arch (since we can now have multiple arches for each release name)
    - make sure it includes the arch in the tag to distinguish from any other releases of same name.

    e.g.:
    (4.2.0-0.nightly-s390x-2019-12-10-202536, s390x) remains 4.2.0-0.nightly-s390x-2019-12-10-202536
    (4.3.0-0.nightly-2019-12-07-121211, x86_64) becomes 4.3.0-0.nightly-2019-12-07-121211-x86_64
    """
    if arch in release_name:
        return release_name
    return "{}-{}".format(release_name, arch)
######################################################################
# Click stuff! Define these here and reuse them later because having
# 'required' options in the global context creates a poor user
# experience. Running "this-script <sub-command> --help" won't work
# until every global required option is provided.
context_settings = dict(help_option_names=['-h', '--help'])
# Reusable option decorators, applied per sub-command below.
requestor = click.option("--requestor", required=True, metavar="USERID",
                         help="The user who requested the signature")
product = click.option("--product", required=True,
                       type=click.Choice(["openshift", "rhcos"]),
                       help="Which product this signature is for")
request_id = click.option("--request-id", required=True, metavar="BUILDURL",
                          help="Unique build job identifier for this signing request, "
                          "use the job URL from Jenkins: $env.BUILD_URL")
sig_keyname = click.option("--sig-keyname", required=True,
                           type=click.Choice(['test', 'redhatrelease2', 'beta2']),
                           help="Name of the key to have sign our request")
release_name_opt = click.option("--release-name", required=True, metavar="SEMVER",
                                help="Numerical name of this release, for example: 4.1.0-rc.10")
arch_opt = click.option("--arch", required=True, metavar="ARCHITECTURE",
                        type=click.Choice(['x86_64', 'ppc64le', 's390x']),
                        help="Which architecture this release was built for")
client_type = click.option("--client-type", required=True, metavar="VAL",
                           help="What repo is this image for?")
client_cert = click.option("--client-cert", required=True, metavar="CERT-PATH",
                           type=click.Path(exists=True),
                           help="Path to the client certificate for UMB authentication")
client_key = click.option("--client-key", required=True, metavar="KEY-PATH",
                          type=click.Path(exists=True),
                          help="Path to the client key for UMB authentication")
# NOTE(review): this module-level `env` is a click decorator object, yet
# consumer_thread() below formats it into ART_CONSUMER as if it were the
# environment string -- confirm and fix separately.
env = click.option("--env", required=False, metavar="ENVIRONMENT",
                   default='stage',
                   type=click.Choice(['dev', 'stage', 'prod']),
                   help="Which UMB environment to send to")
noop = click.option("--noop", type=bool, is_flag=True, default=False,
                    help="If given, DO NOT request signature, "
                    "show the JSON that WOULD be sent over the bus")
ca_certs = click.option("--ca-certs", type=click.Path(exists=True),
                        default=DEFAULT_CA_CHAIN,
                        help="Manually specify the path to the RHIT CA Trust Chain. "
                        "Default: {}".format(DEFAULT_CA_CHAIN))
digest = click.option("--digest", metavar="DIGEST", help="Pass the digest that should be signed")
# ---------------------------------------------------------------------
@click.group(context_settings=context_settings)
def cli(**kwargs):
    """Helper utility for internal Red Hat use ONLY. Use in a build job to
    request signatures for various artifacts produced as part of an
    Openshift 4.x release. Signatures are requested by sending a JSON blob
    over the Universal Message Bus to the 'robosignatory' (RADAS).

    You may override the default path to look for the Red Hat IT
    Certificate Authority trust chain by using the --ca-certs option in
    the global context (before the sub-command).
    """
    pass
######################################################################
# Helpers
def get_digest_base64(location):
    """Download the sha256sum.txt message digest file at the given
    `location`.

    :return: A `string` of the base64-encoded message digest
    """
    response = requests.get(location,
                            verify=ssl.get_default_verify_paths().openssl_cafile)
    if response.status_code != 200:
        raise Exception(response.reason)
    # b64encode needs bytes in and yields bytes out; decode the result back
    # to str so it can be serialized with json later.
    return base64.b64encode(response.text.encode()).decode()
def presend_validation(message):
    """Verify the message we want to send over the bus has all the
    required fields.

    :return: True when complete, otherwise the name of the first
        missing field (checked in declaration order)
    """
    missing = next(
        (field for field in SIGN_REQUEST_MESSAGE_FIELDS if field not in message),
        None)
    return True if missing is None else missing
def oc_image_info(pullspec):
    """Get metadata for an image at the given `pullspec`

    :return: a dict with the serialized JSON from the 'oc image info'
    call
    """
    command = ['oc', 'image', 'info', '-o', 'json', pullspec]
    return json.loads(subprocess.check_output(command))
def get_bus_producer(env, certificate, private_key, trusted_certificates):
    """This is just a wrapper around creating a producer. We're going to
    need this in multiple places so we want to ensure we do it the
    same way each time.
    """
    # Falls back to the 'stage' brokers when env is falsy.
    return AMQProducer(urls=URLS[env or 'stage'],
                       certificate=certificate,
                       private_key=private_key,
                       trusted_certificates=trusted_certificates,
                       topic=TOPIC)
def producer_thread(producer, args):
    """Thread target: log the payload tuple, then publish it on the bus."""
    print(args)
    producer.send_msg(*args)
def producer_send_msg(producer, *args):
    """Publish `args` via `producer` on a worker thread.

    NOTE(review): the thread is joined immediately after starting, so this
    is effectively a synchronous call -- confirm the threading is needed.
    """
    t = threading.Thread(target=producer_thread, args=(producer, args))
    t.start()
    t.join()
def get_bus_consumer(env, certificate, private_key, trusted_certificates):
    """This is just a wrapper around creating a consumer. We need this in
    multiple places, so we want to ensure we do it the same way each time.
    """
    # Falls back to the 'stage' brokers when env is falsy.
    return AMQConsumer(urls=URLS[env or 'stage'], certificate=certificate,
                       private_key=private_key, trusted_certificates=trusted_certificates)
def art_consumer_callback(msg, notsure):
    """Handle the robosignatory reply and write the signature to disk.

    `msg` is a `Message` object which has various attributes, such as
    `body` (a JSON string).  `notsure` is the second positional argument
    the consumer API hands every callback; it is unused here.

    :return: True on success; exits the process with status 1 on failure
    """
    print(msg)
    body = json.loads(msg.body)
    print(json.dumps(body, indent=4))
    if body['msg']['signing_status'] != 'success':
        print("ERROR: robosignatory failed to sign artifact")
        exit(1)
    else:
        # example: https://datagrepper.stage.engineering.redhat.com/id?id=2019-0304004b-d1e6-4e03-b28d-cfa1e5f59948&is_raw=true&size=extra-large
        result = body['msg']['signed_artifact']
        out_file = body['msg']['artifact_meta']['name']
        # b64decode accepts str on both Python 2 and 3; the previous
        # base64.decodestring was removed in Python 3.9 and required bytes.
        # Open in binary mode so writing the decoded bytes works on py3,
        # and use a context manager so the handle is flushed and closed.
        with open(out_file, 'wb') as fp:
            fp.write(base64.b64decode(result))
        print("Wrote {} to disk".format(body['msg']['artifact_meta']['name']))
        return True
def consumer_thread(consumer):
    """Thread target: block consuming signing replies from the ART queue."""
    # NOTE(review): `env` here is the module-level click.option decorator
    # object, not an environment string, so the formatted queue address is
    # almost certainly wrong -- the environment should be passed in. Confirm.
    consumer.consume(ART_CONSUMER.format(env=env), art_consumer_callback)
def consumer_start(consumer):
    """Start consuming replies on a background thread and return the
    thread so the caller can join() on it."""
    t = threading.Thread(target=consumer_thread, args=(consumer,))
    t.start()
    return t
def get_producer_consumer(env, certificate, private_key, trusted_certificates):
    """Build the (producer, consumer) pair for the given UMB environment."""
    producer = get_bus_producer(env, certificate, private_key, trusted_certificates)
    consumer = get_bus_consumer(env, certificate, private_key, trusted_certificates)
    return (producer, consumer)
######################################################################
@cli.command("message-digest", short_help="Sign a sha256sum.txt file")
@requestor
@product
@request_id
@sig_keyname
@release_name_opt
@client_cert
@client_key
@client_type
@env
@noop
@ca_certs
@arch_opt
@click.pass_context
def message_digest(ctx, requestor, product, request_id, sig_keyname,
                   release_name, client_cert, client_key, client_type, env, noop,
                   ca_certs, arch):
    """Sign a 'message digest'. These are sha256sum.txt files produced by
    the 'sha256sum' command (hence the strange command name). In the ART
    world, this is for signing message digests from extracting OpenShift
    tools, as well as RHCOS bare-metal message digests.
    """
    # `product` is restricted by click.Choice to exactly these two values,
    # so one of the branches below always assigns artifact_url.
    if product == 'openshift':
        artifact_url = MESSAGE_DIGESTS[product].format(
            arch=arch,
            release_name=release_name,
            release_stage=client_type)
    elif product == 'rhcos':
        # RHCOS digests live under a <major>.<minor> directory.
        release_parts = release_name.split('.')
        artifact_url = MESSAGE_DIGESTS[product].format(
            arch=arch,
            release_name_xy='.'.join(release_parts[:2]),
            release_name=release_name)
    artifact = get_digest_base64(artifact_url)
    message = {
        "artifact": artifact,
        "artifact_meta": {
            "product": product,
            "release_name": release_name,
            "name": "sha256sum.txt.gpg",
            "type": "message-digest",
        },
        "request_id": request_id,
        "requestor": requestor,
        "sig_keyname": sig_keyname,
    }
    validated = presend_validation(message)
    if validated is True:
        print("Message contains all required fields")
        to_send = json.dumps(message)
    else:
        # `validated` holds the name of the first missing field.
        print("Message missing required field: {}".format(validated))
        exit(1)
    if noop:
        print("Message we would have sent over the bus:")
        print(to_send)
    else:
        # Start the reply consumer before publishing so the response cannot
        # be missed, then block until the signature comes back.
        producer, consumer = get_producer_consumer(env, client_cert, client_key, ca_certs)
        consumer_thread = consumer_start(consumer)
        producer_send_msg(producer, {}, to_send)
        print("Message we sent over the bus:")
        print(to_send)
        print("Submitted request for signing. The mirror-artifacts job should be triggered when a response is sent back")
        print("Waiting for consumer to receive data back from request")
        consumer_thread.join()
######################################################################
@cli.command("json-digest", short_help="Sign a JSON digest claim")
@requestor
@product
@request_id
@sig_keyname
@release_name_opt
@client_cert
@client_key
@client_type
@env
@noop
@ca_certs
@digest
@arch_opt
@click.pass_context
def json_digest(ctx, requestor, product, request_id, sig_keyname,
                release_name, client_cert, client_key, client_type, env, noop,
                ca_certs, digest, arch):
    """Sign a 'json digest'. These are JSON blobs that associate a
    pullspec with a sha256 digest. In the ART world, this is for "signing
    payload images". After the json digest is signed we publish the
    signature in a location which follows a specific directory pattern,
    thus allowing the signature to be looked up programmatically.
    """
    # Skeleton of the atomic-container-signature claim; the two None slots
    # are filled in below.
    json_claim = {
        "critical": {
            "image": {
                "docker-manifest-digest": None
            },
            "type": "atomic container signature",
            "identity": {
                "docker-reference": None,
            }
        },
        "optional": {
            "creator": "Red Hat OpenShift Signing Authority 0.0.1",
        },
    }
    release_stage = client_type
    release_tag = release_name
    pullspec = "registry.redhat.io/rh-acs/{}:{}".format(release_stage, release_tag)
    json_claim['critical']['identity']['docker-reference'] = pullspec
    if not digest:
        # No digest supplied on the command line: query the registry for it.
        digest = oc_image_info(pullspec)['digest']
    json_claim['critical']['image']['docker-manifest-digest'] = digest
    print("ARTIFACT to send for signing (WILL BE base64 encoded first):")
    print(json.dumps(json_claim, indent=4))
    message = {
        "artifact": base64.b64encode(json.dumps(json_claim).encode()).decode(),
        "artifact_meta": {
            "product": product,
            "release_name": release_name,
            # ':' is replaced, presumably so the digest is usable as a
            # file/URL path component -- confirm against the mirror layout.
            "name": json_claim['critical']['image']['docker-manifest-digest'].replace(':', '='),
            "type": "json-digest",
        },
        "request_id": request_id,
        "requestor": requestor,
        "sig_keyname": sig_keyname,
    }
    validated = presend_validation(message)
    if validated is True:
        print("Message contains all required fields")
        to_send = json.dumps(message)
    else:
        # `validated` holds the name of the first missing field.
        print("Message missing required field: {}".format(validated))
        exit(1)
    if noop:
        print("Message we would have sent over the bus:")
        print(to_send)
    else:
        # Start the reply consumer before publishing, then block on the reply.
        producer, consumer = get_producer_consumer(env, client_cert, client_key, ca_certs)
        consumer_thread = consumer_start(consumer)
        producer_send_msg(producer, {}, to_send)
        print("Message we sent over the bus:")
        print(to_send)
        print("Submitted request for signing. The mirror-artifacts job should be triggered when a response is sent back")
        print("Waiting for consumer to receive data back from request")
        consumer_thread.join()
######################################################################
if __name__ == '__main__':
    # Dispatch to the click command group (message-digest / json-digest).
    cli()
| 37.783981 | 144 | 0.638081 |
7953eba9ff2847006316ca6e24ffcaec01e2f39c | 304 | py | Python | LeetCode/5413.py | LauZyHou/- | 66c047fe68409c73a077eae561cf82b081cf8e45 | [
"MIT"
] | 7 | 2019-02-25T13:15:00.000Z | 2021-12-21T22:08:39.000Z | LeetCode/5413.py | LauZyHou/- | 66c047fe68409c73a077eae561cf82b081cf8e45 | [
"MIT"
] | null | null | null | LeetCode/5413.py | LauZyHou/- | 66c047fe68409c73a077eae561cf82b081cf8e45 | [
"MIT"
] | 1 | 2019-04-03T06:12:46.000Z | 2019-04-03T06:12:46.000Z | class Solution:
def arrangeWords(self, text: str) -> str:
mylist = text.split(" ")
mylist = [s.lower() for s in mylist]
mylist.sort(key=lambda x: len(x))
if len(mylist) > 0:
mylist[0] = mylist[0][0:1].upper() + mylist[0][1:]
return " ".join(mylist) | 38 | 62 | 0.532895 |
7953ec77cf0e443c5b960fbcbd7c423b76044121 | 5,492 | py | Python | src/spaceone/inventory/manager/compute_engine/snapshot_manager.py | spaceone-dev/plugin-google-cloud-inven-collector | 3e103412e7598ee9fa5f68b6241a831a40e8b9bc | [
"Apache-2.0"
] | null | null | null | src/spaceone/inventory/manager/compute_engine/snapshot_manager.py | spaceone-dev/plugin-google-cloud-inven-collector | 3e103412e7598ee9fa5f68b6241a831a40e8b9bc | [
"Apache-2.0"
] | null | null | null | src/spaceone/inventory/manager/compute_engine/snapshot_manager.py | spaceone-dev/plugin-google-cloud-inven-collector | 3e103412e7598ee9fa5f68b6241a831a40e8b9bc | [
"Apache-2.0"
] | null | null | null | import time
import logging
from spaceone.inventory.libs.manager import GoogleCloudManager
from spaceone.inventory.libs.schema.base import ReferenceModel
from spaceone.inventory.connector.compute_engine.snapshot import SnapshotConnector
from spaceone.inventory.model.compute_engine.snapshot.cloud_service_type import CLOUD_SERVICE_TYPES
from spaceone.inventory.model.compute_engine.snapshot.cloud_service import SnapshotResource, SnapshotResponse
from spaceone.inventory.model.compute_engine.snapshot.data import Snapshot
_LOGGER = logging.getLogger(__name__)
class SnapshotManager(GoogleCloudManager):
    """Inventory collector for Google Cloud Compute Engine disk snapshots."""
    connector_name = 'SnapshotConnector'
    cloud_service_types = CLOUD_SERVICE_TYPES
    def collect_cloud_service(self, params):
        """
        Args:
            params:
                - options
                - schema
                - secret_data
                - filter
                - zones
        Response:
            CloudServiceResponse/ErrorResourceResponse
        """
        _LOGGER.debug(f'** Snapshot START **')
        start_time = time.time()
        collected_cloud_services = []
        error_responses = []
        # Initialized up-front because the except-handler below references it
        # even when the failure happens before an id was assigned.
        snapshot_id = ""
        secret_data = params['secret_data']
        project_id = secret_data['project_id']
        ##################################
        # 0. Gather All Related Resources
        # List all information through connector
        ##################################
        snapshot_conn: SnapshotConnector = self.locator.get_connector(self.connector_name, **params)
        # Get lists that relate with snapshots through Google Cloud API
        snapshots = snapshot_conn.list_snapshot()
        # Per-snapshot errors are collected, not raised, so one bad snapshot
        # does not abort the whole collection run.
        for snapshot in snapshots:
            try:
                ##################################
                # 1. Set Basic Information
                ##################################
                snapshot_id = snapshot.get('id')
                region = self.get_matching_region(snapshot.get('storageLocations'))
                labels = self.convert_labels_format(snapshot.get('labels', {}))
                snapshot.update({
                    'project': secret_data['project_id'],
                    'disk': self.get_disk_info(snapshot),
                    'creation_type': 'Scheduled' if snapshot.get('autoCreated') else 'Manual',
                    'encryption': self.get_disk_encryption_type(snapshot.get('snapshotEncryptionKey')),
                    'labels': labels
                })
                ##################################
                # 2. Make Base Data
                ##################################
                snapshot_data = Snapshot(snapshot, strict=False)
                _name = snapshot_data.get('name', '')
                ##################################
                # 3. Make Return Resource
                ##################################
                # labels -> tags
                snapshots_resource = SnapshotResource({
                    'name': _name,
                    'account': project_id,
                    'region_code': region.get('region_code'),
                    'data': snapshot_data,
                    'tags': labels,
                    'reference': ReferenceModel(snapshot_data.reference())
                })
                ##################################
                # 4. Make Collected Region Code
                ##################################
                self.set_region_code(region.get('region_code'))
                ##################################
                # 5. Make Resource Response Object
                # List of LoadBalancingResponse Object
                ##################################
                collected_cloud_services.append(SnapshotResponse({'resource': snapshots_resource}))
            except Exception as e:
                _LOGGER.error(f'[collect_cloud_service] => {e}', exc_info=True)
                error_response = self.generate_resource_error_response(e, 'ComputeEngine', 'Snapshot', snapshot_id)
                error_responses.append(error_response)
        _LOGGER.debug(f'** SnapShot Finished {time.time() - start_time} Seconds **')
        return collected_cloud_services, error_responses
    def get_matching_region(self, svc_location):
        """Map the snapshot's first storage location to a region descriptor;
        unknown or missing locations fall back to a global/multi descriptor."""
        region_code = svc_location[0] if svc_location else 'global'
        matched_info = self.match_region_info(region_code)
        return {'region_code': region_code, 'location': 'regional'} if matched_info \
            else {'region_code': 'global', 'location': 'multi'}
    def get_disk_info(self, snapshot):
        '''
        Returns a dict with the source-disk details of the snapshot:
            source_disk: full URL of the source disk
            source_disk_display: disk name extracted from that URL
            source_disk_id: numeric id of the source disk
            disk_size: declared disk size converted from GiB to bytes (float)
            storage_bytes: actual storage consumed by the snapshot (int)
        '''
        disk_gb = snapshot.get('diskSizeGb', 0.0)
        st_byte = snapshot.get('storageBytes', 0)
        size = self._get_bytes(int(disk_gb))
        return {
            'source_disk': snapshot.get('sourceDisk', ''),
            'source_disk_display': self.get_param_in_url(snapshot.get('sourceDisk', ''), 'disks'),
            'source_disk_id': snapshot.get('sourceDiskId', ''),
            'disk_size': float(size),
            'storage_bytes': int(st_byte)
        }
    @staticmethod
    def _get_bytes(number):
        # GiB -> bytes
        return 1024 * 1024 * 1024 * number
| 41.293233 | 115 | 0.542972 |
7953ec810c743dd0e829667eec9582346bf5426c | 2,555 | py | Python | tests/test_minimal_tests.py | cmadjar/conp-dataset | d67eec79978f3f050245bba2aff8a1b514a87263 | [
"MIT"
] | null | null | null | tests/test_minimal_tests.py | cmadjar/conp-dataset | d67eec79978f3f050245bba2aff8a1b514a87263 | [
"MIT"
] | null | null | null | tests/test_minimal_tests.py | cmadjar/conp-dataset | d67eec79978f3f050245bba2aff8a1b514a87263 | [
"MIT"
] | null | null | null | """Test the minimalTest function in create_tests.py.
This function should return the minimal set of test to execute.
"""
import git
import pytest
from tests.create_tests import minimal_tests
@pytest.fixture(autouse=True)
def retrieve_submodule():
    """Fixture to get all the CONP datasets before a test."""
    # Stored on the pytest module namespace so the parametrized tests below
    # can read the same set without re-scanning the git submodules.
    pytest.datasets = {x.path for x in git.Repo(".").submodules}
    yield
@pytest.mark.parametrize("pr_files", [[],])
def test_empty_pr(pr_files):
    """Test pull requests that modify no file."""
    selected = minimal_tests(pytest.datasets, pr_files)
    assert selected == []
@pytest.mark.parametrize(
    "pr_files",
    [("projects/preventad-open",), ("projects/PERFORM_Dataset__one_control_subject",),],
)
def test_modify_single_project(pr_files):
    """Test pull requests that modify a single project."""
    modified = list(pr_files)
    assert minimal_tests(pytest.datasets, modified) == modified
@pytest.mark.parametrize(
    "pr_files",
    [
        ("projects/preventad-open", "projects/PERFORM_Dataset__one_control_subject"),
        ("projects/openpain/BrainNetworkChange_Mano", "projects/preventad-open"),
    ],
)
def test_modify_multi_project(pr_files):
    """A pull request touching several projects should test each of them."""
    assert minimal_tests(pytest.datasets, list(pr_files)) == list(pr_files)
@pytest.mark.parametrize(
    "pr_files, valid",
    [
        (("projects/preventad-open", "README.md"), ("projects/preventad-open",)),
        (("LICENSE",), []),
    ],
)
def test_modify_whitelist(pr_files, valid):
    """Files on the exact whitelist (e.g. README.md, LICENSE) trigger no tests."""
    assert minimal_tests(pytest.datasets, list(pr_files)) == list(valid)
@pytest.mark.parametrize(
    "pr_files, valid",
    [
        (("projects/preventad-open", ".datalad"), ("projects/preventad-open",)),
        (("metadata/examples",), []),
    ],
)
def test_modify_whitelist_exact(pr_files, valid):
    """Whitelisted paths (e.g. .datalad, metadata/examples) trigger no tests."""
    assert minimal_tests(pytest.datasets, list(pr_files)) == list(valid)
@pytest.mark.parametrize(
    "pr_files",
    [
        (".travis.yml", "projects/preventad-open", "README.md"),
        ("tests/functions.py", "projects/preventad-open"),
        ("scripts/crawl_zenodo.py",),
    ],
)
def test_run_all(pr_files):
    """Pull requests touching CI/test/script files must run every dataset test."""
    assert minimal_tests(pytest.datasets, list(pr_files)) == pytest.datasets
7953ec8c3a17be15ad3bd7ee9d46dd6127805815 | 12,170 | py | Python | HW2/HW2.py | masher1/SocialMediaMining | 615205159f363bffd8d6cd8fd32afd65cdfe4332 | [
"MIT"
] | null | null | null | HW2/HW2.py | masher1/SocialMediaMining | 615205159f363bffd8d6cd8fd32afd65cdfe4332 | [
"MIT"
] | null | null | null | HW2/HW2.py | masher1/SocialMediaMining | 615205159f363bffd8d6cd8fd32afd65cdfe4332 | [
"MIT"
] | null | null | null | import twitter
import json
import networkx
import matplotlib
import sys
import numpy
import sys
import time
from functools import partial
from sys import maxsize as maxint
from urllib.error import URLError
from http.client import BadStatusLine
import matplotlib.pyplot as plt
import Chapter_9Twitter_Cookbook
def oauth_login():
    """Build an authenticated `twitter.Twitter` API client.

    NOTE(review): the four OAuth credential constants referenced below were
    deliberately removed from this file; they must be defined (e.g. pasted
    back in or imported) before this function can run, otherwise it raises
    NameError.
    """
    #Authorization removed for Security Reasons
    auth = twitter.oauth.OAuth(OAUTH_TOKEN, OAUTH_TOKEN_SECRET, CONSUMER_KEY, CONSUMER_SECRET)
    twitter_api = twitter.Twitter(auth=auth)
    return twitter_api
# def get_user_profile(twitter_api, screen_names=None, user_ids=None):
# # Must have either screen_name or user_id (logical xor)
# assert (screen_names != None) != (user_ids != None), "Must have screen_names or user_ids, but not both"
#
# items_to_info = {}
#
# items = screen_names or user_ids
#
# while len(items) > 0:
#
# # Process 100 items at a time per the API specifications for /users/lookup.
# # See http://bit.ly/2Gcjfzr for details.
#
# items_str = ','.join([str(item) for item in items[:100]])
# items = items[100:]
#
# if screen_names:
# response = make_twitter_request(twitter_api.users.lookup,
# screen_name=items_str)
# else: # user_ids
# response = make_twitter_request(twitter_api.users.lookup,
# user_id=items_str)
#
# for user_info in response:
# if screen_names:
# items_to_info[user_info['screen_name']] = user_info
# else: # user_ids
# items_to_info[user_info['id']] = user_info
#
# return items_to_info
#
# def make_twitter_request(twitter_api_func, max_errors=10, *args, **kw):
# # A nested helper function that handles common HTTPErrors. Return an updated
# # value for wait_period if the problem is a 500 level error. Block until the
# # rate limit is reset if it's a rate limiting issue (429 error). Returns None
# # for 401 and 404 errors, which requires special handling by the caller.
# def handle_twitter_http_error(e, wait_period=2, sleep_when_rate_limited=True):
#
# if wait_period > 3600: # Seconds
# print('Too many retries. Quitting.', file=sys.stderr)
# raise e
#
# # See https://developer.twitter.com/en/docs/basics/response-codes
# # for common codes
#
# if e.e.code == 401:
# print('Encountered 401 Error (Not Authorized)', file=sys.stderr)
# return None
# elif e.e.code == 404:
# print('Encountered 404 Error (Not Found)', file=sys.stderr)
# return None
# elif e.e.code == 429:
# print('Encountered 429 Error (Rate Limit Exceeded)', file=sys.stderr)
# if sleep_when_rate_limited:
# print("Retrying in 15 minutes...ZzZ...", file=sys.stderr)
# sys.stderr.flush()
# time.sleep(60 * 15 + 5)
# print('...ZzZ...Awake now and trying again.', file=sys.stderr)
# return 2
# else:
# raise e # Caller must handle the rate limiting issue
# elif e.e.code in (500, 502, 503, 504):
# print('Encountered {0} Error. Retrying in {1} seconds'.format(e.e.code, wait_period), file=sys.stderr)
# time.sleep(wait_period)
# wait_period *= 1.5
# return wait_period
# else:
# raise e
#
# # End of nested helper function
#
# wait_period = 2
# error_count = 0
#
# while True:
# try:
# return twitter_api_func(*args, **kw)
# except twitter.api.TwitterHTTPError as e:
# error_count = 0
# wait_period = handle_twitter_http_error(e, wait_period)
# if wait_period is None:
# return
# except URLError as e:
# error_count += 1
# time.sleep(wait_period)
# wait_period *= 1.5
# print("URLError encountered. Continuing.", file=sys.stderr)
# if error_count > max_errors:
# print("Too many consecutive errors...bailing out.", file=sys.stderr)
# raise
# except BadStatusLine as e:
# error_count += 1
# time.sleep(wait_period)
# wait_period *= 1.5
# print("BadStatusLine encountered. Continuing.", file=sys.stderr)
# if error_count > max_errors:
# print("Too many consecutive errors...bailing out.", file=sys.stderr)
# raise
#
# def get_friends_followers_ids(twitter_api, screen_name=None, user_id=None,
# friends_limit=maxint, followers_limit=maxint):
# # Must have either screen_name or user_id (logical xor)
# assert (screen_name != None) != (user_id != None), \
# "Must have screen_name or user_id, but not both"
#
# # See http://bit.ly/2GcjKJP and http://bit.ly/2rFz90N for details
# # on API parameters
#
# get_friends_ids = partial(make_twitter_request, twitter_api.friends.ids,
# count=5000)
# get_followers_ids = partial(make_twitter_request, twitter_api.followers.ids,
# count=5000)
#
# friends_ids, followers_ids = [], []
#
# for twitter_api_func, limit, ids, label in [
# [get_friends_ids, friends_limit, friends_ids, "friends"],
# [get_followers_ids, followers_limit, followers_ids, "followers"]
# ]:
#
# if limit == 0: continue
#
# cursor = -1
# while cursor != 0:
#
# # Use make_twitter_request via the partially bound callable...
# if screen_name:
# response = twitter_api_func(screen_name=screen_name, cursor=cursor)
# else: # user_id
# response = twitter_api_func(user_id=user_id, cursor=cursor)
#
# if response is not None:
# ids += response['ids']
# cursor = response['next_cursor']
#
# print('Fetched {0} total {1} ids for {2}'.format(len(ids), \
# label, (user_id or screen_name)), file=sys.stderr)
#
# # XXX: You may want to store data during each iteration to provide an
# # an additional layer of protection from exceptional circumstances
#
# if len(ids) >= limit or response is None:
# break
#
# # Do something useful with the IDs, like store them to disk...
# return friends_ids[:friends_limit], followers_ids[:followers_limit]
#
# def setwise_friends_followers_analysis(screen_name, friends_ids, followers_ids):
# friends_ids, followers_ids = set(friends_ids), set(followers_ids)
# print('{0} is following {1}'.format(screen_name, len(friends_ids)))
# print('{0} is being followed by {1}'.format(screen_name, len(followers_ids)))
# print('{0} of {1} are not following {2} back'.format(
# len(friends_ids.difference(followers_ids)),
# len(friends_ids), screen_name))
# print('{0} of {1} are not being followed back by {2}'.format(
# len(followers_ids.difference(friends_ids)),
# len(followers_ids), screen_name))
# print('{0} has {1} mutual friends'.format(
# screen_name, len(friends_ids.intersection(followers_ids))))
def get_nameID(screenName):
    """Resolve a Twitter user, given by numeric id or screen name, to (id, name).

    Parameters
    ----------
    screenName : int or str
        Either a numeric user id (or digit string), or a screen name.
        Spaces in screen names are stripped, since handles never contain them.

    Returns
    -------
    tuple
        (user_id, display_name) of the first user returned by users/lookup.

    Notes
    -----
    Relies on the module-level `twitter_api` created in `__main__`.
    The original code used a bare `except` around both the int conversion
    and the user_id API call; when the API call itself failed, the fallback
    branch called `.replace` on an int and crashed. Only the numeric
    conversion is guarded now.
    """
    try:
        user_id = int(screenName)
    except (TypeError, ValueError):
        # Not numeric: look the user up by screen name instead.
        name = str(screenName).replace(" ", "")
        user = json.loads(json.dumps(
            twitter_api.users.lookup(screen_name=name), indent=4))
    else:
        user = json.loads(json.dumps(
            twitter_api.users.lookup(user_id=user_id), indent=4))
    user = user[0]
    name_id = (user["id"], user["name"])
    return name_id
def get_resp_friends(twitter_api, parent, depth, graph):
    """Recursively attach each node's top-5 reciprocal friends to `graph`.

    `parent` is an (id, name) pair; recursion stops when `depth` reaches 0.
    """
    if depth <= 0:
        return
    top_friends = find_top_5_frineds(twitter_api, parent[0])
    print("Parent:")
    print(parent)
    print("Top 5 Friends:")
    print(top_friends)
    add_top_5(parent, top_friends, graph)
    for friend in top_friends:
        get_resp_friends(twitter_api, friend, depth - 1, graph)
def find_top_5_frineds(twitter_api, user_id):
    """Return (id, name) pairs for `user_id`'s most-followed reciprocal friends.

    A "reciprocal" friend both follows and is followed by `user_id`.
    Candidates are capped at the first 15 reciprocal ids to bound API calls.
    Returns [] (possibly partial) when any API call fails.
    NOTE(review): the function name keeps the original typo ("frineds")
    because callers in this file use it.
    """
    friends = []
    num_followers = []
    names = []
    #TODO Make sure that the Top 5 are based on top Popular friends which is based on their followers_count
    try:
        # Fetch full follower/following id lists via the cookbook helper.
        following, followers = Chapter_9Twitter_Cookbook.get_friends_followers_ids(twitter_api, user_id=user_id, friends_limit=maxint, followers_limit=maxint)
        following = set(following)
        followers = set(followers)
        # NOTE(review): these two labels are swapped relative to the values.
        print("Following:", followers)
        print("Followers:", following)
        #followers = set(json.loads(json.dumps(twitter_api.followers.lookup(user_id=user_id), count=5000), indent=4))["ids"]
        #following = set(json.loads(json.dumps(twitter_api.friends.lookup(user_id=user_id), count=5000), indent=4))["ids"]
        reciprocal = followers.intersection(following)
        reciprocal = list(reciprocal)
        print("Reciprocals:", reciprocal)
        p = 0  # progress counter for the per-candidate lookup loop
        # reciprocal
        # Cap the candidate pool at 15 ids to limit users/lookup calls.
        if len(reciprocal) < 15:
            num = len(reciprocal)
        else:
            num = 15
        for i in range(num):# used to be 15
            # One users/lookup call per candidate to read followers_count.
            num_foll = json.loads(json.dumps(twitter_api.users.lookup(user_id=reciprocal[i]),indent = 4))
            num_foll = num_foll[0]
            num_followers.append(num_foll["followers_count"])
            names.append(num_foll["name"])
            print(p)
            p += 1
        print("Here")
        five_index = max_5_index(num_followers)
        try:
            for j in five_index:
                friends.append((reciprocal[j], names[j]))
        except:
            # NOTE(review): bare except — an indexing error is only logged
            # and a partial friends list is returned.
            print("Unexpected error:", sys.exc_info())
        print("These are the friends:")
        print(friends)
        return friends
    except:
        # NOTE(review): bare except around all the API work; any failure
        # (rate limit, auth, network) yields an empty list.
        print("Unexpected error:", sys.exc_info()[0])
        return friends
def make_resp_tree(twitter_api, user_ID, depth):
    """Build a reciprocal-friend graph seeded at `user_ID`, `depth` levels deep."""
    graph = networkx.nx.Graph()
    root = get_nameID(user_ID)
    graph.add_node((root[0], root[1]))
    get_resp_friends(twitter_api, root, depth, graph)
    return graph
def add_top_5(parent, children, graph):
    """Add each (id, name) child to `graph` and connect it to `parent`."""
    parent_node = (parent[0], parent[1])
    for child in children:
        child_node = (child[0], child[1])
        graph.add_node(child_node)
        graph.add_edge(parent_node, child_node)
def max_5_index(lister):
    """Return the indexes of the five largest values in `lister`, largest first.

    If `lister` has fewer than five elements, every index is returned.
    Ties resolve to the lowest index (same scan order as the original).
    The input list is left unmodified.

    Fixes two defects in the original:
    - the recursive helper ran with `num >= 0` starting at 5, so SIX
      indexes were returned from a "top 5" helper (off-by-one);
    - the input list was aliased and its maxima overwritten with 0,
      mutating the caller's data.
    """
    scores = list(lister)  # work on a copy so the caller's list survives
    indexes = []
    # Selection-by-scan: repeatedly take the current maximum and zero it
    # out. Like the original, this assumes non-negative scores.
    for _ in range(min(5, len(scores))):
        max_index = max(range(len(scores)), key=scores.__getitem__)
        indexes.append(max_index)
        scores[max_index] = 0
    print("The indexes are:")
    print(indexes)
    return indexes
if __name__ == '__main__':
    # Build the authenticated client and crawl a 2-level reciprocal-friend
    # graph for the seed account, then report basic network statistics.
    twitter_api = oauth_login()
    #hamsterlanda
    #magicmathur3
    screenName = "hamsterlanda"
    tree = make_resp_tree(twitter_api,get_nameID(screenName)[0],2)
    # NOTE(review): this builds a tuple, not a concatenated string; it is
    # passed to networkx as-is.
    string = screenName, "'s SociaL Network"
    networkx.nx.draw(tree, arrows = True, with_labels=1, label = string)
    print("Diameter of the tree:")
    print(networkx.nx.diameter(tree))
    print("Shortest Path Length:")
    print(networkx.nx.average_shortest_path_length(tree))
    #print(json.dumps(twitter_api.users.lookup(user_id=758093293397868544),indent =4) )
    plt.show()
    # screen_name = "magicmathur3"
    # friends_ids, followers_ids = get_friends_followers_ids(twitter_api,
    #                                                        screen_name="thisisibrahimd",
    #                                                        friends_limit=maxint,
    #                                                        followers_limit=maxint)
    # print("Following:", len(friends_ids))
    # print("Following:", friends_ids)
    # print("Followers:", len(followers_ids))
    # print("Followers:", followers_ids)
    #
    # friends = {friends_ids}
    # followers = {followers_ids}
    # print("Following:", friends)
    # print("Followers:", followers)
7953ed0718046b60e614848b3ce90929362265e1 | 4,584 | py | Python | sympy/register.py | DanielMabadeje/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials | 7adab3877fc1d3f1d5f57e6c1743dae8f76f72c5 | [
"Apache-2.0"
] | 3,266 | 2017-08-06T16:51:46.000Z | 2022-03-30T07:34:24.000Z | sympy/register.py | nuhaltinsoy/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials | 6017441f2d476f9c6c568dd886da43c6c0fd89bd | [
"Apache-2.0"
] | 150 | 2017-08-28T14:59:36.000Z | 2022-03-11T23:21:35.000Z | sympy/register.py | nuhaltinsoy/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials | 6017441f2d476f9c6c568dd886da43c6c0fd89bd | [
"Apache-2.0"
] | 1,449 | 2017-08-06T17:40:59.000Z | 2022-03-31T12:03:24.000Z | import numpy as np
import numpy.random as random
import matplotlib.pyplot as plt
class QuantumRegister(object):
    """A minimal state-vector simulator for a register of `n_qubits` qubits.

    The register is a dense vector of 2**n_qubits complex amplitudes,
    initialised to the basis state |00...0>. Qubit index 0 is the most
    significant bit of a basis-state integer.

    Fixes relative to the original:
    - `probability` accumulated `self.qubits[i]` (the inner loop variable)
      instead of `self.qubits[state]`, so it summed the wrong amplitudes;
      the debug print inside the loop was also removed.
    - `plot2` used Python-3 float division for the grid layout where the
      inline comment demanded integer division.
    - `__str__` hard-coded a 3-bit field width; it now follows n_qubits
      (identical output for 3-qubit registers).
    """

    def __init__(self, n_qubits):
        self.n_qubits = n_qubits
        self.n_states = 2 ** n_qubits
        self.qubits = np.zeros(self.n_states, dtype=complex)
        self.qubits[0] = 1.0  # start in |00...0>

    def reset(self):
        """Return the register to the |00...0> basis state."""
        self.qubits = np.zeros(self.n_states, dtype=complex)
        self.qubits[0] = 1.0

    # REGISER MANIPULATION
    def isset(self, state, n):
        """True if qubit `n` (0 = most significant) is 1 in basis `state`."""
        return state & 1 << (self.n_qubits - 1 - n) != 0

    def flip(self, state, n):
        """Return `state` with qubit `n` toggled."""
        return state ^ 1 << (self.n_qubits - 1 - n)

    def set_qubit(self, n, a, b):  # a|0>+b|1>
        """Force qubit `n` into the single-qubit state a|0> + b|1>."""
        tmp_qubits = np.zeros(self.n_states, dtype=complex)
        for state in range(self.n_states):
            # Pool the amplitude of `state` and its partner with qubit n
            # flipped, then redistribute it according to (a, b).
            current_amplitude = self.qubits[state] + self.qubits[self.flip(state, n)]
            if self.isset(state, n):
                tmp_qubits[state] = current_amplitude * b
            else:
                tmp_qubits[state] = current_amplitude * a
        self.qubits = tmp_qubits

    # MEASUREMENT OPERATIONS
    def measure(self):
        """Sample one basis-state index according to the Born rule."""
        probabilities = np.absolute(self.qubits)**2
        return random.choice(len(probabilities), p=probabilities.flatten())

    def probability(self, qubits):
        """Probability of observing the partial assignment `qubits`.

        `qubits` must have one entry per qubit: 0, 1, or None (don't care).
        """
        assert len(qubits) == self.n_qubits
        probability = 0.0
        for state in range(self.n_states):
            selected = True
            for i in range(self.n_qubits):
                if qubits[i] is not None:
                    selected &= (self.isset(state, i) == qubits[i])
            if selected:
                # BUGFIX: accumulate the amplitude of `state`; the original
                # indexed with the inner loop variable `i`.
                probability += np.absolute(self.qubits[state])**2
        return probability

    # QUANTUM GATES
    def hadamar(self, qubits=None):
        """Apply a Hadamard gate to each qubit whose entry in `qubits` is truthy.

        `qubits` is a per-qubit indicator list; None means all qubits.
        Returns self so calls can be chained.
        """
        if qubits is None:
            qubits = [1] * self.n_qubits
        H = 1. / np.sqrt(2) * np.array([[1., 1.], [1., -1.]])
        m = np.array([1])
        # Build the full 2^n x 2^n operator one qubit at a time as a
        # Kronecker product (identity where the gate is not applied).
        for indicator in reversed(qubits):
            m = np.kron(H, m) if indicator else np.kron(np.eye(2), m)
        self.qubits = m.dot(self.qubits)
        return self

    def hadamar_alternative(self):
        """Apply Hadamard to every qubit via the closed-form matrix entries."""
        hadamar = np.zeros((self.n_states, self.n_states))
        for target in range(self.n_states):
            for state in range(self.n_states):
                # H^{(n)}[t, s] = 2^{-n/2} * (-1)^{popcount(s & t)}
                hadamar[target, state] = (2.**(-self.n_qubits / 2.))*(-1)**bin(state & target).count("1")
        self.qubits = hadamar.dot(self.qubits)
        return self

    def cswap(self, c, a, b):
        """Apply a controlled swap (Fredkin gate): swap qubits a,b when c is 1."""
        cswap = np.zeros((self.n_states, self.n_states))
        for state in range(self.n_states):
            if self.isset(state, c):
                if self.isset(state, a) != self.isset(state, b):
                    # a and b differ: this basis state maps to the one with
                    # both toggled (i.e. their values exchanged).
                    flipstate = self.flip(self.flip(state, b), a)
                    cswap[state, flipstate] = 1.0
                else:
                    cswap[state, state] = 1.0
            else:
                cswap[state, state] = 1.0
        self.qubits = cswap.dot(self.qubits)
        return self

    # IMPLEMENTATION ESSENTIALS
    def __str__(self):
        """One line per basis state: binary label => complex amplitude."""
        string = ""
        for state in range(self.n_states):
            # Field width follows the register size (originally fixed at 3).
            string += "{0:0{1}b}".format(state, self.n_qubits) + " => {:.2f}".format(self.qubits[state]) + "\n"
        return string[:-1]

    def plot(self):
        """Show a bar chart of the amplitude magnitudes."""
        plt.bar(range(self.n_states), np.absolute(self.qubits), color='k')
        plt.title(str(self.n_qubits) + ' qubit register')
        plt.axis([0, self.n_states, 0.0, 1.0])
        plt.show()

    def savefig(self, name):
        """Save the amplitude bar chart to img/<name>.pdf (img/ must exist)."""
        plt.bar(range(self.n_states), np.absolute(self.qubits), color='k')
        plt.title(str(self.n_qubits) + ' qubit register')
        plt.axis([0, self.n_states, 0.0, 1.0])
        plt.savefig("img/" + name + ".pdf")

    def plot2(self, save=None, name=None):
        """Show (or save) the amplitudes as a colored 2D grid of states."""
        # BUGFIX: integer division so the grid dimensions are ints; Python-3
        # `/` on ints yields floats and breaks the layout arithmetic.
        cols = 2 ** (self.n_qubits // 2)
        rows = 2 ** (self.n_qubits - (self.n_qubits // 2))
        x = []
        y = []
        c = []
        for i in range(self.n_states):
            x.append(i % cols)
            y.append(i // cols)
            c.append(np.absolute(self.qubits[i]))
        plt.xlim(-0.5, cols-0.5)
        plt.ylim(-0.5, rows-0.5)
        plt.axes().set_aspect('equal')
        plt.scatter(x, y, s=2e3, c=c, linewidths=2, vmin=0, vmax=1, cmap=plt.get_cmap('jet'))
        if save is None:
            plt.show()
        else:
            plt.axis('off')
            plt.title('('+name+')')
            fig = plt.gcf()
            fig.set_size_inches(cols, rows)
            fig.savefig("img/" + save + ".pdf", transparent=True, pad_inches=0)
7953ee0cccff774b4f0669338f833bf483067f87 | 2,043 | py | Python | src/main.py | isaacrivriv/ChemLAB-PL | 3ddb586ca95cdfceba8c1637ce3615767f1c47cf | [
"MIT"
] | 1 | 2020-04-07T23:06:54.000Z | 2020-04-07T23:06:54.000Z | src/main.py | isaacrivriv/ChemLAB-PL | 3ddb586ca95cdfceba8c1637ce3615767f1c47cf | [
"MIT"
] | 8 | 2020-04-12T17:32:48.000Z | 2020-05-07T03:46:46.000Z | src/main.py | isaacrivriv/ChemLAB-PL | 3ddb586ca95cdfceba8c1637ce3615767f1c47cf | [
"MIT"
] | 2 | 2020-04-07T23:07:30.000Z | 2020-07-14T17:03:31.000Z | import chem_lex.ChemlabLexer as Lexer
import chem_parse.ChemlabParser as Parser
import sys
def main():
    """Entry point for the ChemLAB interpreter.

    With no command-line argument an interactive REPL is started;
    otherwise the file named by sys.argv[1] is lexed and parsed.
    """
    # If no file name passed when running we open the ChemLAB command line, if not we try to parse the file passed
    try:
        # Check if something was passed. If not, we open command line theme
        trace = False  # flip to True for verbose token/parse output
        lexer = Lexer.ChemLABLexer()
        lexer.build()
        parser = Parser.ChemlabParser()
        parser.build(trace=trace)
        if len(sys.argv) <= 1:
            # Interactive mode: read/parse loop until Ctrl-C.
            while True:
                try:
                    buff = input("ChemLAB >>")
                    if not buff:
                        continue
                    if trace:
                        print("Buffer Content: ")
                        print(buff)
                        print("Tokenized buffer: ")
                        lexer.test(buff)
                        print("Parsing File...")
                    parser.parseContent(buff, lexer.lexer)
                except KeyboardInterrupt:
                    exit()
                except Exception as e:
                    # Report the bad line and keep the REPL alive.
                    print("Error on line `"+ buff +"`\nPlease try again")
                    print("Error was: "+str(e))
                    continue
        else:
            # File mode: parse the single file given on the command line.
            filename = sys.argv[1]
            if trace:
                print("Prepping to parse file "+filename)
            file = open(filename, 'r')
            s = file.read()
            if trace:
                print("File Content: ")
                print(s)
                print("Tokenized file: ")
                lexer.test(s)
                print("Parsing File...")
            parser.parseContent(s, lexer.lexer)
    except (FileNotFoundError, EOFError) as e:
        # NOTE(review): EOFError also ends interactive mode (Ctrl-D), so it
        # is reported with the file-error message.
        print("Error reading file. Could not open or read.\n")
    except Exception as e2:
        print("Error parsing content. Could not parse file.\n")
        print("Error found: "+str(e2))
    finally:
        try:
            file.close()
        except NameError:
            pass  # `file` was never opened (interactive mode or open failed)
if __name__ == "__main__":
    # Run the interpreter only when executed as a script.
    main()
7953ee54489d1166a8bd30a6180493bfbf86657f | 28,625 | py | Python | nli_02_models_trials1.py | abgoswam/cs224u | 33e1a22d1c9586b473f43b388163a74264e9258a | [
"Apache-2.0"
] | null | null | null | nli_02_models_trials1.py | abgoswam/cs224u | 33e1a22d1c9586b473f43b388163a74264e9258a | [
"Apache-2.0"
] | null | null | null | nli_02_models_trials1.py | abgoswam/cs224u | 33e1a22d1c9586b473f43b388163a74264e9258a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# # Natural language inference: models
# In[1]:
__author__ = "Christopher Potts"
__version__ = "CS224u, Stanford, Spring 2020"
# ## Contents
#
# 1. [Contents](#Contents)
# 1. [Overview](#Overview)
# 1. [Set-up](#Set-up)
# 1. [Sparse feature representations](#Sparse-feature-representations)
# 1. [Feature representations](#Feature-representations)
# 1. [Model wrapper](#Model-wrapper)
# 1. [Assessment](#Assessment)
# 1. [Sentence-encoding models](#Sentence-encoding-models)
# 1. [Dense representations with a linear classifier](#Dense-representations-with-a-linear-classifier)
# 1. [Dense representations with a shallow neural network](#Dense-representations-with-a-shallow-neural-network)
# 1. [Sentence-encoding RNNs](#Sentence-encoding-RNNs)
# 1. [Other sentence-encoding model ideas](#Other-sentence-encoding-model-ideas)
# 1. [Chained models](#Chained-models)
# 1. [Simple RNN](#Simple-RNN)
# 1. [Separate premise and hypothesis RNNs](#Separate-premise-and-hypothesis-RNNs)
# 1. [Attention mechanisms](#Attention-mechanisms)
# 1. [Error analysis with the MultiNLI annotations](#Error-analysis-with-the-MultiNLI-annotations)
# 1. [Other findings](#Other-findings)
# 1. [Exploratory exercises](#Exploratory-exercises)
# ## Overview
#
# This notebook defines and explores a number of models for NLI. The general plot is familiar from [our work with the Stanford Sentiment Treebank](sst_01_overview.ipynb):
#
# 1. Models based on sparse feature representations
# 1. Linear classifiers and feed-forward neural classifiers using dense feature representations
# 1. Recurrent and tree-structured neural networks
#
# The twist here is that, while NLI is another classification problem, the inputs have important high-level structure: __a premise__ and __a hypothesis__. This invites exploration of a host of neural model designs:
#
# * In __sentence-encoding__ models, the premise and hypothesis are analyzed separately, combined only for the final classification step.
#
# * In __chained__ models, the premise is processed first, then the hypotheses, giving a unified representation of the pair.
#
# NLI resembles sequence-to-sequence problems like __machine translation__ and __language modeling__. The central modeling difference is that NLI doesn't produce an output sequence, but rather consumes two sequences to produce a label. Still, there are enough affinities that many ideas have been shared among these fields.
# ## Set-up
#
# See [the previous notebook](nli_01_task_and_data.ipynb#Set-up) for set-up instructions for this unit.
# In[2]:
from collections import Counter
from itertools import product
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
import torch
import torch.nn as nn
import torch.utils.data
from torch_model_base import TorchModelBase
from torch_rnn_classifier import TorchRNNClassifier, TorchRNNClassifierModel
from torch_shallow_neural_classifier import TorchShallowNeuralClassifier
from torch_rnn_classifier import TorchRNNClassifier
import nli
import os
import utils
# In[3]:
# Set all the random seeds for reproducibility. Only the
# system and torch seeds are relevant for this notebook.
utils.fix_random_seeds()
# In[4]:
GLOVE_HOME = os.path.join('data', 'glove.6B')
DATA_HOME = os.path.join("data", "nlidata")
SNLI_HOME = os.path.join(DATA_HOME, "snli_1.0")
MULTINLI_HOME = os.path.join(DATA_HOME, "multinli_1.0")
ANNOTATIONS_HOME = os.path.join(DATA_HOME, "multinli_1.0_annotations")
# ## Sparse feature representations
#
# We begin by looking at models based in sparse, hand-built feature representations. As in earlier units of the course, we will see that __these models are competitive__: easy to design, fast to optimize, and highly effective.
# ### Feature representations
#
# The guiding idea for NLI sparse features is that one wants to knit together the premise and hypothesis, so that the model can learn about their relationships rather than just about each part separately.
# With `word_overlap_phi`, we just get the set of words that occur in both the premise and hypothesis.
# In[5]:
def word_overlap_phi(t1, t2):
    """Basis for features for the words in both the premise and hypothesis.

    This tends to produce very sparse representations.

    Parameters
    ----------
    t1, t2 : `nltk.tree.Tree`
        As given by `str2tree`.

    Returns
    -------
    Counter
        Maps each word appearing in both `t1` and `t2` to 1.
    """
    shared_words = set(t1.leaves()) & set(t2.leaves())
    return Counter(shared_words)
# With `word_cross_product_phi`, we count all the pairs $(w_{1}, w_{2})$ where $w_{1}$ is a word from the premise and $w_{2}$ is a word from the hypothesis. This creates a very large feature space. These models are very strong right out of the box, and they can be supplemented with more fine-grained features.
# In[6]:
def word_cross_product_phi(t1, t2):
    """Basis for cross-product features. This tends to produce pretty
    dense representations.

    Parameters
    ----------
    t1, t2 : `nltk.tree.Tree`
        As given by `str2tree`.

    Returns
    -------
    Counter
        Maps each (w1, w2) in the cross-product of `t1.leaves()` and
        `t2.leaves()` to its count. This is a multi-set cross-product
        (repetitions matter).
    """
    pairs = product(t1.leaves(), t2.leaves())
    return Counter(pairs)
# ### Model wrapper
#
# Our experiment framework is basically the same as the one we used for the Stanford Sentiment Treebank. Here, I actually use `utils.fit_classifier_with_crossvalidation` (introduced in that unit) to create a wrapper around `LogisticRegression` for cross-validation of hyperparameters. At this point, I am not sure what parameters will be good for our NLI datasets, so this hyperparameter search is vital.
# In[7]:
def fit_softmax_with_crossvalidation(X, y):
    """A MaxEnt model of dataset with hyperparameter cross-validation.

    Parameters
    ----------
    X : 2d np.array
        The matrix of features, one example per row.
    y : list
        The list of labels for rows in `X`.

    Returns
    -------
    sklearn.linear_model.LogisticRegression
        A trained model instance, the best model found.
    """
    base_model = LogisticRegression(
        fit_intercept=True,
        solver='liblinear',
        multi_class='auto')
    param_grid = {
        'C': [0.4, 0.6, 0.8, 1.0],
        'penalty': ['l1', 'l2']}
    # 3-fold cross-validation over the grid above.
    return utils.fit_classifier_with_crossvalidation(
        X, y, base_model, 3, param_grid)
# ### Assessment
#
# Because SNLI and MultiNLI are huge, we can't afford to do experiments on the full datasets all the time. Thus, we will mainly work within the training sets, using the train readers to sample smaller datasets that can then be divided for training and assessment.
#
# Here, we sample 10% of the training examples. I set the random seed (`random_state=42`) so that we get consistency across the samples; setting `random_state=None` will give new random samples each time.
# In[8]:
train_reader = nli.SNLITrainReader(
SNLI_HOME, samp_percentage=0.10, random_state=42)
# An experimental dataset can be built directly from the reader and a feature function:
# In[9]:
dataset = nli.build_dataset(train_reader, word_overlap_phi)
# In[10]:
dataset.keys()
# However, it's more efficient to use `nli.experiment` to bring all these pieces together. This wrapper will work for all the models we consider.
# In[11]:
# _ = nli.experiment(
# train_reader=nli.SNLITrainReader(
# SNLI_HOME, samp_percentage=0.10, random_state=42),
# phi=word_overlap_phi,
# train_func=fit_softmax_with_crossvalidation,
# assess_reader=None,
# random_state=42)
# In[12]:
# _ = nli.experiment(
# train_reader=nli.SNLITrainReader(
# SNLI_HOME, samp_percentage=0.10, random_state=42),
# phi=word_cross_product_phi,
# train_func=fit_softmax_with_crossvalidation,
# assess_reader=None,
# random_state=42)
# As expected `word_cross_product_phi` is very strong. At this point, one might consider scaling up to `samp_percentage=None`, i.e., the full training set. Such a baseline is very similar to the one established in [the original SNLI paper by Bowman et al.](https://aclanthology.info/papers/D15-1075/d15-1075) for models like this one.
# ## Sentence-encoding models
#
# We turn now to sentence-encoding models. The hallmark of these is that the premise and hypothesis get their own representation in some sense, and then those representations are combined to predict the label. [Bowman et al. 2015](http://aclweb.org/anthology/D/D15/D15-1075.pdf) explore models of this form as part of introducing SNLI.
#
# The feed-forward networks we used in [the word-level bake-off](nli_wordentail_bakeoff.ipynb) are members of this family of models: each word was represented separately, and the concatenation of those representations was used as the input to the model.
# ### Dense representations with a linear classifier
#
# Perhaps the simplest sentence-encoding model sums (or averages, etc.) the word representations for the premise, does the same for the hypothesis, and concatenates those two representations for use as the input to a linear classifier.
#
# Here's a diagram that is meant to suggest the full space of models of this form:
#
# <img src="fig/nli-softmax.png" width=800 />
# Here's an implementation of this model where
#
# * The embedding is GloVe.
# * The word representations are summed.
# * The premise and hypothesis vectors are concatenated.
# * A softmax classifier is used at the top.
# In[13]:
glove_lookup = utils.glove2dict(
os.path.join(GLOVE_HOME, 'glove.6B.50d.txt'))
# In[14]:
def glove_leaves_phi(t1, t2, np_func=np.sum):
    """Represent the pair as concatenated combinations of GloVe word vectors.

    Parameters
    ----------
    t1 : nltk.Tree
    t2 : nltk.Tree
    np_func : function (default: np.sum)
        A numpy matrix operation applied columnwise (must accept `axis=0`
        and return a fixed-length vector), e.g. `np.mean`, `np.sum`.

    Returns
    -------
    np.array
        Concatenation of the premise and hypothesis vectors, each built
        from the module-level `glove_lookup`.
    """
    premise_vec = _get_tree_vecs(t1, glove_lookup, np_func)
    hypothesis_vec = _get_tree_vecs(t2, glove_lookup, np_func)
    return np.concatenate((premise_vec, hypothesis_vec))
def _get_tree_vecs(tree, lookup, np_func):
    """Combine the embedding vectors of `tree`'s in-vocabulary leaves.

    Falls back to a zero vector of the embedding dimensionality when no
    leaf is in `lookup`.
    """
    vecs = np.array([lookup[w] for w in tree.leaves() if w in lookup])
    if len(vecs) == 0:
        dim = len(next(iter(lookup.values())))
        return np.zeros(dim)
    return np_func(vecs, axis=0)
# In[15]:
# _ = nli.experiment(
# train_reader=nli.SNLITrainReader(
# SNLI_HOME, samp_percentage=0.10, random_state=42),
# phi=glove_leaves_phi,
# train_func=fit_softmax_with_crossvalidation,
# assess_reader=None,
# random_state=42,
# vectorize=False) # Ask `experiment` not to featurize; we did it already.
# ### Dense representations with a shallow neural network
#
# A small tweak to the above is to use a neural network instead of a softmax classifier at the top:
# In[16]:
def fit_shallow_neural_classifier_with_crossvalidation(X, y):
    """Fit a shallow neural classifier, cross-validating `hidden_dim`.

    Returns the best model found by 3-fold cross-validation.
    """
    base_model = TorchShallowNeuralClassifier(max_iter=50)
    grid = {'hidden_dim': [25, 50, 100]}
    return utils.fit_classifier_with_crossvalidation(X, y, base_model, 3, grid)
# In[17]:
# _ = nli.experiment(
# train_reader=nli.SNLITrainReader(
# SNLI_HOME, samp_percentage=0.10, random_state=42),
# phi=glove_leaves_phi,
# train_func=fit_shallow_neural_classifier_with_crossvalidation,
# assess_reader=None,
# random_state=42,
# vectorize=False) # Ask `experiment` not to featurize; we did it already.
# ### Sentence-encoding RNNs
#
# A more sophisticated sentence-encoding model processes the premise and hypothesis with separate RNNs and uses the concatenation of their final states as the basis for the classification decision at the top:
#
# <img src="fig/nli-rnn-sentencerep.png" width=800 />
# It is relatively straightforward to extend `torch_rnn_classifier` so that it can handle this architecture:
# #### A sentence-encoding dataset
#
# Whereas `torch_rnn_classifier.TorchRNNDataset` creates batches that consist of `(sequence, sequence_length, label)` triples, the sentence encoding model requires us to double the first two components. The most important features of this is `collate_fn`, which determines what the batches look like:
# In[18]:
class TorchRNNSentenceEncoderDataset(torch.utils.data.Dataset):
    """Dataset of (premise, hypothesis, prem_len, hyp_len, label) examples
    for the sentence-encoding NLI models."""

    def __init__(self, sequences, seq_lengths, y):
        self.prem_seqs, self.hyp_seqs = sequences
        self.prem_lengths, self.hyp_lengths = seq_lengths
        self.y = y
        assert len(self.prem_seqs) == len(self.y)

    @staticmethod
    def collate_fn(batch):
        """Batch examples into ((X_prem, X_hyp), (prem_lens, hyp_lens), y),
        with the lengths and labels as LongTensors."""
        X_prem, X_hyp, prem_lengths, hyp_lengths, y = zip(*batch)
        return (
            (X_prem, X_hyp),
            (torch.LongTensor(prem_lengths), torch.LongTensor(hyp_lengths)),
            torch.LongTensor(y))

    def __len__(self):
        return len(self.prem_seqs)

    def __getitem__(self, idx):
        return (self.prem_seqs[idx], self.hyp_seqs[idx],
                self.prem_lengths[idx], self.hyp_lengths[idx],
                self.y[idx])
# #### A sentence-encoding model
#
# With `TorchRNNSentenceEncoderClassifierModel`, we subclass `torch_rnn_classifier.TorchRNNClassifierModel` and make use of many of its parameters. The changes:
#
# * We add an attribute `self.hypothesis_rnn` for encoding the hypothesis. (The super class has `self.rnn`, which we use for the premise.)
# * The `forward` method concatenates the final states from the premise and hypothesis, and they are the input to the classifier layer, which is unchanged from before but now accepts inputs that are double the size.
# In[19]:
class TorchRNNSentenceEncoderClassifierModel(TorchRNNClassifierModel):
    """Sentence-encoding NLI model with separate premise/hypothesis LSTMs.

    The superclass supplies `self.rnn` (used here for the premise) plus
    the embedding machinery; this subclass adds a second LSTM for the
    hypothesis and widens the classifier layer so it can accept the two
    final states concatenated together.
    """
    def __init__(self, vocab_size, embed_dim, embedding, use_embedding,
            hidden_dim, output_dim, bidirectional, device):
        super().__init__(
            vocab_size, embed_dim, embedding, use_embedding,
            hidden_dim, output_dim, bidirectional, device)
        self.hypothesis_rnn = nn.LSTM(
            input_size=self.embed_dim,
            hidden_size=hidden_dim,
            batch_first=True,
            bidirectional=self.bidirectional)
        # Concatenating two sentence states doubles the classifier input;
        # bidirectional encoders double each state again.
        directions = 2 if bidirectional else 1
        self.classifier_layer = nn.Linear(
            hidden_dim * 2 * directions, output_dim)
    def forward(self, X, seq_lengths):
        """Encode both sentences, concatenate their states, and classify."""
        premise, hypothesis = X
        premise_lengths, hypothesis_lengths = seq_lengths
        encoded_premise = self.rnn_forward(
            premise, premise_lengths, self.rnn)
        encoded_hypothesis = self.rnn_forward(
            hypothesis, hypothesis_lengths, self.hypothesis_rnn)
        combined = torch.cat((encoded_premise, encoded_hypothesis), dim=1)
        return self.classifier_layer(combined)
# #### A sentence-encoding model interface
#
# Finally, we subclass `TorchRNNClassifier`. Here, we just need to redefine three methods: `build_dataset` and `build_graph` to make use of the new components above, and `predict_proba` so that it deals with the premise/hypothesis shape of new inputs.
# In[20]:
class TorchRNNSentenceEncoderClassifier(TorchRNNClassifier):
    """Interface class wiring the paired dataset and two-LSTM model together."""
    def build_dataset(self, X, y):
        """Split (premise, hypothesis) pairs and wrap them in the paired dataset."""
        premises, hypotheses = zip(*X)
        premises, premise_lengths = self._prepare_dataset(premises)
        hypotheses, hypothesis_lengths = self._prepare_dataset(hypotheses)
        return TorchRNNSentenceEncoderDataset(
            (premises, hypotheses), (premise_lengths, hypothesis_lengths), y)
    def build_graph(self):
        """Instantiate the sentence-encoding model from fitted hyperparameters."""
        return TorchRNNSentenceEncoderClassifierModel(
            len(self.vocab),
            embedding=self.embedding,
            embed_dim=self.embed_dim,
            use_embedding=self.use_embedding,
            hidden_dim=self.hidden_dim,
            output_dim=self.n_classes_,
            bidirectional=self.bidirectional,
            device=self.device)
    def predict_proba(self, X):
        """Return softmax class probabilities for (premise, hypothesis) pairs."""
        with torch.no_grad():
            premises, hypotheses = zip(*X)
            premises, premise_lengths = self._prepare_dataset(premises)
            hypotheses, hypothesis_lengths = self._prepare_dataset(hypotheses)
            logits = self.model(
                (premises, hypotheses), (premise_lengths, hypothesis_lengths))
            return torch.softmax(logits, dim=1).cpu().numpy()
# #### Simple example
#
# This toy problem illustrates how this works in detail:
# In[21]:
def simple_example():
    """Sanity-check the sentence-encoding classifier on a toy task.

    Reversed premise/hypothesis pairs are labeled 'good'; every other
    pair is 'bad'. Trains on nine pairs, predicts five held-out pairs,
    and prints per-example correctness.
    """
    vocab = ['a', 'b', '$UNK']
    def pair(prem, hyp, label):
        # Helper: strings become character lists, matching the tree-leaf format.
        return [(list(prem), list(hyp)), label]
    # Reversals are good, and other pairs are bad:
    train = [
        pair('ab', 'ba', 'good'),
        pair('aab', 'baa', 'good'),
        pair('abb', 'bba', 'good'),
        pair('aabb', 'bbaa', 'good'),
        pair('ba', 'ba', 'bad'),
        pair('baa', 'baa', 'bad'),
        pair('bba', 'bab', 'bad'),
        pair('bbaa', 'bbab', 'bad'),
        pair('aba', 'bab', 'bad')]
    test = [
        pair('baaa', 'aabb', 'bad'),
        pair('abaa', 'baaa', 'bad'),
        pair('bbaa', 'bbaa', 'bad'),
        pair('aaab', 'baaa', 'good'),
        pair('aaabb', 'bbaaa', 'good')]
    mod = TorchRNNSentenceEncoderClassifier(
        vocab,
        max_iter=100,
        embed_dim=50,
        hidden_dim=50)
    X, y = zip(*train)
    mod.fit(X, y)
    X_test, y_test = zip(*test)
    preds = mod.predict(X_test)
    print("\nPredictions:")
    for ex, pred, gold in zip(X_test, preds, y_test):
        score = "correct" if pred == gold else "incorrect"
        print("{0:>6} {1:>6} - predicted: {2:>4}; actual: {3:>4} - {4}".format(
            "".join(ex[0]), "".join(ex[1]), pred, gold, score))
# In[22]:
# Train and evaluate the toy reversal task end to end (prints predictions).
simple_example()
# #### Example SNLI run
# In[23]:
def sentence_encoding_rnn_phi(t1, t2):
    """Map `t1` and `t2` to a pair of lists of leaf nodes."""
    # t1/t2 are tree-like objects (presumably nltk Trees) whose leaves()
    # yields the token sequence — TODO confirm against the reader's output.
    return (t1.leaves(), t2.leaves())
# In[24]:
def get_sentence_encoding_vocab(X, n_words=None):
    """Build a sorted vocabulary from premise/hypothesis token pairs.

    Args:
        X: iterable of (premise_tokens, hypothesis_tokens) pairs.
        n_words (int, optional): if given, keep only the `n_words` most
            frequent tokens; otherwise keep every token.

    Returns:
        Sorted list of vocabulary tokens, always including "$UNK".
    """
    # Feed Counter a generator instead of materializing the token list.
    wc = Counter(w for pair in X for ex in pair for w in ex)
    wc = wc.most_common(n_words) if n_words else wc.items()
    vocab = {w for w, _ in wc}
    vocab.add("$UNK")
    return sorted(vocab)
# In[25]:
def fit_sentence_encoding_rnn(X, y):
    """Train a sentence-encoding RNN on (premise, hypothesis) pairs.

    Builds a 10K-word vocabulary from the training pairs, then fits
    `TorchRNNSentenceEncoderClassifier` for 10 iterations and returns
    the fitted model.
    """
    vocab = get_sentence_encoding_vocab(X, n_words=10000)
    mod = TorchRNNSentenceEncoderClassifier(
        vocab, hidden_dim=50, max_iter=10)
    mod.fit(X, y)
    return mod
# In[26]:
# Train/evaluate the sentence-encoding RNN on a 10% SNLI sample.
# assess_reader=None presumably makes nli.experiment carve an assessment
# split out of the training reader itself — confirm against nli.experiment.
_ = nli.experiment(
    train_reader=nli.SNLITrainReader(
        SNLI_HOME, samp_percentage=0.10, random_state=42),
    phi=sentence_encoding_rnn_phi,
    train_func=fit_sentence_encoding_rnn,
    assess_reader=None,
    random_state=42,
    vectorize=False)
# ### Other sentence-encoding model ideas
#
# Given that [we already explored tree-structured neural networks (TreeNNs)](sst_03_neural_networks.ipynb#Tree-structured-neural-networks), it's natural to consider these as the basis for sentence-encoding NLI models:
#
# <img src="fig/nli-treenn.png" width=800 />
#
# And this is just the beginning: any model used to represent sentences is presumably a candidate for use in sentence-encoding NLI!
# ## Chained models
#
# The final major class of NLI designs we look at are those in which the premise and hypothesis are processed sequentially, as a pair. These don't deliver representations of the premise or hypothesis separately. They bear the strongest resemblance to classic sequence-to-sequence models.
# ### Simple RNN
#
# In the simplest version of this model, we just concatenate the premise and hypothesis. The model itself is identical to the one we used for the Stanford Sentiment Treebank:
#
# <img src="fig/nli-rnn-chained.png" width=800 />
# To implement this, we can use `TorchRNNClassifier` out of the box. We just need to concatenate the leaves of the premise and hypothesis trees:
# In[27]:
def simple_chained_rep_rnn_phi(t1, t2):
    """Concatenate the leaf sequences of `t1` and `t2` into one list.

    A slight variant might insert a designated boundary symbol between
    the premise leaves and the hypothesis leaves. Be sure to add it to
    the vocab in that case, else it will be $UNK.
    """
    premise_leaves = t1.leaves()
    hypothesis_leaves = t2.leaves()
    return premise_leaves + hypothesis_leaves
# Here's a quick evaluation, just to get a feel for this model:
# In[28]:
def fit_simple_chained_rnn(X, y):
    """Train a plain chained RNN on concatenated premise+hypothesis tokens.

    Uses a 10K-word vocabulary and fits `TorchRNNClassifier` for 10
    iterations, returning the fitted model.
    """
    vocab = utils.get_vocab(X, n_words=10000)
    mod = TorchRNNClassifier(vocab, hidden_dim=50, max_iter=10)
    mod.fit(X, y)
    return mod
# In[29]:
# Quick evaluation of the simple chained (concatenated) RNN on a 10% SNLI sample.
_ = nli.experiment(
    train_reader=nli.SNLITrainReader(
        SNLI_HOME, samp_percentage=0.10, random_state=42),
    phi=simple_chained_rep_rnn_phi,
    train_func=fit_simple_chained_rnn,
    assess_reader=None,
    random_state=42,
    vectorize=False)
# ### Separate premise and hypothesis RNNs
#
# A natural variation on the above is to give the premise and hypothesis each their own RNN:
#
# <img src="fig/nli-rnn-chained-separate.png" width=800 />
#
# This greatly increases the number of parameters, but it gives the model more chances to learn that appearing in the premise is different from appearing in the hypothesis. One could even push this idea further by giving the premise and hypothesis their own embeddings as well. One could implement this easily by modifying [the sentence-encoder version defined above](#Sentence-encoding-RNNs).
# ## Attention mechanisms
#
# Many of the best-performing systems in [the SNLI leaderboard](https://nlp.stanford.edu/projects/snli/) use __attention mechanisms__ to help the model learn important associations between words in the premise and words in the hypothesis. I believe [Rocktäschel et al. (2015)](https://arxiv.org/pdf/1509.06664v1.pdf) were the first to explore such models for NLI.
#
# For instance, if _puppy_ appears in the premise and _dog_ in the conclusion, then that might be a high-precision indicator that the correct relationship is entailment.
#
# This diagram is a high-level schematic for adding attention mechanisms to a chained RNN model for NLI:
#
# <img src="fig/nli-rnn-attention.png" width=800 />
#
# Since PyTorch will handle the details of backpropagation, implementing these models is largely reduced to figuring out how to wrangle the states of the model in the desired way.
# ## Error analysis with the MultiNLI annotations
#
# The annotations included with the MultiNLI corpus create some powerful yet easy opportunities for error analysis right out of the box. This section illustrates how to make use of them with models you've trained.
#
# First, we train a sentence-encoding model on a sample of the MultiNLI data, just for illustrative purposes:
# In[30]:
# Train a sentence-encoding model on a 10% MultiNLI sample; the returned
# dict (model, phi, ...) is reused below to score annotated examples.
rnn_multinli_experiment = nli.experiment(
    train_reader=nli.MultiNLITrainReader(
        MULTINLI_HOME, samp_percentage=0.10, random_state=42),
    phi=sentence_encoding_rnn_phi,
    train_func=fit_sentence_encoding_rnn,
    assess_reader=None,
    random_state=42,
    vectorize=False)
# The return value of `nli.experiment` contains the information we need to make predictions on new examples.
#
# Next, we load in the 'matched' condition annotations ('mismatched' would work as well):
# In[31]:
# Path to the hand-annotated subset of the MultiNLI 'matched' dev set.
matched_ann_filename = os.path.join(
    ANNOTATIONS_HOME,
    "multinli_1.0_matched_annotations.txt")
# In[32]:
# Parse the annotation file; presumably maps example ids to dicts with
# 'example' and 'annotations' keys — confirm against nli.read_annotated_subset.
matched_ann = nli.read_annotated_subset(
    matched_ann_filename, MULTINLI_HOME)
# The following function uses `rnn_multinli_experiment` to make predictions on annotated examples, and harvests some other information that is useful for error analysis:
# In[33]:
def predict_annotated_example(ann, experiment_results):
    """Run a trained model on one annotated example.

    `ann` is a dict with an 'example' record and a list of 'annotations';
    `experiment_results` is the dict returned by `nli.experiment`, from
    which the fitted model and feature function are reused. Returns a
    flat dict with one True entry per annotation category plus 'gold',
    'prediction', and 'correct' fields.
    """
    ex = ann['example']
    features = experiment_results['phi'](ex.sentence1_parse, ex.sentence2_parse)
    prediction = experiment_results['model'].predict([features])[0]
    gold = ex.gold_label
    data = dict.fromkeys(ann['annotations'], True)
    data.update(gold=gold, prediction=prediction, correct=gold == prediction)
    return data
# Finally, this function applies `predict_annotated_example` to a collection of annotated examples and puts the results in a `pd.DataFrame` for flexible analysis:
# In[34]:
def get_predictions_for_annotated_data(anns, experiment_results):
    """Score every annotated example and collect the results.

    Args:
        anns: dict mapping example ids to annotation records, as returned
            by `nli.read_annotated_subset`.
        experiment_results: dict returned by `nli.experiment` (supplies
            the fitted model and feature function).

    Returns:
        pd.DataFrame with one row per annotated example.
    """
    # The example id was unused, so iterate values directly; a list
    # comprehension replaces the manual append loop.
    data = [predict_annotated_example(ann, experiment_results)
            for ann in anns.values()]
    return pd.DataFrame(data)
# In[35]:
# Score every annotated matched-dev example with the MultiNLI model.
ann_analysis_df = get_predictions_for_annotated_data(
    matched_ann, rnn_multinli_experiment)
# With `ann_analysis_df`, we can see how the model does on individual annotation categories:
# In[36]:
# Cross-tabulate prediction correctness against the #MODAL annotation category.
pd.crosstab(ann_analysis_df['correct'], ann_analysis_df['#MODAL'])
# ## Other findings
#
# 1. A high-level lesson of [the SNLI leaderboard](https://nlp.stanford.edu/projects/snli/) is that one can do __extremely well__ with simple neural models whose hyperparameters are selected via extensive cross-validation. This is mathematically interesting but might be dispiriting to those of us without vast resources to devote to these computations! (On the flip side, cleverly designed linear models or ensembles with sparse feature representations might beat all of these entrants with a fraction of the computational budget.)
#
# 1. In an outstanding project for this course in 2016, [Leonid Keselman](https://leonidk.com) observed that [one can do much better than chance on SNLI by processing only the hypothesis](https://leonidk.com/stanford/cs224u.html). This relates to [observations we made in the word-level homework/bake-off](hw4_wordentail.ipynb) about how certain terms will tend to appear more on the right in entailment pairs than on the left. Last year, a number of groups independently (re-)discovered this fact and published analyses: [Poliak et al. 2018](https://aclanthology.info/papers/S18-2023/s18-2023), [Tsuchiya 2018](https://aclanthology.info/papers/L18-1239/l18-1239), [Gururangan et al. 2018](https://aclanthology.info/papers/N18-2017/n18-2017).
#
# 1. As we pointed out at the start of this unit, [Dagan et al. (2006) pitched NLI as a general-purpose NLU task](nli_01_task_and_data.ipynb#Overview). We might then hope that the representations we learn on this task will transfer to others. So far, the evidence for this is decidedly mixed. I suspect the core scientific idea is sound, but that __we still lack the needed methods for doing transfer learning__.
#
# 1. For SNLI, we seem to have entered the inevitable phase in machine learning problems where __ensembles do best__.
# ## Exploratory exercises
#
# These are largely meant to give you a feel for the material, but some of them could lead to projects and help you with future work for the course. These are not for credit.
#
# 1. When we [feed dense representations to a simple classifier](#Dense-representations-with-a-linear-classifier), what is the effect of changing the combination functions (e.g., changing `sum` to `mean`; changing `concatenate` to `difference`)? What happens if we swap out `LogisticRegression` for, say, an [sklearn.ensemble.RandomForestClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html) instance?
#
# 1. Implement the [Separate premise and hypothesis RNN](#Separate-premise-and-hypothesis-RNNs) and evaluate it, comparing in particular against [the version that simply concatenates the premise and hypothesis](#Simple-RNN). Does having all these additional parameters pay off? Do you need more training examples to start to see the value of this idea?
#
# 1. The illustrations above all use SNLI. It is worth experimenting with MultiNLI as well. It has both __matched__ and __mismatched__ dev sets. It's also interesting to think about combining SNLI and MultiNLI, to get additional training instances, to push the models to generalize more, and to assess transfer learning hypotheses.
| 38.268717 | 743 | 0.712559 |
7953f0476f3f39b6e57c98b9f7e440474868521d | 774 | py | Python | examples/cloudobject.py | gerardparis/pywren-ibm-cloud | ca69bed54f5bd5157bcda961b86dbfcfecf3c54a | [
"Apache-2.0"
] | null | null | null | examples/cloudobject.py | gerardparis/pywren-ibm-cloud | ca69bed54f5bd5157bcda961b86dbfcfecf3c54a | [
"Apache-2.0"
] | null | null | null | examples/cloudobject.py | gerardparis/pywren-ibm-cloud | ca69bed54f5bd5157bcda961b86dbfcfecf3c54a | [
"Apache-2.0"
] | null | null | null | """
Simple PyWren example using cloudobjects to transparently pass
objects stored in the storage backend between functions without
knowing their exact location (bucket, key)
"""
import pywren_ibm_cloud as pywren
def my_function_put(text, internal_storage):
    """Store two temp objects built from `text`; return their cloudobject handles."""
    handles = []
    for index in (1, 2):
        payload = 'Temp object test {}: {}'.format(index, text)
        handles.append(internal_storage.put_object(payload))
    return handles
def my_function_get(co, internal_storage):
    """Fetch and return the payload referenced by cloudobject `co`."""
    return internal_storage.get_object(co)
if __name__ == "__main__":
    # Demo: one async task writes two cloudobjects, then a map job reads
    # them back — the storage location is never handled explicitly.
    pw = pywren.ibm_cf_executor()
    pw.call_async(my_function_put, 'Hello World')
    cloudobjects = pw.get_result()
    pw.map(my_function_get, cloudobjects)
    print(pw.get_result())
    pw.clean()
7953f0deb4bfbf859be4a0992be763073681869a | 397 | py | Python | bin/cakechat_server.py | sketscripter/emotional-chatbot-cakechat | 470df58a2206a0ea38b6bed53b20cbc63bd3de24 | [
"Apache-2.0"
] | 1,608 | 2018-01-31T15:22:29.000Z | 2022-03-30T19:59:16.000Z | bin/cakechat_server.py | GaelicThunder/cakechat | 844507281b30d81b3fe3674895fe27826dba8438 | [
"Apache-2.0"
] | 60 | 2018-02-01T11:45:51.000Z | 2019-11-13T10:35:59.000Z | bin/cakechat_server.py | GaelicThunder/cakechat | 844507281b30d81b3fe3674895fe27826dba8438 | [
"Apache-2.0"
] | 690 | 2018-01-31T17:57:19.000Z | 2022-03-30T07:07:41.000Z | import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from cakechat.utils.env import set_keras_tf_session
# Fraction of GPU memory to allocate. Environment values arrive as strings,
# so coerce to float — the old code passed the raw string through whenever
# GPU_MEMORY_FRACTION was actually set, while the default was a float.
gpu_memory_fraction = float(os.environ.get('GPU_MEMORY_FRACTION', 0.1))
set_keras_tf_session(gpu_memory_fraction)
from cakechat.api.v1.server import app
if __name__ == '__main__':
    # runs development server
    # Flask's built-in server: binds all interfaces on port 8080
    # (not for production use).
    app.run(host='0.0.0.0', port=8080)
| 24.8125 | 76 | 0.7733 |
7953f0fe616255ca5f2f84a88997834a94d16a25 | 17,958 | py | Python | tetris.py | ForceOverArea/tetrabot-code | 56bfae9cea347016f65077bb5e5942c8e64ca85d | [
"MIT"
] | null | null | null | tetris.py | ForceOverArea/tetrabot-code | 56bfae9cea347016f65077bb5e5942c8e64ca85d | [
"MIT"
] | 2 | 2020-12-01T22:36:48.000Z | 2021-06-05T21:44:56.000Z | tetris.py | ForceOverArea/tetrabot-code | 56bfae9cea347016f65077bb5e5942c8e64ca85d | [
"MIT"
] | null | null | null | import copy, random
"""
This library is a 'toolbox' for building a tetris game elsewhere
- This library intended for use with Discord.py/Discord API
"""
def default():
    # This function can be copied and modified to
    # provide different skins for the game
    """Return the default dict of Discord emoji strings used by the game."""
    colors = ["r", "o", "y", "g", "b", "p", "w"]
    emojis = [":red_square:", ":orange_square:", ":yellow_square:",
              ":green_square:", ":blue_square:", ":purple_square:",
              ":blue_circle:"]
    skin = dict(zip(colors, emojis))
    skin["empty"] = ":black_circle:"
    return skin
def new_board(entities=None):
    """
    Returns a blank 8x12-playfield tetris board (19 rows total).
    - This is in list(list(str)) format: 2 top-margin rows of empty
      strings, 16 playfield rows of [border] + 8 empty cells + 2 borders,
      and 1 bottom-margin row.
    - `entities` defaults to `default()` resolved per call (the old
      def-time default shared one dict across all calls).
    - Every row is an independent list; the old `2*[11*[""]]` top margin
      made rows 0 and 1 aliases of the same list object.
    """
    if entities is None:
        entities = default()
    empty = entities["empty"]
    top_margin = [[""] * 11 for _ in range(2)]
    body = [[""] + [empty] * 8 + ["", ""] for _ in range(16)]
    bottom_margin = [[""] * 11]
    return top_margin + body + bottom_margin
class tetramino():
    """Represents a tetramino in space on the board coord system.

    NOTE this object can be initialized with any string, but only the
    shapes handled in render() are renderable; render() returns None for
    anything else (the blank tetramino("") is used as the empty hold slot).
    """
    def __init__(self, shape, rotation=0, entities=None):
        # `entities` defaults to the plain square emoji set, resolved per
        # call; the old `entities=default()` default was evaluated once at
        # definition time and shared by every piece.
        self.entities = default() if entities is None else entities
        self.shape = shape.upper() # shape must be a string representing the "letter name" of each tetramino
        self.rot = rotation # rotation can have value 0-3 with each int corresponding to 90deg rotation
    def render(self):
        """
        Renders the tetramino in a list(list(str)) format.
        - NOTE doing this "unpacks" the information about the tetramino to a
          more visual-friendly format, but is much harder to manipulate
          than the tetramino obj itself.
        """
        if self.shape == "T":
            # define the entities used here, then put them in a grid below
            # this applies to all shapes
            t = self.entities["p"]
            o = self.entities["empty"]
            if self.rot == 0:
                return [
                    [o,o,o,o],
                    [t,t,t,o],
                    [o,t,o,o],
                    [o,o,o,o]
                ]
            if self.rot == 1:
                return [
                    [o,o,o,o],
                    [o,t,o,o],
                    [o,t,t,o],
                    [o,t,o,o]
                ]
            if self.rot == 2:
                return [
                    [o,o,o,o],
                    [o,t,o,o],
                    [t,t,t,o],
                    [o,o,o,o]
                ]
            if self.rot == 3:
                return [
                    [o,o,o,o],
                    [o,t,o,o],
                    [t,t,o,o],
                    [o,t,o,o]
                ]
        if self.shape == "I":
            t = self.entities["w"]
            o = self.entities["empty"]
            if self.rot in [0,2]:
                return [
                    [o,o,o,o],
                    [o,o,o,o],
                    [t,t,t,t],
                    [o,o,o,o]
                ]
            if self.rot in [1,3]:
                return [
                    [o,t,o,o],
                    [o,t,o,o],
                    [o,t,o,o],
                    [o,t,o,o]
                ]
        if self.shape == "L":
            t = self.entities["o"]
            o = self.entities["empty"]
            if self.rot == 0:
                return [
                    [o,o,o,o],
                    [o,t,o,o],
                    [o,t,o,o],
                    [o,t,t,o]
                ]
            if self.rot == 1:
                return [
                    [o,o,o,o],
                    [o,o,t,o],
                    [t,t,t,o],
                    [o,o,o,o]
                ]
            if self.rot == 2:
                return [
                    [o,o,o,o],
                    [t,t,o,o],
                    [o,t,o,o],
                    [o,t,o,o]
                ]
            if self.rot == 3:
                return [
                    [o,o,o,o],
                    [o,o,o,o],
                    [t,t,t,o],
                    [t,o,o,o]
                ]
        if self.shape == "J":
            t = self.entities["b"]
            o = self.entities["empty"]
            if self.rot == 0:
                return [
                    [o,o,o,o],
                    [o,t,o,o],
                    [o,t,o,o],
                    [t,t,o,o]
                ]
            if self.rot == 1:
                return [
                    [o,o,o,o],
                    [o,o,o,o],
                    [t,t,t,o],
                    [o,o,t,o]
                ]
            if self.rot == 2:
                return [
                    [o,o,o,o],
                    [o,t,t,o],
                    [o,t,o,o],
                    [o,t,o,o]
                ]
            if self.rot == 3:
                return [
                    [o,o,o,o],
                    [t,o,o,o],
                    [t,t,t,o],
                    [o,o,o,o]
                ]
        if self.shape == "S":
            t = self.entities["g"]
            o = self.entities["empty"]
            if self.rot in [0,2]:
                return [
                    [o,o,o,o],
                    [o,t,t,o],
                    [t,t,o,o],
                    [o,o,o,o]
                ]
            if self.rot in [1,3]:
                return [
                    [o,o,o,o],
                    [t,o,o,o],
                    [t,t,o,o],
                    [o,t,o,o]
                ]
        if self.shape == "Z":
            t = self.entities["r"]
            o = self.entities["empty"]
            if self.rot in [0,2]:
                return [
                    [o,o,o,o],
                    [t,t,o,o],
                    [o,t,t,o],
                    [o,o,o,o]
                ]
            if self.rot in [1,3]:
                return [
                    [o,o,o,o],
                    [o,t,o,o],
                    [t,t,o,o],
                    [t,o,o,o]
                ]
        if self.shape == "O":
            t = self.entities["y"]
            o = self.entities["empty"]
            return [ # shape has only one unique orientation, so no decision tree
                [o,o,o,o],
                [o,t,t,o],
                [o,t,t,o],
                [o,o,o,o]
            ]
    def rotate(self, direction:bool):
        """Rotate 90deg: False steps the rotation index forward (+1),
        True steps it backward (-1); the index wraps within 0-3."""
        # Modulo arithmetic replaces the old manual wrap-around branches,
        # whose trailing `else: raise` arm was unreachable for boolean input.
        step = -1 if direction else 1
        self.rot = (self.rot + step) % 4
class board():
    """Grid model for the play field.

    THIS CLASS IS PRIMARILY MEANT FOR USE WITHIN THE 'GAME' CLASS. IT MAY
    MISBEHAVE WHEN ALTERED INDEPENDENTLY.
    """
    def __init__(self, entities=None, state=None):
        # Resolve defaults at call time. The previous signature used
        # `entities=default(), state=new_board()`, which evaluated ONCE at
        # class-definition time, so every board built without an explicit
        # `state` shared the very same grid object (mutable-default bug).
        self.entities = default() if entities is None else entities
        self.state = new_board() if state is None else state
    def _stamp(self, target, sprite, x, y):
        """Write `sprite`'s non-empty pixels into `target` (in place) at (x, y).

        Shared implementation behind merge() and display(). Raises when a
        non-empty sprite pixel would land on a non-empty board pixel, and
        propagates IndexError when a probed cell is past the grid's end;
        game logic relies on both failure modes to reject illegal moves.
        """
        grid = sprite.render()
        ymargin = 1
        xmargin = 0
        entities = self.entities
        for j in range(y+ymargin, y+4+ymargin):
            for i in range(x+xmargin, x+4+xmargin):
                sj = j-y-ymargin  # sprite-local coordinates
                si = i-x-xmargin
                sprite_pixel_empty = grid[sj][si] == entities["empty"]
                # NOTE: the board cell is probed even for empty sprite
                # pixels, on purpose — out-of-range placements must raise
                # IndexError exactly as the original merge/display did.
                board_pixel_empty = target[j][i] == entities["empty"]
                if sprite_pixel_empty:
                    continue
                if board_pixel_empty:
                    target[j][i] = grid[sj][si]
                else:
                    raise Exception(f"Tetramino is colliding with a solid object at board pixel x:{i} y:{j}")
    def merge(self, sprite, x, y):
        """Permanently stamp `sprite` onto the board if the placement is legal."""
        self._stamp(self.state, sprite, x, y)
    def clear_lines(self):
        """Remove every row with no empty entity, refilling blanks from the top.

        Returns the number of rows cleared.
        """
        count = 0
        for j in range(2,18):
            if self.entities["empty"] not in self.state[j]:
                self.state.pop(j)
                count+=1
                self.state.insert(2, [""] + 8*[copy.deepcopy(self.entities["empty"])] + [""] + [""],)
        return count
    def display(self, sprite, x, y):
        """Render the board with `sprite` overlaid temporarily; returns a string."""
        tempstate = copy.deepcopy(self.state)
        self._stamp(tempstate, sprite, x, y)
        return "\n".join(["".join(row) for row in tempstate])
    def dispraw(self):
        """Render the raw board state (no overlay, no error handling) for debugging."""
        return "\n".join(["".join(row) for row in self.state])
class game():
    """Represents a tetris game with a distinct board and active piece."""
    def __init__(self, player, instance, board:board, x:int, y:int):
        # `player`/`instance` identify whose game this is (opaque handles,
        # presumably Discord user/session objects — confirm with callers).
        self.instance = instance
        self.player = player
        self.board = board
        # 7-bag randomizer: one of each tetramino, shuffled, popped until empty.
        self.grab_bag = ["T","I","O","L","J","S","Z"]
        random.shuffle(self.grab_bag)
        self.score = 0
        # Multiplier applied on the next line clear (t-spins set it to 100).
        self.score_bonus = 1
        self.piece = tetramino(self.grab_bag.pop())
        self.hold_piece = tetramino("") # start with a blank tetramino here to simplify hold method definition code
        self.alreadyHeld = False # user has not used their hold by default
        # (x, y) is the active piece's cursor position on the board.
        self.x = x
        self.y = y
    def left(self):
        """Moves the cursor 1 unit left."""
        self.board.display(self.piece, self.x-1, self.y)
        # if the operation is illegal, the board.display()
        # method will crash and prevent the data update
        self.x += -1
    def right(self):
        """Moves the cursor 1 unit right."""
        self.board.display(self.piece, self.x+1, self.y)
        self.x += 1
    def drop(self):
        """Drops the piece by 1 unit if possible."""
        self.board.display(self.piece, self.x, self.y+1)
        self.y += 1
    def cw(self):
        """Changes the piece's angle by -90deg."""
        # Trial-rotate a shallow copy first; only commit if it renders legally.
        rotation_test = copy.copy(self.piece)
        rotation_test.rotate(True)
        self.board.display(rotation_test, self.x, self.y) # this will crash if the move is illegal and prevent rotation from being altered
        self.piece.rotate(True)
    def ccw(self):
        """Changes the piece's angle by +90deg."""
        rotation_test = copy.copy(self.piece)
        rotation_test.rotate(False)
        self.board.display(rotation_test, self.x, self.y)
        self.piece.rotate(False)
    def tspin_cw(self):
        """Does a t-spin if possible and eligible on a cw rotation."""
        try:
            self.board.display(self.piece, self.x, self.y-1)
            # if ELIGIBLE, T-piece should NOT be able to move up 1 pixel since it's under a ledge
            # if this is the case, the above will crash and the exception will attempt the t-spin
        except:
            # NOTE(review): bare except also swallows unrelated errors.
            ts_test = copy.copy(self.piece)
            ts_test.rotate(True)
            self.board.display(ts_test, self.x-1, self.y+2) # test if the display method will allow this, if so, the below code will run as well without issue
            # if the above doesn't crash do the following
            self.piece.rotate(True)
            self.x += -1
            self.y += 2
            self.score_bonus = 100 # temporarily set the bonus multiplier to 100
            return
        raise Exception("ineligible for t-spin")
    def tspin_ccw(self):
        """Does a t-spin if possible on a ccw rotation."""
        try:
            self.board.display(self.piece, self.x, self.y-1)
            # if ELIGIBLE, T-piece should NOT be able to move up 1 pixel since it's under a ledge
            # if this is the case, the above will crash and the exception will attempt the t-spin
        except:
            ts_test = copy.copy(self.piece)
            ts_test.rotate(False)
            self.board.display(ts_test, self.x+1, self.y+2)
            # if the above doesn't crash do the following
            self.piece.rotate(False)
            self.x += 1
            self.y += 2
            self.score_bonus = 100
            return
        raise Exception("ineligible for t-spin")
    def harddrop(self):
        """Instantly drops a piece as far down as possible."""
        # NOTE(review): no `break` after the except branch — after the first
        # failing row sets self.y, later iterations re-probe the failing
        # position inside `except` and propagate the exception; callers
        # appear to rely on catching it. Debug print()s also left in.
        for hdy in range((self.y),18):
            try:
                print("trying: ", hdy)
                self.board.display(self.piece, self.x, hdy)
            except:
                print("excepting: ", hdy)
                self.board.display(self.piece, self.x, hdy-1) #crashes if the resulting harddrop is impossible/illegal
                self.y = hdy-1 #sets the cursor position
    def hold(self):
        """Save a piece for later use."""
        # NOTE(review): alreadyHeld is set but never checked here, so hold
        # spamming is only prevented if the caller enforces the flag.
        if self.hold_piece.shape == "":
            print("Attempting primary hold")
            # swap the piece into the hold slot and grab a new one, then reset cursor
            self.hold_piece = self.piece
            self.grab()
            self.x = 3
            self.y = 0
            self.alreadyHeld = True
            # prevent player from spamming hold to stall.
            # this status is reverted to False after a
            # successful merge() call. see merge() definition for more info
        else:
            print("Attempting secondary hold")
            # swap the pieces in the hold and piece slots, then reset cursor
            stor = self.hold_piece
            self.hold_piece = self.piece
            self.piece = stor
            self.x = 3
            self.y = 0
            self.alreadyHeld = True
    def clear(self):
        """Clears all complete lines on the board."""
        score_factor = self.board.clear_lines()
        if score_factor != 0:
            # if the board is perfectly cleared, multiply bonus factor by 100000.
            if self.board.state == new_board():
                self.score_bonus = self.score_bonus*100000 # NOTE that this only works because t-spin bonus is ALWAYS applied prior to line clearing.
            # Exponential scoring: bonus * 10^lines_cleared.
            self.score += self.score_bonus*10**score_factor
            self.score_bonus = 1
    def grab(self):
        """Picks a new piece from the grab bag and automatically refills it when empty."""
        # NOTE(review): bare except stands in for catching IndexError from pop().
        try:
            self.piece = tetramino(self.grab_bag.pop())
        except:
            self.grab_bag = ["T","I","O","L","J","S","Z"]
            random.shuffle(self.grab_bag)
            self.piece = tetramino(self.grab_bag.pop())
    def merge(self):
        """Merges the current piece to the board at the current cursor position."""
        self.board.merge(self.piece, self.x, self.y)
        self.alreadyHeld = False
        # allow the player to hold again now
        # that they have used their current piece
    def display(self):
        """Returns a string of the current game's screen."""
        return self.board.display(self.piece, self.x, self.y)
7953f110dc7f2fc36ffd1f5ce007c29f207b8bc9 | 288 | py | Python | OPTIMIZED_CLEAN.py | CaptainVietnam6/GoPro-output-cleaner | fbffc8ff91d2cc9845c2aab1c41a0e0f9c6ac6d4 | [
"MIT"
] | null | null | null | OPTIMIZED_CLEAN.py | CaptainVietnam6/GoPro-output-cleaner | fbffc8ff91d2cc9845c2aab1c41a0e0f9c6ac6d4 | [
"MIT"
] | null | null | null | OPTIMIZED_CLEAN.py | CaptainVietnam6/GoPro-output-cleaner | fbffc8ff91d2cc9845c2aab1c41a0e0f9c6ac6d4 | [
"MIT"
] | null | null | null | import os
# Renumber freshly-copied GoPro footage into GoProN.mp4 and delete sidecars.
# Find the first unused output index. The original computed the probe path
# once (d = f"./GoPro{n}.mp4" with n == 0) and then looped on that same
# path forever, so it hung whenever GoPro0.mp4 already existed.
n = 0
while os.path.exists(f"./GoPro{n}.mp4"):
    n += 1
# Extensions of GoPro sidecar files that should be deleted outright.
sidecar_exts = (".WAV", ".THM", ".LRV")
for name in os.listdir("./"):
    if name.endswith(".MP4"):
        # Keep the video, renamed into the sequential GoProN.mp4 scheme.
        os.rename(name, f"GoPro{n}.mp4")
        n += 1
    elif name.endswith(sidecar_exts):
        os.remove(name)
7953f20364fdede96cd3eadcf6253d9aa42d82b8 | 897 | py | Python | python-packages/nolearn-0.5/build/lib.linux-x86_64-2.7/nolearn/tests/test_inischema.py | rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | ee45bee6f96cdb6d91184abc16f41bba1546c943 | [
"BSD-3-Clause"
] | 2 | 2017-08-13T14:09:32.000Z | 2018-07-16T23:39:00.000Z | python-packages/nolearn-0.5/build/lib.linux-x86_64-2.7/nolearn/tests/test_inischema.py | rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | ee45bee6f96cdb6d91184abc16f41bba1546c943 | [
"BSD-3-Clause"
] | null | null | null | python-packages/nolearn-0.5/build/lib.linux-x86_64-2.7/nolearn/tests/test_inischema.py | rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | ee45bee6f96cdb6d91184abc16f41bba1546c943 | [
"BSD-3-Clause"
] | 2 | 2018-04-02T06:45:11.000Z | 2018-07-16T23:39:02.000Z | SAMPLE_SCHEMA = """
[first]
value1 = int
value2 = string
value3 = float
value4 = listofstrings
value5 = listofints
[second]
value1 = string
value2 = int
"""
SAMPLE_CONFIGURATION = """
[first]
value1 = 3
value2 = Three
value3 = 3.0
value4 = Three Drei
value5 = 3 3
[second]
value1 =
a few line breaks
are no problem
neither is a missing value2
"""
def test_parse_config():
    # Parses SAMPLE_CONFIGURATION against SAMPLE_SCHEMA and checks that
    # values are coerced to the types the schema declares.
    from ..console import parse_config
    result = parse_config(SAMPLE_SCHEMA, SAMPLE_CONFIGURATION)
    # [first]: int, string, float, and the two list-valued types.
    assert result['first']['value1'] == 3
    assert result['first']['value2'] == u'Three'
    assert result['first']['value3'] == 3.0
    assert result['first']['value4'] == [u'Three', u'Drei']
    assert result['first']['value5'] == [3, 3]
    # [second]: multi-line string values survive; a missing value is simply absent.
    assert result['second']['value1'] == (
        u'a few line breaks\nare no problem\nneither is a missing value2')
    assert 'value2' not in result['second']
| 20.860465 | 74 | 0.656633 |
7953f3163af7bc874692f51aa8af9824cf787989 | 13,356 | py | Python | tenable/io/users.py | Rogdham/pyTenable | 79f3f7360f8ef31b964f1db99d0c7b8a0bc25d7a | [
"MIT"
] | 1 | 2022-03-01T17:17:19.000Z | 2022-03-01T17:17:19.000Z | tenable/io/users.py | Rogdham/pyTenable | 79f3f7360f8ef31b964f1db99d0c7b8a0bc25d7a | [
"MIT"
] | 25 | 2021-11-16T18:41:36.000Z | 2022-03-25T05:43:31.000Z | tenable/io/users.py | Rogdham/pyTenable | 79f3f7360f8ef31b964f1db99d0c7b8a0bc25d7a | [
"MIT"
] | 2 | 2022-03-02T12:24:40.000Z | 2022-03-29T05:12:04.000Z | '''
Users
=====
The following methods allow for interaction into the Tenable.io
:devportal:`users <users>` API endpoints.
Methods available on ``tio.users``:
.. rst-class:: hide-signature
.. autoclass:: UsersAPI
:members:
'''
from tenable.utils import dict_merge
from tenable.io.base import TIOEndpoint
class UsersAPI(TIOEndpoint):
    '''
    Methods for managing Tenable.io user accounts: CRUD, enable/disable,
    two-factor settings, impersonation, password changes, API keys and
    instance authorizations.
    '''
    def create(self, username, password, permissions,
               name=None, email=None, account_type=None):
        '''
        Create a new user.

        :devportal:`users: create <users-create>`

        Args:
            username (str): The username for the new user.
            password (str): The password for the new user.
            permissions (int):
                The permissions role for the user. The permissions integer
                is derived based on the desired role of the user. For details
                describing what permissions values mean what roles, please refer
                to the `User Roles <https://cloud.tenable.com/api#/authorization>`_
                table to see what permissions are accepted.
            name (str, optional): The human-readable name of the user.
            email (str, optional): The email address of the user.
            account_type (str, optional):
                The account type for the user. The default is `local`.

        Returns:
            :obj:`dict`:
                The resource record of the new user.

        Examples:
            Create a standard user:

            >>> user = tio.users.create('jsmith@company.com', 'password1', 32)

            Create an admin user and add the email and name:

            >>> user = tio.users.create('jdoe@company.com', 'password', 64,
            ...     name='Jane Doe', email='jdoe@company.com')
        '''
        payload = {
            'username': self._check('username', username, str),
            'password': self._check('password', password, str),
            'permissions': self._check('permissions', permissions, int),
            'type': self._check('account_type', account_type, str, default='local'),
        }
        # Optional attributes are only included when explicitly supplied.
        if name:
            payload['name'] = self._check('name', name, str)
        if email:
            payload['email'] = self._check('email', email, str)
        return self._api.post('users', json=payload).json()
    def delete(self, user_id):
        '''
        Removes a user from Tenable.io.

        :devportal:`users: delete <users-delete>`

        Args:
            user_id (int): The unique identifier of the user.

        Returns:
            :obj:`None`:
                The user was successfully deleted.

        Examples:
            >>> tio.users.delete(1)
        '''
        self._api.delete('users/{}'.format(self._check('user_id', user_id, int)))
    def details(self, user_id):
        '''
        Retrieve the details of a user.

        :devportal:`users: details <users-details>`

        Args:
            user_id (int): The unique identifier for the user.

        Returns:
            :obj:`dict`:
                The resource record for the user.

        Examples:
            >>> user = tio.users.details(1)
        '''
        return self._api.get('users/{}'.format(self._check('user_id', user_id, int))).json()
    def edit(self, user_id, permissions=None, name=None, email=None, enabled=None):
        '''
        Modify an existing user.

        :devportal:`users: edit <users-edit>`

        Args:
            user_id (int): The unique identifier for the user.
            permissions (int, optional):
                The permissions role for the user. The permissions integer
                is derived based on the desired role of the user. For details
                describing what permissions values mean what roles, please refer
                to the `User Roles <https://cloud.tenable.com/api#/authorization>`_
                table to see what permissions are accepted.
            name (str, optional): The human-readable name of the user.
            email (str, optional): The email address of the user.
            enabled (bool, optional): Is the user account enabled?

        Returns:
            :obj:`dict`:
                The modified user resource record.

        Examples:
            >>> user = tio.users.edit(1, name='New Full Name')
        '''
        payload = dict()
        # NOTE(review): a falsy permissions value (0) would be silently
        # skipped here; presumably no valid role is 0 — confirm against the
        # User Roles table before relying on that.
        if permissions:
            payload['permissions'] = self._check('permissions', permissions,
                int)
        # `enabled` is a bool, so compare against None to allow False through.
        if enabled is not None:
            payload['enabled'] = self._check('enabled', enabled, bool)
        if email:
            payload['email'] = self._check('email', email, str)
        if name:
            payload['name'] = self._check('name', name, str)
        # Merge the data that we build with the payload with the user details.
        user = self.details(self._check('user_id', user_id, int))
        payload = dict_merge({
            'permissions': user['permissions'],
            'enabled': user['enabled'],
            'email': user['email'],
            'name': user.get('name', None),
        }, payload)
        return self._api.put('users/{}'.format(user_id), json=payload).json()
    def enabled(self, user_id, enabled):
        '''
        Enable the user account.

        :devportal:`users: enabled <users-enabled>`

        Args:
            user_id (int): The unique identifier for the user.
            enabled (bool): Is the user enabled?

        Returns:
            :obj:`dict`:
                The modified user resource record.

        Examples:
            Enable a user:

            >>> tio.users.enabled(1, True)

            Disable a user:

            >>> tio.users.enabled(1, False)
        '''
        return self._api.put('users/{}/enabled'.format(
            self._check('user_id', user_id, int)), json={
                'enabled': self._check('enabled', enabled, bool)}).json()
    def two_factor(self, user_id, email, sms, phone=None):
        '''
        Configure two-factor authorization for a specific user.

        :devportal:`users: two-factor <users-two-factor>`

        Args:
            user_id (int): The unique identifier for the user.
            email (bool):
                Whether two-factor should be additionally sent as an email.
            sms (bool):
                Whether two-factor should be enabled. This will send SMS codes.
            phone (str, optional):
                The phone number to use for two-factor authentication. Required
                when sms is set to `True`.

        Returns:
            :obj:`None`:
                Setting changes were successfully updated.

        Examples:
            Enable email authorization for a user:

            >>> tio.users.two_factor(1, True, False)

            Enable SMS authorization for a user:

            >>> tio.users.two_factor(1, False, True, '9998887766')
        '''
        payload = {
            'email_enabled': self._check('email', email, bool),
            'sms_enabled': self._check('sms', sms, bool)
        }
        if phone:
            payload['sms_phone'] = self._check('phone', phone, str)
        self._api.put('users/{}/two-factor'.format(
            self._check('user_id', user_id, int)), json=payload)
    def enable_two_factor(self, user_id, phone, password):
        '''
        Enable phone-based two-factor authorization for a specific user.

        :devportal:`users: two-factor-enable <users-two-factor-enable>`

        Args:
            user_id (int): The user id
            phone (str): The phone number to use for two-factor auth.
            password (str): The user password.

        Returns:
            :obj:`None`:
                One-time activation code sent to the provided phone number.

        Examples:
            >>> tio.users.enable_two_factor(1, '9998887766', 'password')
        '''
        self._api.post('users/{}/two-factor/send-verification'.format(
            self._check('user_id', user_id, int)), json={
                'sms_phone': self._check('phone', phone, str),
                'password': self._check('password', password, str)
            })
    def verify_two_factor(self, user_id, code):
        '''
        Send the verification code for two-factor authorization.

        :devportal:`users: two-factor-enable-verify <users-two-factor-enable-verify>`

        Args:
            user_id (int): The unique identifier for the user.
            code (str): The verification code that was sent to the device.

        Returns:
            :obj:`None`:
                The verification code was valid and two-factor is enabled.

        Examples:
            >>> tio.users.verify_two_factor(1, 'abc123')
        '''
        self._api.post('users/{}/two-factor/verify-code'.format(
            self._check('user_id', user_id, int)), json={
                'verification_code': self._check('code', code, str)})
    def impersonate(self, name):
        '''
        Impersonate as a specific user.

        :devportal:`users: impersonate <users/impersonate>`

        Args:
            name (str): The user-name of the user to impersonate.

        Returns:
            :obj:`None`:
                Impersonation successful.

        Examples:
            >>> tio.users.impersonate('jdoe@company.com')
        '''
        # Impersonation is implemented client-side by attaching the header
        # to every subsequent request on this session.
        self._api._session.headers.update({
            'X-Impersonate': 'username={}'.format(self._check('name', name, str))
        })
    def list(self):
        '''
        Retrieves a list of users.

        :devportal:`users: list <users-list>`

        Returns:
            :obj:`list`:
                List of user resource records.

        Examples:
            >>> for user in tio.users.list():
            ...     pprint(user)
        '''
        return self._api.get('users').json()['users']
    def change_password(self, user_id, old_password, new_password):
        '''
        Change the password for a specific user.

        :devportal:`users: password <users-password>`

        Args:
            user_id (int): The unique identifier for the user.
            old_password (str): The current password.
            new_password (str): The new password.

        Returns:
            :obj:`None`:
                The password has been successfully changed.

        Examples:
            >>> tio.users.change_password(1, 'old_pass', 'new_pass')
        '''
        self._api.put('users/{}/chpasswd'.format(self._check('user_id', user_id, int)), json={
            'password': self._check('new_password', new_password, str),
            'current_password': self._check('old_password', old_password, str)
        })
    def gen_api_keys(self, user_id):
        '''
        Generate the API keys for a specific user.

        :devportal:`users: keys <user-keys>`

        Args:
            user_id (int): The unique identifier for the user.

        Returns:
            :obj:`dict`:
                A dictionary containing the new API Key-pair.

        Examples:
            >>> keys = tio.users.gen_api_keys(1)
        '''
        return self._api.put('users/{}/keys'.format(
            self._check('user_id', user_id, int))).json()
    def list_auths(self, user_id):
        '''
        list user authorizations for accessing a Tenable.io instance.

        :devportal:`users: list-auths <users-list-auths>`

        Args:
            user_id (int): The unique identifier for the user.

        Returns:
            :obj:`dict`:
                Returns authorizations for the user.

        Examples:
            >>> auth = tio.users.list_auths(1)
        '''
        return self._api.get('users/{}/authorizations'.format(
            self._check('user_id', user_id, int))).json()
    def edit_auths(self, user_id, api_permitted=None, password_permitted=None, saml_permitted=None):
        '''
        update user authorizations for accessing a Tenable.io instance.

        :devportal:`users: edit-auths <users-update-auths>`

        Args:
            user_id (int):
                The unique identifier for the user.
            api_permitted (bool):
                Indicates whether API access is authorized for the user.
            password_permitted (bool):
                Indicates whether user name and password login is authorized for the user.
            saml_permitted (bool):
                Indicates whether SSO with SAML is authorized for the user.

        Returns:
            :obj:`None`:
                Returned if Tenable.io successfully updates the user's authorizations.

        Examples:
            >>> tio.users.edit_auths(1, True, True, False)
        '''
        # get current settings
        current = self.list_auths(self._check('user_id', user_id, int))
        # update payload with new settings; unspecified flags fall back to
        # the user's current values so a partial edit does not clobber them.
        payload = {
            'api_permitted': self._check('api_permitted', api_permitted, bool,
                default=current['api_permitted']),
            'password_permitted': self._check('password_permitted', password_permitted, bool,
                default=current['password_permitted']),
            'saml_permitted': self._check('saml_permitted', saml_permitted, bool,
                default=current['saml_permitted'])
        }
        return self._api.put('users/{}/authorizations'.format(
            self._check('user_id', user_id, int)), json=payload)
| 33.557789 | 100 | 0.562519 |
7953f35b3d1500c435877da74fb97a6a734f8bf2 | 1,187 | py | Python | Creacion-de-un-experimento-en-Azure-Machine-Learning-Studio/test.py | josephLSalgado/IA-Innovaccion | 67650ab1f27099859afaa704546d0eee64a71fac | [
"MIT"
] | null | null | null | Creacion-de-un-experimento-en-Azure-Machine-Learning-Studio/test.py | josephLSalgado/IA-Innovaccion | 67650ab1f27099859afaa704546d0eee64a71fac | [
"MIT"
] | null | null | null | Creacion-de-un-experimento-en-Azure-Machine-Learning-Studio/test.py | josephLSalgado/IA-Innovaccion | 67650ab1f27099859afaa704546d0eee64a71fac | [
"MIT"
] | null | null | null | import urllib
import urllib.request
import json
data = {
"Inputs": {
"input1":
{
"ColumnNames": ["WallArea", "RoofArea", "OverallHeight", "GlazingArea", "HeatingLoad"],
"Values": [ [ "296", "110.25", "7", "0", "15.55" ], [ "400", "93.25", "1", "0", "23.55" ], ]
}, },
"GlobalParameters": {
}
}
body = str.encode(json.dumps(data))
url = 'https://ussouthcentral.services.azureml.net/workspaces/b1675693baaa45b3b159b2836951deca/services/fed9c92a51b5450fbc6f9f74518c0991/execute?api-version=2.0&details=true'
api_key = 'API_KEY'
headers = {'Content-Type':'application/json', 'Authorization':('Bearer '+ api_key)}
req = urllib.request.Request(url, body, headers)
try:
response = urllib.request.urlopen(req)
result = response.read()
print(result)
except urllib.error.HTTPError as error:
print("The request failed with status code: " + str(error.code))
# Print the headers - they include the requert ID and the timestamp, which are useful for debugging the failure
print(error.info())
print(json.loads(error.read())) | 32.081081 | 174 | 0.610783 |
7953f3fb045972e2ab2eddbe79a450e587288f8a | 192 | py | Python | bin/cubes/non-convex-pentacubes-diagonal-wall.py | tiwo/puzzler | 7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e | [
"Intel"
] | null | null | null | bin/cubes/non-convex-pentacubes-diagonal-wall.py | tiwo/puzzler | 7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e | [
"Intel"
] | null | null | null | bin/cubes/non-convex-pentacubes-diagonal-wall.py | tiwo/puzzler | 7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e | [
"Intel"
] | 1 | 2022-01-02T16:54:14.000Z | 2022-01-02T16:54:14.000Z | #!/usr/bin/env python
# $Id$
"""many solutions"""
import puzzler.puzzles
from puzzler.puzzles.pentacubes import NonConvexPentacubesDiagonalWall
puzzler.run(NonConvexPentacubesDiagonalWall)
| 19.2 | 70 | 0.8125 |
7953f409bacbee1865666a71b44024d600ad0fba | 1,676 | py | Python | src/networks.py | jlebensold/flrl-ddpg | d91e9f4aedf48d0614e33bd22c7f684ecda089b1 | [
"MIT"
] | 1 | 2021-05-11T06:28:01.000Z | 2021-05-11T06:28:01.000Z | src/networks.py | jlebensold/flrl-ddpg | d91e9f4aedf48d0614e33bd22c7f684ecda089b1 | [
"MIT"
] | null | null | null | src/networks.py | jlebensold/flrl-ddpg | d91e9f4aedf48d0614e33bd22c7f684ecda089b1 | [
"MIT"
] | 1 | 2021-03-07T06:33:17.000Z | 2021-03-07T06:33:17.000Z | import random
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions import Normal
from collections import ChainMap
class ValueNetwork(nn.Module):
    """Critic network: maps a (state, action) pair to a scalar Q-value."""

    def __init__(self, num_inputs, num_actions, hidden_size, init_w=3e-3):
        super(ValueNetwork, self).__init__()
        # The critic consumes the state and action concatenated together.
        self.linear1 = nn.Linear(num_inputs + num_actions, hidden_size)
        self.linear2 = nn.Linear(hidden_size, hidden_size)
        self.linear3 = nn.Linear(hidden_size, 1)
        # Small uniform init on the output layer keeps initial Q estimates
        # close to zero.
        self.linear3.weight.data.uniform_(-init_w, init_w)
        self.linear3.bias.data.uniform_(-init_w, init_w)

    def forward(self, state, action):
        """Return Q-values with shape (batch, 1)."""
        joint = torch.cat([state, action], 1)
        hidden = F.relu(self.linear2(F.relu(self.linear1(joint))))
        return self.linear3(hidden)
class PolicyNetwork(nn.Module):
    """Actor network: maps a state to an action vector squashed into [-1, 1]."""

    def __init__(self, num_inputs, num_actions, hidden_size, init_w=3e-3):
        super(PolicyNetwork, self).__init__()
        self.linear1 = nn.Linear(num_inputs, hidden_size)
        self.linear2 = nn.Linear(hidden_size, hidden_size)
        self.linear3 = nn.Linear(hidden_size, num_actions)
        # Small uniform init on the output layer yields near-zero actions at
        # the start of training.
        self.linear3.weight.data.uniform_(-init_w, init_w)
        self.linear3.bias.data.uniform_(-init_w, init_w)

    def forward(self, state):
        """Return actions with shape (batch, num_actions), tanh-bounded."""
        hidden = F.relu(self.linear2(F.relu(self.linear1(state))))
        return torch.tanh(self.linear3(hidden))
7953f43a2d724a0df9c7ea3aeca8677a64adee20 | 2,364 | py | Python | examples/self_distribution_distillation/self_distribution_distillation_src/uncertainty/categorical.py | yassirf/sequence-training | a7cd7a9bb2f4f1d76c4f62a62704e396ad029540 | [
"MIT"
] | null | null | null | examples/self_distribution_distillation/self_distribution_distillation_src/uncertainty/categorical.py | yassirf/sequence-training | a7cd7a9bb2f4f1d76c4f62a62704e396ad029540 | [
"MIT"
] | null | null | null | examples/self_distribution_distillation/self_distribution_distillation_src/uncertainty/categorical.py | yassirf/sequence-training | a7cd7a9bb2f4f1d76c4f62a62704e396ad029540 | [
"MIT"
] | null | null | null | import numpy as np
import torch
from .estimators import EnsembleCategoricals
from .misc import process_outputs
def compute_token_uncertainties(args, outputs, extra):
    """
    Compute token-level uncertainty measures for a Categorical ensemble.

    :param args: specifies uncertainty estimation parameters
    :param outputs: List of Tensors of size [batch_size, seq_len, vocab]
    :param extra: extra state forwarded to ``process_outputs``
    :return: three Tensors of size [batch_size, seq_len]: entropy of the
        expected distribution, expected entropy, and mutual information,
        each clamped to be non-negative.
    """
    processed = process_outputs(outputs, extra)
    measures = EnsembleCategoricals()(args, processed)
    # clamp_ guards against tiny negative values from numerical error.
    entropy_of_expected = measures['entropy_expected'].clamp_(min=0.0, max=None)
    expected_entropy = measures['expected_entropy'].clamp_(min=0.0, max=None)
    mutual_information = measures['mutual_information'].clamp_(min=0.0, max=None)
    return entropy_of_expected, expected_entropy, mutual_information
def compute_sequence_uncertainties(args, outputs, extra, output_ids, output_length, mask):
    """
    Compute sequence-level log-probability scores for a Categorical ensemble.

    :param args: specifies uncertainty estimation parameters
    :param outputs: List of Tensors of size [batch_size, seq_len, vocab] of logits
    :param extra: extra state forwarded to ``process_outputs``
    :param output_ids: Tensor of size [batch_size, seq_len] of token ids
    :param output_length: Tensor of per-sequence lengths used for averaging
    :param mask: Tensor marking token positions to exclude
    :return: (scores, sentence log-probabilities, token log-probabilities)
    """
    model_outputs = process_outputs(outputs, extra)
    # Stack members: (batch, seqlen, models, vocab), then normalise per model.
    member_log_probs = torch.log_softmax(torch.stack(model_outputs, dim=2), dim=-1)
    num_models = member_log_probs.size(2)
    # Average the member distributions in probability space: (batch, seqlen, vocab).
    avg_log_probs = torch.logsumexp(member_log_probs, dim=2) - np.log(num_models)
    # Gather the log-probability of each reference token: (batch, seqlen).
    token_log_probs = avg_log_probs.gather(-1, output_ids.unsqueeze(-1)).squeeze(2)
    # Zero out positions outside the mask so they do not affect the sums.
    if mask.any():
        token_log_probs.masked_fill_(mask, 0.0)
    log_probs = token_log_probs.sum(dim=1)
    scores = -log_probs / output_length
    return scores, log_probs, token_log_probs
7953f596ae99c9a63c9df6e5724925b0a154ff89 | 3,499 | py | Python | scripts/utils/travelled_dist.py | tpet/rpz_planning | cf52732bfac8aef7d1ba9da20e3930671e142b80 | [
"BSD-3-Clause"
] | null | null | null | scripts/utils/travelled_dist.py | tpet/rpz_planning | cf52732bfac8aef7d1ba9da20e3930671e142b80 | [
"BSD-3-Clause"
] | null | null | null | scripts/utils/travelled_dist.py | tpet/rpz_planning | cf52732bfac8aef7d1ba9da20e3930671e142b80 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import rospy
from std_msgs.msg import Float64
from nav_msgs.msg import Path
from geometry_msgs.msg import PoseStamped
import numpy as np
from ros_numpy import msgify, numpify
import tf2_ros
class TravelledDistPub:
    """
    This ROS node publishes ground truth travelled distance and route.

    It polls TF for the robot's ground-truth pose, accumulates the travelled
    distance, and samples the trajectory into a nav_msgs/Path every
    ``route_wps_dist`` metres.
    """
    def __init__(self):
        self.tf = tf2_ros.Buffer()
        self.tl = tf2_ros.TransformListener(self.tf)
        self.rate = 10 # polling rate in Hz (rate = 10 -> dt = 0.1)
        self.robot = rospy.get_param('robot', 'X1')
        self.world_frame = rospy.get_param('world_frame', 'subt')
        # travelled dist to publish
        self.travelled_dist = 0.0
        self.eps = 0.005  # [m], movements below this are treated as jitter
        self.robot_position = None
        self.initialized_pose = False
        # route to publish
        self.route = Path()
        self.route.header.frame_id = self.world_frame
        self.wps_dist = rospy.get_param('~route_wps_dist', 1.0) # [m], dist between sampled path waypoints
        self.route_pub = rospy.Publisher('~route', Path, queue_size=2)
        self.dist_pub = rospy.Publisher('~travelled_dist', Float64, queue_size=2)
    def run(self):
        """Poll TF at ``self.rate`` and publish distance/route until shutdown."""
        rate = rospy.Rate(self.rate)
        prev_wp = None
        while not rospy.is_shutdown():
            # travelled distance computation
            try:
                transform = self.tf.lookup_transform(self.world_frame, self.robot+'_ground_truth',
                                                     rospy.Time(0), rospy.Duration(1))
                T = numpify(transform.transform)
                # NOTE(review): despite its name, prev_position holds the
                # CURRENT pose sample; self.robot_position holds the previous.
                prev_position = T[:3, 3]
                if not self.initialized_pose:
                    self.robot_position = prev_position
                    prev_wp = prev_position
                    self.initialized_pose = True
                # publish travelled distance so far
                dp = np.linalg.norm(self.robot_position - prev_position)
                dp = dp if dp > self.eps else 0.0 # do not add negligible movement
                self.travelled_dist += dp
                self.robot_position = prev_position
                self.dist_pub.publish(Float64(self.travelled_dist))
                # add waypoints every wps_dist to a route and publish it
                dwp = np.linalg.norm(self.robot_position - prev_wp)
                if dwp >= self.wps_dist:
                    rospy.logdebug('Travelled distance: %.1f', self.travelled_dist)
                    # append wp to path
                    pose = PoseStamped()
                    pose.header.frame_id = self.world_frame
                    pose.header.stamp = rospy.Time.now()
                    pose.pose.position.x = transform.transform.translation.x
                    pose.pose.position.y = transform.transform.translation.y
                    pose.pose.position.z = transform.transform.translation.z
                    pose.pose.orientation = transform.transform.rotation
                    self.route.poses.append(pose)
                    self.route.header.stamp = rospy.Time.now()
                    self.route_pub.publish(self.route)
                    prev_wp = self.robot_position
            except (tf2_ros.LookupException, rospy.exceptions.ROSTimeMovedBackwardsException):
                rospy.logwarn('Robot ground truth pose is not available')
            rate.sleep()
if __name__ == '__main__':
    # Register the node with the ROS master before creating publishers.
    rospy.init_node('travelled_dist_publisher', log_level=rospy.INFO)
    proc = TravelledDistPub()
    proc.run()
| 39.314607 | 107 | 0.592741 |
7953f6b7489acf345c8141ee58005c051f60c80b | 5,542 | py | Python | examples/multi_physics/biot_npbc.py | clazaro/sfepy | 78757a6989d6aaf85a3fb27957b9179c5e2aa2c7 | [
"BSD-3-Clause"
] | 510 | 2015-01-19T16:22:25.000Z | 2022-03-30T19:02:51.000Z | examples/multi_physics/biot_npbc.py | clazaro/sfepy | 78757a6989d6aaf85a3fb27957b9179c5e2aa2c7 | [
"BSD-3-Clause"
] | 402 | 2015-01-22T10:57:50.000Z | 2022-03-30T15:19:23.000Z | examples/multi_physics/biot_npbc.py | clazaro/sfepy | 78757a6989d6aaf85a3fb27957b9179c5e2aa2c7 | [
"BSD-3-Clause"
] | 156 | 2015-01-05T14:23:38.000Z | 2022-03-22T13:08:30.000Z | r"""
Biot problem - deformable porous medium with the no-penetration boundary
condition on a boundary region.
Find :math:`\ul{u}`, :math:`p` such that:
.. math::
\int_{\Omega} D_{ijkl}\ e_{ij}(\ul{v}) e_{kl}(\ul{u})
- \int_{\Omega} p\ \alpha_{ij} e_{ij}(\ul{v})
= 0
\;, \quad \forall \ul{v} \;,
\int_{\Omega} q\ \alpha_{ij} e_{ij}(\ul{u})
+ \int_{\Omega} K_{ij} \nabla_i q \nabla_j p
= 0
\;, \quad \forall q \;,
\ul{u} \cdot \ul{n} = 0 \mbox{ on } \Gamma_{walls} \;,
where
.. math::
D_{ijkl} = \mu (\delta_{ik} \delta_{jl}+\delta_{il} \delta_{jk}) +
\lambda \ \delta_{ij} \delta_{kl}
\;.
"""
from __future__ import absolute_import
import os
import numpy as nm
from sfepy.linalg import get_coors_in_tube
from sfepy.mechanics.matcoefs import stiffness_from_lame
def define():
    """SfePy entry point: build the problem description for the bundled mesh."""
    from sfepy import data_dir
    mesh_path = data_dir + '/meshes/3d/cylinder.mesh'
    return define_input(mesh_path, 'output')
def cinc_simple(coors, mode):
    """
    Select coordinates inside one of three tubes along the x axis.

    Mode 0 is the inlet, 1 the outlet, 2 the rigid core; each is described
    by (centre, radius, length).
    """
    axis = nm.array([1, 0, 0], nm.float64)
    tubes = {
        0: (nm.array([0.0, 0.0, 0.0], nm.float64), 0.019, 0.00002),   # In
        1: (nm.array([0.1, 0.0, 0.0], nm.float64), 0.019, 0.00002),   # Out
        2: (nm.array([0.05, 0.0, 0.0], nm.float64), 0.015, 0.03),     # Rigid
    }
    if mode not in tubes:
        raise ValueError('unknown mode %s!' % mode)
    centre, radius, length = tubes[mode]
    return get_coors_in_tube(coors,
                             centre, axis, -1, radius, length)
def define_regions(filename):
    """
    Return ``(regions, dim)`` describing the mesh regions of the problem.

    NOTE(review): ``filename.find('simple.mesh')`` returns -1 (truthy) when
    the substring is absent and 0 (falsy) only when the name *starts* with
    it, so this branch is taken for virtually any path — including the
    cylinder.mesh actually used by ``define()``. A substring containment
    test was presumably intended; confirm before changing, since "fixing"
    the comparison would make the cylinder mesh raise ValueError.
    """
    if filename.find('simple.mesh'):
        dim = 3
        regions = {
            'Omega' : 'all',
            'Walls' : ('vertices of surface -v (r.Outlet +f r.Inlet)', 'facet'),
            'Inlet' : ('vertices by cinc_simple0', 'facet'),
            'Outlet' : ('vertices by cinc_simple1', 'facet'),
            'Rigid' : 'vertices by cinc_simple2',
        }
    else:
        raise ValueError('unknown mesh %s!' % filename)
    return regions, dim
def get_pars(ts, coor, mode, output_dir='.', **kwargs):
    """
    Material parameter function: elastic stiffness D, Biot coefficient
    alpha and permeability K, tiled per quadrature point.

    Returns None for any mode other than 'qp', matching the SfePy material
    function convention.
    """
    if mode != 'qp':
        return None
    n_qp, dim = coor.shape
    sym = (dim + 1) * dim // 2
    # Biot coefficients: normal components 0.132, shear components 0.092.
    alpha = nm.zeros((sym, 1), dtype=nm.float64)
    alpha[:dim] = 0.132
    alpha[dim:sym] = 0.092
    # Isotropic permeability.
    perm = nm.eye(dim, dtype=nm.float64)
    return {
        'D': nm.tile(stiffness_from_lame(dim, lam=1.7, mu=0.3), (n_qp, 1, 1)),
        'alpha': nm.tile(alpha, (n_qp, 1, 1)),
        'K': nm.tile(perm, (n_qp, 1, 1)),
    }
def post_process(out, pb, state, extend=False):
    """Append element-averaged diffusion velocity and Cauchy stress to *out*."""
    from sfepy.base.base import Struct
    evaluations = (
        ('dvel', 'ev_diffusion_velocity.i.Omega( m.K, p )'),
        ('cauchy_stress', 'ev_cauchy_stress.i.Omega( m.D, u )'),
    )
    for key, expression in evaluations:
        data = pb.evaluate(expression, mode='el_avg')
        out[key] = Struct(name='output_data',
                          mode='cell', data=data, dofs=None)
    return out
def define_input(filename, output_dir):
    '''
    Build the SfePy problem description dictionary.

    Every local name defined in this function becomes a configuration item
    via ``return locals()``, so no auxiliary variables may be introduced
    here without ending up in the problem description.
    '''
    filename_mesh = filename
    options = {
        'output_dir' : output_dir,
        'output_format' : 'vtk',
        'post_process_hook' : 'post_process',
        'ls' : 'ls',
        'nls' : 'newton',
    }
    # Region-selection and material callbacks, wrapped per SfePy convention.
    functions = {
        'cinc_simple0' : (lambda coors, domain:
                          cinc_simple(coors, 0),),
        'cinc_simple1' : (lambda coors, domain:
                          cinc_simple(coors, 1),),
        'cinc_simple2' : (lambda coors, domain:
                          cinc_simple(coors, 2),),
        'get_pars' : (lambda ts, coors, mode=None, **kwargs:
                      get_pars(ts, coors, mode,
                               output_dir=output_dir, **kwargs),),
    }
    regions, dim = define_regions(filename_mesh)
    # Vector displacement field and scalar pressure field, both order 1.
    field_1 = {
        'name' : 'displacement',
        'dtype' : nm.float64,
        'shape' : dim,
        'region' : 'Omega',
        'approx_order' : 1,
    }
    field_2 = {
        'name' : 'pressure',
        'dtype' : nm.float64,
        'shape' : 1,
        'region' : 'Omega',
        'approx_order' : 1,
    }
    variables = {
        'u' : ('unknown field', 'displacement', 0),
        'v' : ('test field', 'displacement', 'u'),
        'p' : ('unknown field', 'pressure', 1),
        'q' : ('test field', 'pressure', 'p'),
    }
    # Driving pressures: +1 at the (clamped) inlet, -1 at the outlet.
    ebcs = {
        'inlet' : ('Inlet', {'p.0' : 1.0, 'u.all' : 0.0}),
        'outlet' : ('Outlet', {'p.0' : -1.0}),
    }
    # Linear-combination BCs: rigid-body outlet, no-penetration walls.
    lcbcs = {
        'rigid' : ('Outlet', {'u.all' : None}, None, 'rigid'),
        'no_penetration' : ('Walls', {'u.all' : None}, None,
                            'no_penetration', None),
    }
    material_1 = {
        'name' : 'm',
        'function' : 'get_pars',
    }
    integral_1 = {
        'name' : 'i',
        'order' : 2,
    }
    equations = {
        'eq_1' :
        """dw_lin_elastic.i.Omega( m.D, v, u )
         - dw_biot.i.Omega( m.alpha, v, p )
         = 0""",
        'eq_2' :
        """dw_biot.i.Omega( m.alpha, u, q )
         + dw_diffusion.i.Omega( m.K, q, p )
         = 0""",
    }
    solver_0 = {
        'name' : 'ls',
        'kind' : 'ls.scipy_direct', # Direct solver.
    }
    solver_1 = {
        'name' : 'newton',
        'kind' : 'nls.newton',
    }
    return locals()
| 27.71 | 80 | 0.496211 |
7953f6bb99158441109893b302a3c7d1f1a3ca52 | 1,686 | py | Python | sparkql/accessors.py | radeklat/sparkql | 57d55c7599460f2e7e5957c037d7c25cedb92647 | [
"MIT"
] | null | null | null | sparkql/accessors.py | radeklat/sparkql | 57d55c7599460f2e7e5957c037d7c25cedb92647 | [
"MIT"
] | null | null | null | sparkql/accessors.py | radeklat/sparkql | 57d55c7599460f2e7e5957c037d7c25cedb92647 | [
"MIT"
] | null | null | null | """Accessor functions in order to access field paths, field names, and related field attributes."""
# pylint: disable=protected-access
from typing import Sequence
from pyspark.sql import Column
from pyspark.sql import functions as sql_funcs
from pyspark.sql.types import StructField
from sparkql.fields.base import BaseField
def path_seq(field: BaseField) -> Sequence[str]:
    """Return the field names on the path from the root to `field`."""
    # Walk up the parent chain and reverse once, instead of repeatedly
    # inserting at index 0 (which is O(n) per insertion, O(n^2) overall).
    fields = [field]
    while fields[-1]._parent is not None:
        fields.append(fields[-1]._parent)
    fields.reverse()
    assert all(
        field._resolve_field_name() is not None for field in fields
    ), f"Encountered an unset name while traversing path. Path is: {_pretty_path(fields)}"
    return [f._field_name for f in fields]
def path_str(field: BaseField) -> str:
    """Return the dot-delimited string path to `field`."""
    parts = path_seq(field)
    return ".".join(parts)
def path_col(field: BaseField) -> Column:
    """Return a Spark Column pointing at `field` via its full parent path."""
    names = iter(path_seq(field))
    # path_seq always yields at least the field itself, so next() is safe.
    column: Column = sql_funcs.col(next(names))  # pylint: disable=no-member
    for child_name in names:
        column = column[child_name]
    return column
def name(field: BaseField) -> str:
    """Return the field name assigned to `field`."""
    field_name: str = field._field_name
    return field_name
def struct_field(field: BaseField) -> StructField:
    """Return the PySpark ``StructField`` equivalent of `field`."""
    spark_field: StructField = field._spark_struct_field
    return spark_field
def _pretty_path(path: Sequence[BaseField]):
"""Build pretty string of path, for debug and/or error purposes."""
return "< " + " -> ".join(f"'{field._resolve_field_name()}' ({type(field).__name__})" for field in path) + " >"
| 31.222222 | 115 | 0.697509 |
7953f7bbfd1f41ed9738ffec1e04dd2d56131960 | 4,118 | py | Python | release/sign-launcher.py | sambacha/repo | e4d8f5a2f332854acb140b22f1b96fd06c42b66e | [
"Apache-2.0"
] | 1 | 2021-12-11T01:57:58.000Z | 2021-12-11T01:57:58.000Z | release/sign-launcher.py | sambacha/repo | e4d8f5a2f332854acb140b22f1b96fd06c42b66e | [
"Apache-2.0"
] | null | null | null | release/sign-launcher.py | sambacha/repo | e4d8f5a2f332854acb140b22f1b96fd06c42b66e | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright (C) 2020 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper tool for signing repo launcher scripts correctly.
This is intended to be run only by the official Repo release managers.
"""
import argparse
import os
import subprocess
import sys
import util
def sign(opts):
    """Sign the launcher with each configured key and write the .asc file."""
    # Collect the armored signatures in a list and join once, rather than
    # accumulating with string +=.
    signatures = []
    for key in opts.keys:
        # We use ! at the end of the key so that gpg uses this specific key.
        # Otherwise it uses the key as a lookup into the overall key and uses the
        # default signing key. i.e. It will see that KEYID_RSA is a subkey of
        # another key, and use the primary key to sign instead of the subkey.
        cmd = [
            "gpg",
            "--homedir",
            opts.gpgdir,
            "-u",
            f"{key}!",
            "--batch",
            "--yes",
            "--armor",
            "--detach-sign",
            "--output",
            "-",
            opts.launcher,
        ]
        ret = util.run(opts, cmd, encoding="utf-8", stdout=subprocess.PIPE)
        signatures.append(ret.stdout)
    # Save the combined signatures into one file.
    with open(f"{opts.launcher}.asc", "w", encoding="utf-8") as fp:
        fp.write("".join(signatures))
def check(opts):
    """Verify the detached signature against the launcher."""
    cmd = ["gpg", "--verify", f"{opts.launcher}.asc"]
    util.run(opts, cmd)
def postmsg(opts):
    """Helpful info to show at the end for release manager."""
    # The f-string body is user-facing console output; the gs:// bucket
    # paths and gsutil examples must be kept verbatim.
    print(
        f"""
Repo launcher bucket:
  gs://git-repo-downloads/
To upload this launcher directly:
  gsutil cp -a public-read {opts.launcher} {opts.launcher}.asc gs://git-repo-downloads/
NB: You probably want to upload it with a specific version first, e.g.:
  gsutil cp -a public-read {opts.launcher} gs://git-repo-downloads/repo-3.0
  gsutil cp -a public-read {opts.launcher}.asc gs://git-repo-downloads/repo-3.0.asc
"""
    )
def get_parser():
    """Build the CLI argument parser for the signing tool."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "-n", "--dry-run",
        dest="dryrun", action="store_true",
        help="show everything that would be done")
    parser.add_argument(
        "--gpgdir",
        default=os.path.join(util.HOMEDIR, ".gnupg", "repo"),
        help="path to dedicated gpg dir with release keys (default: ~/.gnupg/repo/)")
    parser.add_argument(
        "--keyid",
        dest="keys", default=[], action="append",
        help="alternative signing keys to use")
    parser.add_argument(
        "launcher",
        default=os.path.join(util.TOPDIR, "repo"), nargs="?",
        help="the launcher script to sign")
    return parser
def main(argv):
    """Parse arguments, sign the launcher, verify it, and print next steps."""
    parser = get_parser()
    opts = parser.parse_args(argv)
    # Validate inputs up front; parser.error() exits with a usage message.
    if not os.path.exists(opts.gpgdir):
        parser.error(f"--gpgdir does not exist: {opts.gpgdir}")
    if not os.path.exists(opts.launcher):
        parser.error(f"launcher does not exist: {opts.launcher}")
    opts.launcher = os.path.relpath(opts.launcher)
    print(
        f'Signing "{opts.launcher}" launcher script and saving to '
        f'"{opts.launcher}.asc"'
    )
    if opts.keys:
        print(f'Using custom keys to sign: {" ".join(opts.keys)}')
    else:
        print("Using official Repo release keys to sign")
        opts.keys = [util.KEYID_DSA, util.KEYID_RSA, util.KEYID_ECC]
    # Run the pipeline: import keys, sign, verify, print follow-up info.
    for step in (util.import_release_key, sign, check, postmsg):
        step(opts)
    return 0
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| 28.597222 | 88 | 0.616561 |
7953f9d21dbd0d6b5eeeb0ced4c7d078a81ced37 | 773 | py | Python | setup.py | vdltech/napalm-ruckus-fastiron | 4f49ff5dbceefef2336951236e043ae6c7ea38d5 | [
"Apache-2.0"
] | null | null | null | setup.py | vdltech/napalm-ruckus-fastiron | 4f49ff5dbceefef2336951236e043ae6c7ea38d5 | [
"Apache-2.0"
] | null | null | null | setup.py | vdltech/napalm-ruckus-fastiron | 4f49ff5dbceefef2336951236e043ae6c7ea38d5 | [
"Apache-2.0"
] | null | null | null | """setup.py file."""
from setuptools import find_packages, setup
__author__ = 'Johan van den Dorpe'
with open("requirements.txt", "r") as fs:
reqs = [r for r in fs.read().splitlines()]
setup(
name="napalm-brocade-fastiron",
version="0.11",
packages=find_packages(),
author="Johan van den Dorpe",
description="Network Automation and Programmability Abstraction Layer with Multivendor support",
classifiers=[
'Topic :: Utilities',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS',
],
url="https://github.com/vdltech/napalm-brocade-fastiron",
include_package_data=True,
install_requires=reqs,
)
| 29.730769 | 100 | 0.65718 |
7953fae78c2a926fe227be8a6f7620531224614d | 260 | py | Python | consultas/consultas/doctype/antibiotico/antibiotico.py | Lewinta/Consultas | e01ad870a2bad0eb5938d8800e3e2934402fce62 | [
"MIT"
] | null | null | null | consultas/consultas/doctype/antibiotico/antibiotico.py | Lewinta/Consultas | e01ad870a2bad0eb5938d8800e3e2934402fce62 | [
"MIT"
] | null | null | null | consultas/consultas/doctype/antibiotico/antibiotico.py | Lewinta/Consultas | e01ad870a2bad0eb5938d8800e3e2934402fce62 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2017, Lewin Villar and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class Antibiotico(Document):
	"""Frappe DocType controller for Antibiotico; no custom server-side logic."""
	pass
| 23.636364 | 51 | 0.780769 |
7953faf5f5cca2ef0f3c179174903cf9902a33a3 | 15,939 | py | Python | cogs/admin.py | SarrowSeviper/Bui | 8d1c89566d5be1f882666cabc77cfbfeee5d56e3 | [
"MIT"
] | 1 | 2018-10-13T16:06:02.000Z | 2018-10-13T16:06:02.000Z | cogs/admin.py | SarrowSeviper/Bui | 8d1c89566d5be1f882666cabc77cfbfeee5d56e3 | [
"MIT"
] | 13 | 2018-10-31T21:15:05.000Z | 2018-10-31T21:15:16.000Z | cogs/admin.py | BuiArtSociety/buibot | b362aa8e61cc79fd3b9575f364ea610ba4d0a0d9 | [
"MIT"
] | 1 | 2018-11-25T11:54:15.000Z | 2018-11-25T11:54:15.000Z | import time
import aiohttp
import traceback
import discord
import textwrap
import io
import datetime
import random
import json
import shlex
import gc
import os
from subprocess import Popen, PIPE
from dhooks import Webhook
from contextlib import redirect_stdout
from copy import copy
from typing import Union
from utils import repo, default, http, dataIO
from discord.ext import commands
from utils.formats import TabularData, Plural
class Admin(commands.Cog):
    """Owner-only administration cog for the Bui discord bot.

    Groups bot maintenance commands (extension reload, eval, SQL/shell
    access, presence changes) plus a few guild-specific helpers
    (warnings reset, sketchdaily management).  Nearly every command is
    gated by ``commands.check(repo.is_owner)``.
    """

    def __init__(self, bot):
        # Keep a handle on the bot and the on-disk JSON config; _last_result
        # caches the value of the most recent successful `eval` invocation.
        self.bot = bot
        self.config = default.get("config.json")
        self._last_result = None

    @staticmethod
    def cleanup_code(content):
        """Automatically removes code blocks from the code."""
        # remove ```py\n```
        if content.startswith('```') and content.endswith('```'):
            return '\n'.join(content.split('\n')[1:-1])
        # remove `foo`
        return content.strip('` \n')

    @staticmethod
    def generatecode():
        """Return a random 5-digit numeric id as a string."""
        code = random.randint(11111, 99999)
        return f"{code}"

    @commands.command()
    @commands.check(repo.is_owner)
    async def reload(self, ctx, name: str):
        """ Reloads an extension. """
        try:
            self.bot.unload_extension(f"cogs.{name}")
            self.bot.load_extension(f"cogs.{name}")
        except Exception as e:
            await ctx.send(f"```\n{e}```")
            return
        await ctx.send(f"Reloaded extension **{name}.py**")

    @commands.command()
    @commands.check(repo.is_owner)
    async def reboot(self, ctx):
        """ Reboot the bot """
        # NOTE(review): time.sleep blocks the event loop for a second;
        # presumably acceptable right before logout.
        await ctx.send('Rebooting now...')
        time.sleep(1)
        await self.bot.db.close()
        await self.bot.logout()

    @commands.command()
    @commands.check(repo.is_owner)
    async def load(self, ctx, name: str):
        """ Reloads an extension. """
        try:
            self.bot.load_extension(f"cogs.{name}")
        except Exception as e:
            await ctx.send(f"```diff\n- {e}```")
            return
        await ctx.send(f"Loaded extension **{name}.py**")

    @commands.command()
    @commands.check(repo.is_owner)
    async def unload(self, ctx, name: str):
        """ Reloads an extension. """
        try:
            self.bot.unload_extension(f"cogs.{name}")
        except Exception as e:
            await ctx.send(f"```diff\n- {e}```")
            return
        await ctx.send(f"Unloaded extension **{name}.py**")

    @commands.group()
    @commands.check(repo.is_owner)
    async def change(self, ctx):
        # Parent group for presence/profile changes; with no subcommand,
        # show the group's help pages instead.
        if ctx.invoked_subcommand is None:
            _help = await ctx.bot.formatter.format_help_for(ctx, ctx.command)
            for page in _help:
                await ctx.send(page)

    @change.command(name="playing")
    @commands.check(repo.is_owner)
    async def change_playing(self, ctx, *, playing: str):
        """ Change playing status. """
        try:
            await self.bot.change_presence(
                activity=discord.Game(type=0, name=playing),
                status=discord.Status.online
            )
            # Persist the new status so it survives a restart.
            dataIO.change_value("config.json", "playing", playing)
            await ctx.send(f"Successfully changed playing status to **{playing}**")
        except discord.InvalidArgument as err:
            await ctx.send(err)
        except Exception as e:
            await ctx.send(e)

    @change.command(name="username")
    @commands.check(repo.is_owner)
    async def change_username(self, ctx, *, name: str):
        """ Change username. """
        try:
            await self.bot.user.edit(username=name)
            await ctx.send(f"Successfully changed username to **{name}**")
        except discord.HTTPException as err:
            await ctx.send(err)

    @change.command(name="nickname")
    @commands.check(repo.is_owner)
    async def change_nickname(self, ctx, *, name: str = None):
        """ Change nickname. """
        # Passing nick=None clears the nickname.
        try:
            await ctx.guild.me.edit(nick=name)
            if name:
                await ctx.send(f"Successfully changed nickname to **{name}**")
            else:
                await ctx.send("Successfully removed nickname")
        except Exception as err:
            await ctx.send(err)

    @change.command(name="avatar")
    @commands.check(repo.is_owner)
    async def change_avatar(self, ctx, url: str = None):
        """ Change avatar. """
        # Accept either an attachment on the invoking message or a URL
        # argument (angle brackets stripped so <url> pastes work).
        if url is None and len(ctx.message.attachments) == 1:
            url = ctx.message.attachments[0].url
        else:
            url = url.strip('<>')
        try:
            bio = await http.get(url, res_method="read")
            await self.bot.user.edit(avatar=bio)
            await ctx.send(f"Successfully changed the avatar. Currently using:\n{url}")
        except aiohttp.InvalidURL:
            await ctx.send("The URL is invalid...")
        except discord.InvalidArgument:
            await ctx.send("This URL does not contain a useable image")
        except discord.HTTPException as err:
            await ctx.send(err)

    @commands.command()
    @commands.check(repo.is_owner)
    async def args(self, ctx, *args):
        """Returns the number of args"""
        await ctx.send('{} arguments: {}'.format(len(args), ', '.join(args)))

    @commands.command()
    async def amiadmin(self, ctx):
        """ Are you admin? """
        if ctx.author.id in self.config.owners:
            return await ctx.send(f"Yes **{ctx.author.name}** you are admin! ✅")
        await ctx.send(f"no, heck off {ctx.author.name}")

    @commands.command()
    @commands.guild_only()
    @commands.check(repo.is_owner)
    async def resetwarns(self, ctx, member: discord.Member):
        """ Resets user warnings """
        # Insert-or-update the member's row in the warnings table.
        query = "SELECT warnings FROM warnings WHERE userid = $1;"
        row = await self.bot.db.fetchrow(query, member.id)
        if row is None:
            await ctx.send("They are not registered in the database! I'll add them now!")
            query = "INSERT INTO warnings VALUES ($1, 0);"
            await self.bot.db.execute(query, member.id)
        else:
            query = "UPDATE warnings SET warnings = 0 WHERE userid = $1;"
            await self.bot.db.execute(query, member.id)
        # Hard-coded mod-log channel id; presumably guild-specific — confirm.
        logchannel = self.bot.get_channel(499327315088769025)
        await ctx.send(f"I reset {member.mention}'s warns!")
        await logchannel.send(f"I reset {member.mention}'s warns!")

    @commands.command()
    @commands.guild_only()
    @commands.check(repo.is_owner)
    async def setupvotes(self, ctx, member: discord.Member, votestoset: int = 0):
        """Does what it says on the tin"""
        # Insert-or-update the member's upvote count in the artstats table.
        query = "SELECT * FROM artstats WHERE userid=$1"
        row = await self.bot.db.fetchrow(query, member.id)
        if row is None:
            query = "INSERT INTO artstats VALUES ($1, $2);"
            await self.bot.db.execute(query, member.id, votestoset)
            return await ctx.send(f"**{member.name}** has been set with **{votestoset}** upvotes.")
        else:
            query = "UPDATE artstats SET upvotes=$2 WHERE userid=$1"
            await self.bot.db.execute(query, member.id, votestoset)
            await ctx.send(f"**{member.name}** has been set with **{votestoset}** upvotes.")

    @commands.command()
    @commands.guild_only()
    @commands.check(repo.is_owner)
    async def manualsketchdaily(self, ctx):
        """
        Manually send off a daily sketch
        """
        # Pick one random suggestion, announce it via webhook, then delete it
        # so it cannot be drawn again.
        dayandmonth = datetime.date.today()
        row = await self.bot.db.fetchrow("SELECT * FROM sketchdaily ORDER BY RANDOM() LIMIT 1;")
        if row is None:
            return print("There are no suggestions...")
        print('True, sending webhook message')
        webhook = Webhook(url=f'{self.config.webhookurl}', content=f"<@&509164409604669450>\n\nThe prompt for {dayandmonth.day}/{dayandmonth.month}/{dayandmonth.year} is:\n\n**{row['idea']}**\n\nIt was suggested by **{row['artist']}**\n\nPlease post your submission below this line!\n\n===================")
        webhook.execute()
        sketchcode = row['code']
        query = "DELETE FROM sketchdaily WHERE code=$1;"
        await self.bot.db.execute(query, sketchcode)

    @commands.command()
    @commands.guild_only()
    @commands.check(repo.is_owner)
    async def registersketch(self, ctx, artist: str = None, *, sketch: str = None):
        """
        Adds a database entry for sketchdaily
        """
        if artist is None:
            return await ctx.send("Please include a user!")
        if sketch is None:
            return await ctx.send("Please include an idea!")
        # Tag the suggestion with a random 5-digit code used as its key.
        code = self.generatecode()
        query = "INSERT INTO sketchdaily VALUES ($1, $2, $3);"
        await self.bot.db.execute(query, int(code), artist, sketch)
        await ctx.send(f"I have successfully added the idea \"{sketch}\" by \"{artist}\" with the tag {code} to the database!")

    @commands.command(pass_context=True, name='eval')
    @commands.check(repo.is_owner)
    async def _eval(self, ctx, *, body: str):
        """Evaluates a code"""
        # Execution environment exposed to the evaluated snippet.
        env = {
            'bot': self.bot,
            'ctx': ctx,
            'channel': ctx.channel,
            'author': ctx.author,
            'guild': ctx.guild,
            'message': ctx.message,
            '_': self._last_result
        }
        # Hard-coded: only this single user id may eval, even among owners.
        if ctx.author.id != 127452209070735361:
            return
        if "bot.http.token" in body:
            return await ctx.send(f"You can't take my token {ctx.author.name}")
        env.update(globals())
        body = self.cleanup_code(body)
        stdout = io.StringIO()
        # Wrap the snippet in an async function so `await` works inside it.
        to_compile = f'async def func():\n{textwrap.indent(body, "  ")}'
        try:
            exec(to_compile, env)
        except Exception as e:
            return await ctx.send(f'```py\n{e.__class__.__name__}: {e}\n```')
        func = env['func']
        try:
            with redirect_stdout(stdout):
                ret = await func()
        except Exception as e:
            value = stdout.getvalue()
            await ctx.send(f'```py\n{value}{traceback.format_exc()}\n```')
        else:
            value = stdout.getvalue()
            reactiontosend = self.bot.get_emoji(508388437661843483)
            await ctx.message.add_reaction(reactiontosend)
            if ret is None:
                if value:
                    await ctx.send(f'```py\n{value}\n```')
            else:
                # Redact the real token if the result happens to contain it.
                if self.config.token in ret:
                    ret = self.config.realtoken
                self._last_result = ret
                await ctx.send(f'Inputted code:\n```py\n{body}\n```\n\nOutputted Code:\n```py\n{value}{ret}\n```')

    @commands.group(aliases=["as"])
    @commands.check(repo.is_owner)
    async def sudo(self, ctx):
        """Run a cmd under an altered context
        """
        if ctx.invoked_subcommand is None:
            await ctx.send("...")

    @sudo.command(aliases=["u", "--u", "--user", "user"])
    @commands.check(repo.is_owner)
    async def sudo_user(self, ctx, who: Union[discord.Member, discord.User], *, command: str):
        """Run a cmd under someone else's name
        """
        # Re-dispatch a copied message object with the author swapped out.
        msg = copy(ctx.message)
        msg.author = who
        msg.content = ctx.prefix + command
        new_ctx = await self.bot.get_context(msg)
        await self.bot.invoke(new_ctx)

    @sudo.command(aliases=["c", "--c", "--channel", "channel"])
    @commands.check(repo.is_owner)
    async def sudo_channel(self, ctx, chid: int, *, command: str):
        """Run a command as another user."""
        # Re-dispatch a copied message object with the channel swapped out.
        cmd = copy(ctx.message)
        cmd.channel = self.bot.get_channel(chid)
        cmd.content = ctx.prefix + command
        new_ctx = await self.bot.get_context(cmd)
        await self.bot.invoke(new_ctx)

    @commands.command()
    @commands.check(repo.is_owner)
    async def blacklist(self, ctx, uid: int):
        # Append the id to blacklist.json in place (seek(0) + truncate()
        # rewrites the file with the updated JSON document).
        with open("blacklist.json", "r+") as file:
            content = json.load(file)
            content["blacklist"].append(uid)
            file.seek(0)
            json.dump(content, file)
            file.truncate()
        await ctx.send(f"I have successfully blacklisted the id **{uid}**")

    @commands.command()
    @commands.check(repo.is_owner)
    async def cogs(self, ctx):
        """ Gives all loaded cogs """
        mod = ", ".join(list(self.bot.cogs))
        await ctx.send(f"The current modules are:\n```\n{mod}\n```")

    @commands.command(hidden=True)
    @commands.check(repo.is_owner)
    async def sql(self, ctx, *, query: str):
        """Run some SQL."""
        # NOTE(review): `(a or b)` evaluates to `a`, so this check only ever
        # permits the first id — likely a bug; should be `not in (a, b)`.
        if ctx.author.id != (127452209070735361 or 101000550874644480):
            return
        query = self.cleanup_code(query)
        # More than one ';' → treat as a multi-statement script (execute);
        # otherwise fetch rows for display.
        is_multistatement = query.count(";") > 1
        if is_multistatement:
            strategy = self.bot.db.execute
        else:
            strategy = self.bot.db.fetch
        try:
            start = time.perf_counter()
            results = await strategy(query)
            dt = (time.perf_counter() - start) * 1000.0
        except Exception:
            return await ctx.send(f"```py\n{traceback.format_exc()}\n```")
        rows = len(results)
        if is_multistatement or rows == 0:
            return await ctx.send(f"`{dt:.2f}ms: {results}`")
        # Render the result set as an ASCII table.
        headers = list(results[0].keys())
        table = TabularData()
        table.set_columns(headers)
        table.add_rows(list(r.values()) for r in results)
        render = table.render()
        fmt = f"```\n{render}\n```\n*Returned {Plural(row=rows)} in {dt:.2f}ms*"
        # Discord messages cap at 2000 chars; fall back to a file attachment.
        if len(fmt) > 2000:
            fp = io.BytesIO(fmt.encode("utf-8"))
            await ctx.send("Too many results...", file=discord.File(fp, "results.txt"))
        else:
            await ctx.send(fmt)

    @commands.command()
    @commands.check(repo.is_owner)
    async def shell(self, ctx: commands.Context, *, command: str) -> None:
        """ Run a shell command. """
        # Hard-coded: only this single user id may run shell commands.
        if ctx.author.id != 127452209070735361:
            return
        def run_shell(command):
            with Popen(command, stdout=PIPE, stderr=PIPE, shell=True) as proc:
                return [std.decode("utf-8") for std in proc.communicate()]
        command = self.cleanup_code(command)
        argv = shlex.split(command)
        # Run in an executor so the subprocess doesn't block the event loop.
        stdout, stderr = await self.bot.loop.run_in_executor(None, run_shell, argv)
        if stdout:
            if len(stdout) >= 1500:
                print(stdout)
                return await ctx.send("Too big I'll print it instead")
            await ctx.send(f"```\n{stdout}\n```")
        if stderr:
            if len(stderr) >= 1500:
                print(stderr)
                return await ctx.send("Too big I'll print it instead")
            await ctx.send(f"```\n{stderr}\n```")

    @commands.command()
    @commands.guild_only()
    @commands.check(repo.is_owner)
    async def speedup(self, ctx):
        # Force a garbage-collection pass, signalling progress via reactions.
        await ctx.message.add_reaction("a:loading:528744937794043934")
        gc.collect()
        del gc.garbage[:]
        await ctx.message.remove_reaction("a:loading:528744937794043934", member=ctx.me)
        await ctx.message.add_reaction(":done:513831607262511124")

    @commands.command(hidden=True, aliases=["pull"])
    @commands.check(repo.is_owner)
    async def update(self, ctx, silently: bool = False):
        """ Gets latest commits and applies them from git """
        # NOTE(review): the `silently` parameter is accepted but never used.
        def run_shell(command):
            with Popen(command, stdout=PIPE, stderr=PIPE, shell=True) as proc:
                return [std.decode("utf-8") for std in proc.communicate()]
        pull = await self.bot.loop.run_in_executor(
            None, run_shell, "git pull origin master"
        )
        msg = await ctx.send(f"```css\n{pull}\n```")
        # Reload every cog so the freshly pulled code takes effect.
        for file in os.listdir("cogs"):
            if file.endswith(".py"):
                name = file[:-3]
                self.bot.unload_extension(f"cogs.{name}")
                self.bot.load_extension(f"cogs.{name}")
def setup(bot):
    """Entry point used by discord.py to register this cog on the bot."""
    bot.add_cog(Admin(bot))
| 36.981439 | 307 | 0.583098 |
7953fb9f581b085ec1a9f8c794f68b3d1c1df395 | 8,128 | py | Python | nilearn/input_data/tests/test_multi_nifti_masker.py | ryanhammonds/nilearn | f33cd4e4685d9050e5bba0a8ece1b0b0f0ad1be2 | [
"BSD-2-Clause"
] | null | null | null | nilearn/input_data/tests/test_multi_nifti_masker.py | ryanhammonds/nilearn | f33cd4e4685d9050e5bba0a8ece1b0b0f0ad1be2 | [
"BSD-2-Clause"
] | null | null | null | nilearn/input_data/tests/test_multi_nifti_masker.py | ryanhammonds/nilearn | f33cd4e4685d9050e5bba0a8ece1b0b0f0ad1be2 | [
"BSD-2-Clause"
] | null | null | null | """
Test the multi_nifti_masker module
"""
# Author: Gael Varoquaux
# License: simplified BSD
import shutil
from distutils.version import LooseVersion
from tempfile import mkdtemp
import nibabel
import numpy as np
import sklearn
from nibabel import Nifti1Image
from nose import SkipTest
from nose.tools import assert_true, assert_false, assert_raises, assert_equal
from numpy.testing import assert_array_equal
from nilearn._utils.compat import Memory
from nilearn._utils.exceptions import DimensionError
from nilearn._utils.testing import assert_raises_regex, write_tmp_imgs
from nilearn.input_data.multi_nifti_masker import MultiNiftiMasker
from nilearn.image import get_data
def test_auto_mask():
    """Smoke-test mask auto-computation, intersection, and fit/transform errors."""
    # This mostly a smoke test
    data = np.zeros((9, 9, 9))
    data[2:-2, 2:-2, 2:-2] = 10
    img = Nifti1Image(data, np.eye(4))
    masker = MultiNiftiMasker(mask_args=dict(opening=0))
    # Check that if we have not fit the masker we get a intelligible
    # error
    assert_raises(ValueError, masker.transform, [[img, ]])
    # Check error return due to bad data format
    assert_raises(ValueError, masker.fit, img)
    # Smoke test the fit
    masker.fit([[img]])
    # Test mask intersection
    data2 = np.zeros((9, 9, 9))
    data2[1:-3, 1:-3, 1:-3] = 10
    img2 = Nifti1Image(data2, np.eye(4))
    masker.fit([[img, img2]])
    # The computed mask must be the union of the two subjects' masks.
    assert_array_equal(get_data(masker.mask_img_),
                       np.logical_or(data, data2))
    # Smoke test the transform
    masker.transform([[img, ]])
    # It should also work with a 3D image
    masker.transform(img)
    # check exception when transform() called without prior fit()
    masker2 = MultiNiftiMasker(mask_img=img)
    assert_raises_regex(
        ValueError,
        'has not been fitted. ', masker2.transform, img2)
def test_nan():
    """NaN voxels on every face of the volume must be excluded from the mask."""
    volume = np.ones((9, 9, 9))
    # Paint all six boundary faces with NaN, one axis at a time.
    for axis in range(3):
        first = [slice(None)] * 3
        last = [slice(None)] * 3
        first[axis] = 0
        last[axis] = -1
        volume[tuple(first)] = np.nan
        volume[tuple(last)] = np.nan
    volume[3:-3, 3:-3, 3:-3] = 10
    nan_img = Nifti1Image(volume, np.eye(4))
    masker = MultiNiftiMasker(mask_args=dict(opening=0))
    masker.fit([nan_img])
    mask = get_data(masker.mask_img_)
    # The interior survives; every NaN face is dropped.
    assert_true(mask[1:-1, 1:-1, 1:-1].all())
    for axis in range(3):
        first = [slice(None)] * 3
        last = [slice(None)] * 3
        first[axis] = 0
        last[axis] = -1
        assert_false(mask[tuple(first)].any())
        assert_false(mask[tuple(last)].any())
def test_different_affines():
    """Masker must cope with EPI images whose affines differ from the mask's."""
    mask_img = Nifti1Image(np.ones((2, 2, 2), dtype=np.int8),
                           affine=np.diag((4, 4, 4, 1)))
    epi_imgs = [
        Nifti1Image(np.ones((4, 4, 4, 3)), affine=np.diag((2, 2, 2, 1))),
        Nifti1Image(np.ones((3, 3, 3, 3)), affine=np.diag((3, 3, 3, 1))),
    ]
    masker = MultiNiftiMasker(mask_img=mask_img)
    # Round-trip every subject through transform and inverse_transform.
    for extracted in masker.fit_transform(epi_imgs):
        masker.inverse_transform(extracted)
def test_3d_images():
    """MultiNiftiMasker accepts plain 3D inputs but rejects a 4D mask."""
    affine_4mm = np.diag((4, 4, 4, 1))
    mask_img = Nifti1Image(np.ones((2, 2, 2), dtype=np.int8),
                           affine=affine_4mm)
    epi_img1 = Nifti1Image(np.ones((2, 2, 2)), affine=affine_4mm)
    epi_img2 = Nifti1Image(np.ones((2, 2, 2)),
                           affine=np.diag((2, 2, 2, 1)))
    # Mostly a smoke test: one output per input image.
    epis = MultiNiftiMasker(mask_img=mask_img).fit_transform(
        [epi_img1, epi_img2])
    assert_equal(len(epis), 2)
    # A 4D image is not a valid mask and must be rejected at fit time.
    mask_img_4d = Nifti1Image(np.ones((2, 2, 2, 2), dtype=np.int8),
                              affine=affine_4mm)
    bad_masker = MultiNiftiMasker(mask_img=mask_img_4d)
    assert_raises_regex(DimensionError,
                        "Input data has incompatible dimensionality: "
                        "Expected dimension is 3D and you provided "
                        "a 4D image.",
                        bad_masker.fit)
def test_joblib_cache():
    """Accessing the mask's data must not change its joblib hash."""
    from nilearn._utils.compat import hash
    # Dummy mask
    mask = np.zeros((40, 40, 40))
    mask[20, 20, 20] = 1
    mask_img = Nifti1Image(mask, np.eye(4))
    with write_tmp_imgs(mask_img, create_files=True) as filename:
        masker = MultiNiftiMasker(mask_img=filename)
        masker.fit()
        mask_hash = hash(masker.mask_img_)
        # Reading the data (which may memory-map the file) must be a no-op
        # as far as the hash is concerned.
        get_data(masker.mask_img_)
        assert_true(mask_hash == hash(masker.mask_img_))
        # enables to delete "filename" on windows
        del masker
def test_shelving():
    """Shelved (memory-mapped, cached) transforms must equal plain transforms."""
    mask_img = Nifti1Image(np.ones((2, 2, 2), dtype=np.int8),
                           affine=np.diag((4, 4, 4, 1)))
    epi_img1 = Nifti1Image(np.ones((2, 2, 2)),
                           affine=np.diag((4, 4, 4, 1)))
    epi_img2 = Nifti1Image(np.ones((2, 2, 2)),
                           affine=np.diag((2, 2, 2, 1)))
    cachedir = mkdtemp()
    try:
        # _shelving=True makes fit_transform return shelved objects whose
        # values are retrieved lazily with .get().
        masker_shelved = MultiNiftiMasker(mask_img=mask_img,
                                          memory=Memory(cachedir=cachedir,
                                                        mmap_mode='r',
                                                        verbose=0))
        masker_shelved._shelving = True
        masker = MultiNiftiMasker(mask_img=mask_img)
        epis_shelved = masker_shelved.fit_transform([epi_img1, epi_img2])
        epis = masker.fit_transform([epi_img1, epi_img2])
        for epi_shelved, epi in zip(epis_shelved, epis):
            epi_shelved = epi_shelved.get()
            assert_array_equal(epi_shelved, epi)

        # Same check for a single (non-list) image input.
        epi = masker.fit_transform(epi_img1)
        epi_shelved = masker_shelved.fit_transform(epi_img1)
        epi_shelved = epi_shelved.get()
        assert_array_equal(epi_shelved, epi)
    finally:
        # enables to delete "filename" on windows
        del masker
        shutil.rmtree(cachedir, ignore_errors=True)
def test_compute_multi_gray_matter_mask():
    """Template-based mask must match the expected voxels, regardless of order."""
    # Check mask is correctly calculated
    imgs = [Nifti1Image(np.random.rand(9, 9, 5), np.eye(4)),
            Nifti1Image(np.random.rand(9, 9, 5), np.eye(4))]
    masker = MultiNiftiMasker(mask_strategy='template')
    masker.fit(imgs)

    # Check that the order of the images does not change the output
    masker2 = MultiNiftiMasker(mask_strategy='template')
    masker2.fit(imgs[::-1])

    mask = masker.mask_img_
    mask2 = masker2.mask_img_

    # Expected mask: a 5x5 patch in the middle slice of the volume.
    mask_ref = np.zeros((9, 9, 5))
    mask_ref[2:7, 2:7, 2] = 1

    np.testing.assert_array_equal(get_data(mask), mask_ref)
    np.testing.assert_array_equal(get_data(mask2), mask_ref)
def test_dtype():
    """dtype='auto' must downcast float64 input to float32 on transform."""
    payload = np.zeros((9, 9, 9), dtype=np.float64)
    payload[2:-2, 2:-2, 2:-2] = 10
    img = Nifti1Image(payload, np.eye(4))
    masker = MultiNiftiMasker(dtype='auto')
    masker.fit([[img]])
    assert masker.transform([[img]])[0].dtype == np.float32
def test_standardization():
    """Check the 'zscore' and 'psc' standardization modes of the masker."""
    data_shape = (9, 9, 5)
    n_samples = 500

    # Random signals with large per-voxel offsets so standardization matters.
    signals = np.random.randn(2, np.prod(data_shape), n_samples)
    means = np.random.randn(2, np.prod(data_shape), 1) * 50 + 1000
    signals += means
    img1 = Nifti1Image(signals[0].reshape(data_shape + (n_samples,)),
                       np.eye(4))
    img2 = Nifti1Image(signals[1].reshape(data_shape + (n_samples,)),
                       np.eye(4))

    mask = Nifti1Image(np.ones(data_shape), np.eye(4))

    # z-score: every voxel time series becomes mean 0, std 1.
    masker = MultiNiftiMasker(mask, standardize='zscore')
    trans_signals = masker.fit_transform([img1, img2])

    for ts in trans_signals:
        np.testing.assert_almost_equal(ts.mean(0), 0)
        np.testing.assert_almost_equal(ts.std(0), 1)

    # psc: percent signal change relative to each voxel's own mean.
    masker = MultiNiftiMasker(mask, standardize='psc')
    trans_signals = masker.fit_transform([img1, img2])

    for ts, s in zip(trans_signals, signals):
        np.testing.assert_almost_equal(ts.mean(0), 0)
        np.testing.assert_almost_equal(ts,
                                       (s / s.mean(1)[:, np.newaxis] *
                                        100 - 100).T)
| 34.735043 | 77 | 0.611467 |
7953fc44bc41a5093f6dcfaf0b57b9c1fd6a1073 | 308 | py | Python | helper_utils/helperutils/boolean_argparse.py | splitstrument/utils | 8e33caec64dfd66369c3a19c069cf4a946a3fc95 | [
"MIT"
] | null | null | null | helper_utils/helperutils/boolean_argparse.py | splitstrument/utils | 8e33caec64dfd66369c3a19c069cf4a946a3fc95 | [
"MIT"
] | null | null | null | helper_utils/helperutils/boolean_argparse.py | splitstrument/utils | 8e33caec64dfd66369c3a19c069cf4a946a3fc95 | [
"MIT"
def str2bool(v):
    """Parse an argparse string argument into a bool.

    Actual booleans pass straight through.  Common textual spellings of
    true/false are matched case-insensitively; anything else raises
    argparse.ArgumentTypeError so argparse reports a clean usage error.
    """
    import argparse
    if isinstance(v, bool):
        return v
    normalized = v.lower()
    truthy = ('yes', 'true', 't', 'y', '1')
    falsy = ('no', 'false', 'f', 'n', '0')
    if normalized in truthy:
        return True
    if normalized in falsy:
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
| 25.666667 | 67 | 0.542208 |
7953fc4782f1a0d6f25c5c6def8ee4e211c3e7c7 | 15,230 | py | Python | quizv2.1.py | nealalan/quiz-giver | 86a089d4d75c91104102f6f6a637f96ba496b521 | [
"Unlicense"
] | 1 | 2019-03-18T15:09:37.000Z | 2019-03-18T15:09:37.000Z | quizv2.1.py | nealalan/quiz-giver | 86a089d4d75c91104102f6f6a637f96ba496b521 | [
"Unlicense"
] | null | null | null | quizv2.1.py | nealalan/quiz-giver | 86a089d4d75c91104102f6f6a637f96ba496b521 | [
"Unlicense"
] | null | null | null | ###########################################################################
# project: https://nealalan.github.io/quiz-giver-sec-plus
# updated: 2018/10/20 v2.1b
#
# python program to give a quiz based on two input files
#
# v2.1b commented out prints used for debugging and validated the Q#
# input incase the user messed it up
# See additional details in print_banner()
import re
import random
import string
from sys import stdout
###########################################################################
# INPUT FILES
FILE_NAME1 = "quiz.txt"
FILE_NAME2 = "key.txt"
# Regex to match any line beginning with a number
QUESTION_REGEX_PATTERN= "^[0-9]"
ANSWER_REGEX_PATTERN= "^[ABCDEFG]\."
QUESTION_NUMBER_PATTERN="^[0-9]{1,6}"
# this isn't liked...
QUESTION_NUMBER_PATTERN_NUMBER="\b\d+\b"
QUESTION_NUMBER_ONLY="^[0-9]"
# potential answers the user would enter
ANSWER_LIST=['A', 'a', 'B', 'b', 'C', 'c', 'D', 'd', 'E', 'e', 'F', 'f', 'AB', 'ab', 'AC', 'ac', 'ACE', 'ace', 'AD', 'ad', 'ACD', 'acd', 'BC', 'bc', 'BD', 'bd', 'CE', 'ce', 'DE', 'de', 'ABE', 'abe', 'ABF', 'abf']
# need logic to handle 1-3 possible answers at once
LETTER_ANSWER_AFTER_NUMBER="[{A-F}]+[\\.]"
LETTER_ANSWER_ONLY="[{A-F}]"
YES=['Y', 'y']
# Parallel module-level stores: questions[i] pairs with answer_key[i].
questions = []
answer_key = []
# User-selectable question-number window (defaults cover everything).
min_question_num = 1
max_question_num = 999999
###########################################################################
def create_key(id_number, correct_answer, explanation):
    """Bundle one answer-key record (id, letters, explanation) as a dict."""
    record = dict(
        id_number=id_number,
        correct_answer=correct_answer,
        explanation=explanation,
    )
    return record
def create_answer(text):
    """Wrap a single answer-choice line in the dict shape quiz() expects."""
    return dict(choice=text)
def create_question(id_number, text):
    """Build a new question record with an empty list of answer choices."""
    question = dict(question=text, id_number=id_number)
    question["choices"] = []
    return question
###########################################################################
def print_banner():
    """Print the ANSI-colored welcome/instructions banner to stdout."""
    print("\n")
    print("\033[1;31;38m/==============================================================\ \033[0m  ")
    print("\033[1;31;38m| NEAL'S LITTLE PYTHON SCRIPT TO STUDY SECURITY PLUS QUESTIONS | \033[0m  ")
    print("\033[1;31;38m|                                                              | \033[0m  ")
    print("\033[1;31;38m|                 https://nealalan.github.io/                  | \033[0m  ")
    print("\033[1;31;38m|                                                              | \033[0m  ")
    print("\033[1;31;38m|  Questions are randomly selected and will continue forever   | \033[0m  ")
    print("\033[1;31;38m|  unless you eXit or you reach 1,000,000 right answers.       | \033[0m  ")
    print("\033[1;31;38m|                                                              | \033[0m  ")
    print("\033[1;31;38m|     Updated to include SY0-401 and SY0-501 questions.        | \033[0m  ")
    print("\033[1;31;38m|         The 501 question numbers start with 50.              | \033[0m  ")
    print("\033[1;31;38m|                                                              | \033[0m  ")
    print("\033[1;31;38m|     **** v2.0 Updated for question subset selection ****     | \033[0m  ")
    print("\033[1;31;38m|    Add the ability to narrow the questions down to 1 book,   | \033[0m  ")
    print("\033[1;31;38m|         one practice test or even a single chapter           | \033[0m  ")
    print("\033[1;31;38m|       **** v2.1 Updated for multi-answer support ****        | \033[0m  ")
    print("\033[1;31;38m|        Multi-answer questions should always be answered      | \033[0m  ")
    print("\033[1;31;38m|          merged and in alpha order. Ex: AC (Not: A C or CA)  | \033[0m  ")
    print("\033[1;31;38m|                                                              | \033[0m  ")
    print("\033[1;31;38m|                                                              | \033[0m  ")
    print("\033[1;31;38m|                        #TEAM_PIGSTICK                        | \033[0m  ")
    print("\033[1;31;38m\==============================================================/ \033[0m  ")
    print
    return
###########################################################################
# EXTRACT QUESTIONS: The input text file must be in a format of questions and answers
# questions: will always be a single line and begin with a number followed
# by a period.
# answers: each answer will always be a single line and start with a letter
# followed by a period.
#
# 1. Test question
# A. Answer 1
# B. Answer 2
# C. Answer 3
#
# re.compile(regex_pattern): Compile a regular expression pattern into a regular expression object
# saving the resulting regular expression object for reuse is more efficient
# <re>.match(string): Search for a regex match at the beginning of the string only (returns 'None' or an object)
# to locate a match anywhere in string, use search() instead
# <re>.findall(string): Return all non-overlapping matches of pattern in string, as a list of strings.
#
# RETURNS an array of questions (though I could have just used a "global questions[]" maybe)
def extract_questions(file_name):
    """Parse the quiz text file into the module-level questions[] list.

    Question lines start with a number, answer-choice lines start with a
    letter and a period; each choice is attached to the most recently
    read question.  Returns the (shared, module-level) questions list.
    """
    read_count = 0
    with open(file_name) as f:
        # compile the patterns once; they are reused for every line
        question_regex = re.compile(QUESTION_REGEX_PATTERN)
        number_regex = re.compile(QUESTION_NUMBER_PATTERN)
        answer_regex = re.compile(ANSWER_REGEX_PATTERN)
        # start parsing the file
        # and writing it the the questions[] appropriately
        for line in f:
            # FIND LINES OF TEXT THAT ARE A QUESTION
            if question_regex.match(line):
                read_count += 1
                # findall() returns the leading digit run as a string; the
                # pattern matches digits only, so int() is safe directly.
                # (The old int(filter(str.isdigit, ...)) only worked on
                # Python 2; this form works on both 2 and 3.)
                # Note: The line number is not a consecutive int in the quiz data!
                line_number_string = number_regex.findall(line)
                line_number = int(line_number_string[0])
                questions.append(create_question(line_number, line))
            # FIND LINES OF TEXT THAT ARE AN ANSWER CHOICE FOR THE QUESTION
            elif answer_regex.match(line):
                # attach the choice to the most recently added question
                questions[-1]['choices'].append(create_answer(line))
    print('QUESTIONS ADDED: ' + str(read_count))
    return questions
###########################################################################
# EXTRACT ANSWER KEY: The input text file must be in a format of answers
# Question Number, followed by a period, correct answer(s), followed by a period,
# the explanation of the answer
#
# 1. AC. The answer is both A and C.
#
def extract_key(file_name):
    """Parse the answer-key text file into the module-level answer_key[] list.

    Each key line looks like '12. AC. explanation...'; the leading number
    and the answer letter(s) are pulled out with regexes and the whole
    line is kept as the explanation.
    """
    read_count = 0
    with open(file_name) as f:
        # create the search parameters
        question_regex = re.compile(QUESTION_REGEX_PATTERN)
        correct_answer_letter_regex = re.compile(LETTER_ANSWER_AFTER_NUMBER)
        for line in f:
            # if the Q & A are out of order, print and check through
            #print questions[line_number]
            if question_regex.match(line):
                read_count += 1
                # same dirty number extraction as above
                # NOTE(review): int(filter(...)) is Python-2-only; on
                # Python 3 filter() returns an iterator and int() fails.
                line_number_string = question_regex.findall(line)
                line_number = int(filter(str.isdigit, line_number_string[0]))
                # THIS CODE NEEDS TO BE UPDATED TO HANDLE MULTPLE LETTER ANSWERS
                # Only scan the first 15 characters for the 'AC.' token.
                correct_letter_a = correct_answer_letter_regex.findall(line,0,15)
                #print(str(line_number) + ': ' + str(correct_letter_a))
                # Split the combined token into a list of single letters.
                correct_letter = re.findall(LETTER_ANSWER_ONLY,correct_letter_a[0])
                #print(str(line_number) + ': ' + str(correct_letter))
                answer_key.append(create_key(line_number,correct_letter,line))
                #print('ANSWERS ADDED: ' + str(line_number))
    return answer_key
###########################################################################
def question_range():
    """Interactively ask whether to restrict the quiz to a question-number range.

    Sets the module-level min_question_num / max_question_num globals and
    re-prompts until min <= max.  (Python 2: uses raw_input and bare print.)
    """
    # allow the global variables to be set in the function
    global min_question_num
    global max_question_num
    i = raw_input('\033[1;35;38m QUESTION RANGE? (Y or N)  \033[0m  ')
    if i in YES:
        print
        print("\033[1;31;38m/==============================================================\ \033[0m  ")
        print("\033[1;31;38m| SY0-401 - Questions range from 1 - 11020                     | \033[0m  ")
        print("\033[1;31;38m| SY0-501 - Questions range from 500001 - 511015               | \033[0m  ")
        print("\033[1;31;38m|    X##XXX - ## equals a chapter number                       | \033[0m  ")
        print("\033[1;31;38m|    XXX#XX - Range 0-1 = pretest, 2-3 = post-test             | \033[0m  ")
        print("\033[1;31;38m|                                                              | \033[0m  ")
        print("\033[1;31;38m| 1-12000 = SY0-401 pre-test, post-test, ch 1-11 (all)         | \033[0m  ")
        print("\033[1;31;38m| 200-300 = SY0-401 post-test                                  | \033[0m  ")
        print("\033[1;31;38m| 500000-511015 = SY0-501 pre-test, post-test, ch 1-11 (all)   | \033[0m  ")
        print("\033[1;31;38m| 500000-500075 = SY0-501 pretest                              | \033[0m  ")
        print("\033[1;31;38m| 500200-500275 = SY0-501 post-test                            | \033[0m  ")
        print("\033[1;31;38m| 501000-503999 = SY0-501 ch 1-3 review questions              | \033[0m  ")
        print("\033[1;31;38m\==============================================================/ \033[0m  ")
        print
        get_question_range_numbers()
        # keep re-asking until the range is coherent (min <= max)
        while int(min_question_num) > int(max_question_num):
            get_question_range_numbers()
    return
# validate the user put in a valid number
# if the number is outside of the question range, we will deal with that later
def get_question_range_numbers():
    """Prompt for the min/max question numbers, re-asking until both are digits.

    Stores the raw digit strings in the module-level globals; values outside
    the real question range are tolerated (handled later during selection).
    """
    global min_question_num
    global max_question_num
    min_question_num = raw_input('\033[1;35;38m MIN QUESTION NUMBER:  \033[0m  ')
    while not min_question_num.isdigit():
        min_question_num = raw_input('\033[1;35;38m MIN QUESTION NUMBER:  \033[0m  ')
    max_question_num = raw_input('\033[1;35;38m MAX QUESTION NUMBER:  \033[0m  ')
    while not max_question_num.isdigit():
        max_question_num = raw_input('\033[1;35;38m MAX QUESTION NUMBER:  \033[0m  ')
    return
###########################################################################
# Used for debugging the input questions - This will print a question and
# same array offset for the answer
def print_all_questions():
    """Debug helper: dump every question beside its same-index key record."""
    for index, question in enumerate(questions):
        print('-------------------------------------')
        print(question)
        print(answer_key[index])
###########################################################################
def get_question(current_question_record):
    # Simple indexed lookup into the module-level questions list.
    return questions[current_question_record]
def check_question_number_range(question):
    """Return True when the question's id lies inside the user-chosen range."""
    qid = int(question['id_number'])
    low = int(min_question_num)
    high = int(max_question_num)
    return low <= qid <= high
def check_answer_key(current_question_record):
    # answer_key[] is parallel to questions[]: same index -> matching answer.
    return answer_key[current_question_record]
###########################################################################
def quiz():
    """Run the interactive quiz loop until the user exits or no questions match.

    Randomly draws questions from the module-level questions[] list that
    fall inside the selected number range, reads the user's answer, scores
    it against answer_key[], and prints a running tally on exit.
    (Python 2: uses raw_input and string.upper.)
    """
    number = 0
    wrong = 0
    right = 0
    continue_quiz = True
    letter_regex = re.compile(LETTER_ANSWER_ONLY)
    while continue_quiz:
        number += 1
        infinity_loop_check = 0
        # pick a random number to use for a random question within the array
        current_question_record = random.randrange(0, len(questions), 1)
        current_question = get_question(current_question_record)
        # validate if the id_number from the question is within the subset desired
        # (rejection sampling; give up after 3000 misses so an empty range
        # cannot spin forever)
        while check_question_number_range(questions[current_question_record]) is False:
            if infinity_loop_check > 3000:
                print("NO QUESTIONS IN RANGE!!! (infinity loop stopped)")
                continue_quiz = False
                break
            current_question_record = random.randrange(0, len(questions), 1)
            current_question = get_question(current_question_record)
            infinity_loop_check += 1
        if continue_quiz is False:
            break
        # print the random question and associated choices to the output
        print("\n\033[1;33;39m" + current_question['question'] + " \033[0m  ")
        for choice in current_question['choices']:
            print("\033[1;33;39m" + choice['choice'] + " \033[0m  ")
        # read the answer key
        current_answer_key = check_answer_key(current_question_record)
        # indicate if this is an answer with more than one letters
        #print(len(current_answer_key['correct_answer']))
        if len(current_answer_key['correct_answer']) > 1:
            print('NOTE: THIS IS A MULTI ANSWER QUESTION!')
        # ask the user for the answer
        get_answer = raw_input('ENTER YOUR ANSWER: ')
        get_answer = string.upper(get_answer)
        # loop until the user inputs an answer that's in ANSWER_LIST=[] in variables
        while get_answer not in ANSWER_LIST:
            get_answer = raw_input('\033[5;35;38mENTER YOUR ANSWER:\033[0m  ')
            get_answer = string.upper(get_answer)
        # extract the answers in "AB" format to be in ['A', 'B'] format to compare
        get_answer = letter_regex.findall(get_answer)
        if get_answer == current_answer_key['correct_answer']:
            print("\n\033[5;31;38mCONGRATS, YOU'RE RIGHT! \033[0m  ")
            right += 1
        else:
            # write out in the format: CORRECT ANSWER: AB
            stdout.write(' CORRECT ANSWER:' + ' ')
            for letter in current_answer_key['correct_answer']:
                stdout.write(letter)
            wrong += 1
        print("\n\033[1;33;39m" + current_answer_key['explanation'] + " \033[0m  ")
        get_answer = raw_input("PRESS ENTER TO CONTINUE OR X TO EXIT")
        if get_answer in ['X','x','Q','q']:
            continue_quiz = False
    print("\n\033[5;31;38mYOU GOT " + str(right) + " RIGHT AND " + str(wrong) + " WRONG! OUT Of "+ str(right + wrong) + " QUESTIONS. \033[0m  ")
    return
###########################################################################
if __name__ == '__main__':
    print_banner()
    # note: The questions and answer_key are read in and stored in two separate arrays
    # this program is written to use the [index_offset] to match the Q and A and no
    # error checking is in place to make sure the Q matches the A at the same [array_index]
    questions = extract_questions(FILE_NAME1)
    answer_key = extract_key(FILE_NAME2)
    # ask the user if they want to narrow down the questions asked and validate the user input
    question_range()
    # used for debugging input Q and A that don't match
    #print_all_questions()
    quiz()
    print("\nBYE")
| 49.934426 | 212 | 0.560867 |
7953fc826102a212dd2440408c6d1e9663a37b55 | 13,296 | py | Python | nvidia_MODEL2.py | savinay95n/Behavioral-cloning-Steering-angle-prediction | efa0c02d1d21dc7233899193de0be5615fe27318 | [
"Apache-2.0"
] | 2 | 2022-02-07T20:46:32.000Z | 2022-03-13T07:27:44.000Z | nvidia_MODEL2.py | savinay95n/Behavioral-cloning-Steering-angle-prediction | efa0c02d1d21dc7233899193de0be5615fe27318 | [
"Apache-2.0"
] | 2 | 2021-05-04T19:42:32.000Z | 2021-05-04T19:50:49.000Z | nvidia_MODEL2.py | savinay95n/Behavioral-cloning-Steering-angle-prediction | efa0c02d1d21dc7233899193de0be5615fe27318 | [
"Apache-2.0"
] | null | null | null | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import cv2
import random
import json
import math
import keras
from keras.preprocessing.image import *
from keras.models import Sequential, Model
from keras.layers import Convolution2D, Flatten, MaxPooling2D, Lambda, ELU
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import Adam
from keras.callbacks import Callback
from keras.layers.normalization import BatchNormalization
from keras.regularizers import l2
import threading
import tensorflow as tf
from IPython.display import display
from keras.callbacks import ModelCheckpoint
from keras.models import model_from_json
import h5py
from keras.models import load_model
# fix random seed reproducibility
# This helps in debugging
seed = 7
np.random.seed(seed)
# Column layout of the simulator's driving_log.csv (the file has no header row).
columns = ['center', 'left', 'right', 'steering_angle', 'throttle',
           'brake', 'speed']
# Loading Data
print('Loading Dataset ...')
filepath = os.path.join(os.getcwd(), 'driving_log.csv')
data = pd.read_csv(filepath, names = columns)
# Data description
print('Dataset_Columns: ', columns, '\n')
print('Dataset shape: ', data.shape, '\n')
print(data.describe(), '\n')
print('Dataset Loaded...')
# Exploring dataset
# Histogram of Steering Angles before Image Augmentation
binwidth = 0.025
plt.hist(data.steering_angle,
         bins = np.arange(min(data.steering_angle),
                          max(data.steering_angle)
                          + binwidth, binwidth))
plt.title('Number of images per steering angle')
plt.xlabel('Steering Angle')
plt.ylabel('# Frames')
plt.show()
# Train and Validation split data in 90 : 10 ratio
# shuffle data
data = data.reindex(np.random.permutation(data.index))
num_train = int((len(data) / 10.) * 9.)
# Slicing the dataframe
X_train = data.iloc[:num_train]
X_validation = data.iloc[num_train:]
# Bug fix: the original called print('... {} ...', format(n)) — i.e. passed
# format(n) as a *second* print argument — so the literal '{}' placeholder
# was printed instead of the count.
print('X_train has {} elements'.format(len(X_train)))
print('X_validation has {} elements'.format(len(X_validation)))
# Image Augmentation and Pre-processing Hyper Parameters
CAMERA_OFFSET = 0.25          # steering correction applied for left/right cameras
CHANNEL_SHIFT_RANGE = 0.2
WIDTH_SHIFT_RANGE = 100       # max horizontal translation (pixels)
HEIGHT_SHIFT_RANGE = 40       # max vertical translation (pixels)
PROCESSED_IMG_COLS = 64
PROCESSED_IMG_ROWS = 64
PROCESSED_IMG_CHANNELS = 3
# Model Hyper Parameters
#NB_EPOCH = 20
NB_EPOCH = 10
BATCH_SIZE = 64
# Data Augmentation Functions
# flip images horizontally
def horizontal_flip(img, steering_angle):
    """Mirror the frame left-to-right; the steering angle changes sign accordingly."""
    mirrored = cv2.flip(img, 1)
    return mirrored, -1 * steering_angle
# shift range for each channels
def channel_shift(img, channel_shift_range = CHANNEL_SHIFT_RANGE):
    """Randomly shift each colour channel by up to +/- channel_shift_range."""
    # Channel axis is index 2 (TensorFlow-style HWC layout).
    return random_channel_shift(img, channel_shift_range, 2)
# shift height/width of the image by a small amount
def height_width_shift(img, steering_angle):
    """Translate the frame by a random (dx, dy) offset and adjust the steering angle.

    The horizontal shift dx also moves the apparent road position, so the
    angle is corrected proportionally to dx.
    """
    rows, cols = img.shape[0], img.shape[1]
    # Uniform offsets centred on zero (dx drawn first, then dy — RNG order preserved).
    dx = WIDTH_SHIFT_RANGE * np.random.uniform() - WIDTH_SHIFT_RANGE / 2
    dy = HEIGHT_SHIFT_RANGE * np.random.uniform() - HEIGHT_SHIFT_RANGE / 2
    shifted_angle = steering_angle + dx / WIDTH_SHIFT_RANGE * 2 * 0.2
    affine = np.float32([[1, 0, dx], [0, 1, dy]])
    shifted_img = cv2.warpAffine(img, affine, (cols, rows))
    return shifted_img, shifted_angle
# brightness shift
def brightness_shift(img, bright_value = None):
    """Shift the brightness (HSV value channel) of an RGB frame.

    If *bright_value* is truthy it is added to the V channel; otherwise the
    channel is scaled by a random factor in [0.25, 1.25).

    Bug fix: the original added/multiplied directly on the uint8 HSV array,
    which wraps around on overflow (values above 255 become dark artifacts).
    Compute in float and clip to [0, 255] instead.
    """
    img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    value_channel = img[:, :, 2].astype(np.float32)
    if bright_value:
        value_channel += bright_value
    else:
        random_bright = 0.25 + np.random.uniform()
        value_channel *= random_bright
    img[:, :, 2] = np.clip(value_channel, 0, 255).astype(img.dtype)
    img = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)
    return img
# crop the image
def crop_resize_image(img):
    """Cut off the sky (top fifth) and car hood (bottom 25 rows), then resize to the model input size."""
    height, width = img.shape[0], img.shape[1]
    cropped = img[math.floor(height / 5):height - 25, 0:width]
    return cv2.resize(cropped, (PROCESSED_IMG_COLS, PROCESSED_IMG_ROWS),
                      interpolation=cv2.INTER_AREA)
# Combining Augmentations
# Wrapper Function
def apply_random_transformation(img, steering_angle):
    """Run the augmentation pipeline: shift -> brighten -> (50% chance) flip -> crop/resize."""
    frame, angle = height_width_shift(img, steering_angle)
    frame = brightness_shift(frame)
    # channel_shift was dropped from the pipeline: it increased train time for little benefit.
    if np.random.random() < 0.5:
        frame, angle = horizontal_flip(frame, angle)
    frame = crop_resize_image(frame)
    return frame, angle
# Image Augmentation Visualization
# Pick one random log row; column 0 is the center-camera path, column 3 the steering angle.
test_row = data.values[np.random.randint(len(data.values))]
test_img = cv2.imread(test_row[0])
test_steer = test_row[3]
def aug_visualization(test_img, test_steer):
    """Render the original frame and each individual augmentation in its own figure."""
    def _show(image, label):
        # One figure per image, description placed on the x-axis label.
        plt.figure()
        plt.xlabel(label)
        plt.imshow(image)

    # original image
    _show(test_img, 'Original Test Image, Steering angle :' + str(test_steer))
    # horizontally flipped image
    flipped_image, new_steering_angle = horizontal_flip(test_img, test_steer)
    _show(flipped_image,
          "Horizontally Flipped, New steering angle: " + str(new_steering_angle))
    # channel shifted image
    _show(channel_shift(test_img, 255),
          "Random Channel Shifted, Steering angle: " + str(test_steer))
    # width/height shifted image
    width_shifted_image, new_steering_angle = height_width_shift(test_img, test_steer)
    new_steering_angle = "{:.7f}".format(new_steering_angle)
    _show(width_shifted_image,
          "Random HT and WD Shifted, New steering angle: " + str(new_steering_angle))
    # brightened image
    _show(brightness_shift(test_img, 255),
          "Brightened, Steering angle: " + str(test_steer))
    # cropped and resized image
    _show(crop_resize_image(test_img),
          "Cropped and Resized, Steering angle: " + str(test_steer))
def load_and_augment_image(line_data):
    """Pick one of the three camera views at random, load it, and augment it.

    The left/right views get a fixed +/- CAMERA_OFFSET steering correction.
    """
    camera = np.random.randint(3)
    if camera == 0:
        path_file = line_data['left'][0].strip()
        shift_angle = CAMERA_OFFSET
    elif camera == 1:
        path_file = line_data['center'][0].strip()
        shift_angle = 0
    else:
        path_file = line_data['right'][0].strip()
        shift_angle = -1 * CAMERA_OFFSET
    steering_angle = line_data['steering_angle'][0] + shift_angle
    frame = cv2.imread(path_file)
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    frame, steering_angle = apply_random_transformation(frame, steering_angle)
    return frame, steering_angle
# Every steering angle yielded by the generators (used for the histogram below).
generated_steering_angles = []
# Rejection-sampling acceptance threshold; decayed per epoch by LifecycleCallback.
threshold = 1
#@threadsafe_generator
def generate_batch_data(_data, batch_size = 32):
    """Infinite generator yielding (images, steering_angles) training batches.

    Near-straight angles (|angle| < 0.1) are rejection-sampled against the
    global ``threshold`` so the model does not develop a bias towards
    driving straight (idea borrowed from Vivek Yadav).
    """
    batch_images = np.zeros((batch_size, PROCESSED_IMG_ROWS, PROCESSED_IMG_COLS, PROCESSED_IMG_CHANNELS))
    batch_steering = np.zeros(batch_size)
    while True:
        for slot in range(batch_size):
            row = _data.iloc[[np.random.randint(len(_data))]].reset_index()
            # Keep drawing augmented samples until one survives rejection
            # sampling; note the short-circuit keeps RNG usage identical to
            # the original (uniform drawn only when |angle| < 0.1).
            while True:
                image, angle = load_and_augment_image(row)
                if abs(angle) >= .1 or np.random.uniform() > threshold:
                    break
            batch_images[slot] = image
            batch_steering[slot] = angle
            generated_steering_angles.append(angle)
        yield batch_images, batch_steering
# Draw one small sample batch just for visualization below.
iterator = generate_batch_data(X_train, batch_size=10)
sample_images, sample_steerings = iterator.__next__()
def batch_generation_visualization():
    """Show the sampled generator batch (10 frames) with the steering angle in each title."""
    plt.subplots(figsize=(20, 5))
    for plot_idx, (frame, angle) in enumerate(zip(sample_images, sample_steerings), start=1):
        plt.subplot(2, 5, plot_idx)
        plt.axis('off')
        plt.title("Steering: {:.4f}".format(angle))
        plt.imshow(frame)
    plt.show()
# NVIDIA-style CNN: normalization lambda, 5 conv layers, 4 dense layers,
# single output neuron predicting the steering angle.
model = Sequential()
# Scale pixel values from [0, 255] to [-1, 1].
model.add(Lambda(lambda x: x/127.5 - 1., input_shape=(PROCESSED_IMG_ROWS, PROCESSED_IMG_COLS, PROCESSED_IMG_CHANNELS)))
model.add(Convolution2D(24, 5, 5, activation='elu', subsample=(2, 2), name='Conv1'))
model.add(Convolution2D(36, 5, 5, activation='elu', subsample=(2, 2), name='Conv2'))
model.add(Convolution2D(48, 5, 5, activation='elu', subsample=(2, 2)))
model.add(Dropout(0.25))
model.add(Convolution2D(64, 3, 3, activation='elu'))
model.add(Dropout(0.25))
model.add(Convolution2D(64, 3, 3, activation='elu'))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(100, activation='elu'))
model.add(Dense(50, activation='elu'))
model.add(Dense(10, activation='elu'))
# Regression head: raw steering angle, no activation.
model.add(Dense(1))
#model = load_model('modelv2-3.h5')
model.summary()
#model = load_model('model-1.h5')
#model = load_model('model-2.h5')
# checkpoint: keep only the epochs that improve validation loss
checkpoint = ModelCheckpoint('modelv2-{epoch:03d}.h5',
                             monitor='val_loss',
                             verbose=0,
                             save_best_only=True,
                             mode='auto')
# compile with MSE regression loss
opt = Adam(lr=0.0001)
model.compile(optimizer=opt, loss='mse', metrics=[])
class LifecycleCallback(keras.callbacks.Callback):
    """Keras callback that anneals the global sampling ``threshold`` and records batch losses.

    ``threshold`` controls rejection sampling of near-straight angles in
    generate_batch_data; decaying it as 1/(epoch+1) lets more low-angle
    frames through in later epochs.

    Fix: the hook signatures previously used the mutable default ``logs={}``
    (a classic Python pitfall); ``logs=None`` with a guard is equivalent and
    matches Keras' own callback convention.
    """

    def on_epoch_begin(self, epoch, logs=None):
        pass

    def on_epoch_end(self, epoch, logs=None):
        # Decay the acceptance threshold for the rejection sampler.
        global threshold
        threshold = 1 / (epoch + 1)

    def on_batch_begin(self, batch, logs=None):
        pass

    def on_batch_end(self, batch, logs=None):
        logs = logs or {}
        self.losses.append(logs.get('loss'))

    def on_train_begin(self, logs=None):
        print('BEGIN TRAINING')
        # Created here because Keras guarantees on_train_begin runs first.
        self.losses = []

    def on_train_end(self, logs=None):
        print('END TRAINING')
# Calculate the correct number of samples per epoch based on batch size
def calc_samples_per_epoch(array_size, batch_size):
    """Round *array_size* up to the nearest whole multiple of *batch_size*.

    Ensures an epoch covers every sample using only complete batches.
    """
    full_and_partial_batches = math.ceil(array_size / batch_size)
    return full_and_partial_batches * batch_size
lifecycle_callback = LifecycleCallback()
train_generator = generate_batch_data(X_train, BATCH_SIZE)
validation_generator = generate_batch_data(X_validation, BATCH_SIZE)
# NOTE(review): these two values are computed but fit_generator below is
# passed len(X_train)/len(X_validation) directly — confirm which was intended.
samples_per_epoch = calc_samples_per_epoch((len(X_train)*3), BATCH_SIZE)
nb_val_samples = calc_samples_per_epoch((len(X_validation)*3), BATCH_SIZE)
history = model.fit_generator(train_generator,
                              validation_data = validation_generator,
                              samples_per_epoch = len(X_train),
                              nb_val_samples = len(X_validation),
                              nb_epoch = NB_EPOCH,
                              verbose=1,
                              callbacks=[lifecycle_callback, checkpoint])
# Persist the full model, plus separate architecture (JSON) and weights files.
model.save('./modelv2-4.h5')
model_json = model.to_json()
with open("./modelv2-004.json", "w") as json_file:
    json.dump(model_json, json_file)
model.save_weights("./modelv2-004.h5")
print("Saved model to disk")
# list all data in history
print(history.history.keys())
# summarize history for epoch loss
plt.figure()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper right')
plt.show()
# Histogram of the steering angles actually produced by the generators,
# for comparison with the raw-data histogram plotted earlier.
plt.figure()
plt.hist(generated_steering_angles, bins=np.arange(min(generated_steering_angles), max(generated_steering_angles) + binwidth, binwidth))
plt.title('Number of augmented images per steering angle')
plt.xlabel('Steering Angle')
plt.ylabel('# Augmented Images')
plt.show()
# summarize history for batch loss (per-batch losses recorded by the callback)
plt.figure()
batch_history = lifecycle_callback.losses
plt.plot(batch_history)
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('batches')
plt.show()
# Layer visualizations
# Sample frame used to probe intermediate conv-layer activations below.
test_fn = "IMG/center_2016_12_01_13_32_43_457.jpg"
def visualize_model_layer_output(layer_name):
    """Plot the first 16 activation maps produced by *layer_name* for the sample frame."""
    # Sub-model that exposes the requested intermediate layer as its output.
    probe = Model(input=model.input, output=model.get_layer(layer_name).output)
    sample = img_to_array(load_img(test_fn))
    sample = np.expand_dims(crop_resize_image(sample), axis=0)
    activations = probe.predict(sample)
    print("conv features shape: ", activations.shape)
    # 4x4 grid of grayscale feature maps
    plt.subplots(figsize=(5, 5))
    for feature_idx in range(16):
        plt.subplot(4, 4, feature_idx + 1)
        plt.axis('off')
        plt.imshow(activations[0, :, :, feature_idx], cmap='gray')
    plt.show()
# Visualize the two named conv layers defined in the model above.
visualize_model_layer_output('Conv1')
visualize_model_layer_output('Conv2')
7953fe1ac7e30c8cc6c2252b0c6a2892b8d030bf | 1,014 | py | Python | src/adata/window/__init__.py | txemavs/adata | 89a6d27fa59dc26ae3036685bb9d8bfb8e983fd9 | [
"MIT"
] | 1 | 2018-03-24T12:18:08.000Z | 2018-03-24T12:18:08.000Z | src/adata/window/__init__.py | txemavs/adata | 89a6d27fa59dc26ae3036685bb9d8bfb8e983fd9 | [
"MIT"
] | null | null | null | src/adata/window/__init__.py | txemavs/adata | 89a6d27fa59dc26ae3036685bb9d8bfb8e983fd9 | [
"MIT"
] | null | null | null |
import wx.html2
from ..core import *
from ..gui.text import TextEditor
class TextWindow(wx.Frame):
    '''Top-level frame that shows *filepath* inside an embedded TextEditor.'''

    def __init__(self, filepath=None):
        wx.Frame.__init__(self, None, -1, filepath, size=(800, 600))
        self.text = TextEditor(self)
        layout = wx.BoxSizer(wx.VERTICAL)
        layout.Add(self.text, 1, wx.EXPAND)
        self.sizer = layout
        self.SetSizer(self.sizer)
        self.SetAutoLayout(1)
        self.Show()
        # File contents are loaded after the frame is shown, as before.
        with open(filepath, "rt", encoding="utf-8") as source:
            self.text.AddText(source.read())
class Browser(wx.Frame):
    '''Top-level frame hosting a wx.html2 WebView.'''

    def __init__(self, *args, **kwargs):
        wx.Frame.__init__(self, *args, **kwargs)
        self.browser = wx.html2.WebView.New(self)
        layout = wx.BoxSizer(wx.VERTICAL)
        layout.Add(self.browser, 1, wx.EXPAND, 10)
        self.SetSizer(layout)
        self.SetSize((800, 600))
        self.Show()

    def Go(self, url):
        '''Navigate the embedded web view to *url*.'''
        self.browser.LoadURL(url)
7953fede98d7a26981cd0d4f577fe63f954965ba | 644 | py | Python | tests/test_set_ramp_z.py | elsandal/pyclesperanto_prototype | 7bda828813b86b44b63d73d5e8f466d9769cded1 | [
"BSD-3-Clause"
] | 64 | 2020-03-18T12:11:22.000Z | 2022-03-31T08:19:18.000Z | tests/test_set_ramp_z.py | elsandal/pyclesperanto_prototype | 7bda828813b86b44b63d73d5e8f466d9769cded1 | [
"BSD-3-Clause"
] | 148 | 2020-05-14T06:14:11.000Z | 2022-03-26T15:02:31.000Z | tests/test_set_ramp_z.py | elsandal/pyclesperanto_prototype | 7bda828813b86b44b63d73d5e8f466d9769cded1 | [
"BSD-3-Clause"
] | 16 | 2020-05-31T00:53:44.000Z | 2022-03-23T13:20:57.000Z | import pyclesperanto_prototype as cle
import numpy as np
def test_set_ramp_z():
    """set_ramp_z must overwrite every voxel with the index of its z-plane."""
    input_stack = np.asarray([
        [[0, 0, 0],
         [3, 4, 3],
         [3, 4, 3]],
        [[3, 4, 3],
         [3, 4, 3],
         [3, 4, 3]],
    ])
    # Plane 0 becomes all zeros, plane 1 all ones, regardless of input values.
    expected_stack = np.asarray([
        [[0, 0, 0],
         [0, 0, 0],
         [0, 0, 0]],
        [[1, 1, 1],
         [1, 1, 1],
         [1, 1, 1]],
    ])
    result = cle.push(input_stack)
    reference = cle.push(expected_stack)
    cle.set_ramp_z(result)
    actual = cle.pull(result)
    desired = cle.pull(reference)
    print(actual)
    assert (np.allclose(actual, desired, 0.001))
| 17.405405 | 37 | 0.354037 |
7953ff924c2ac4094200c80736168f32da38a40b | 1,020 | py | Python | src/charma/media_info/interface.py | mononobi/charma-server | ed90f5ec0b5ff3996232d5fe49a4f77f96d82ced | [
"BSD-3-Clause"
] | 1 | 2020-01-16T23:36:10.000Z | 2020-01-16T23:36:10.000Z | src/charma/media_info/interface.py | mononobi/imovie-server | ed90f5ec0b5ff3996232d5fe49a4f77f96d82ced | [
"BSD-3-Clause"
] | 24 | 2020-06-08T18:27:04.000Z | 2021-06-06T12:01:39.000Z | src/charma/media_info/interface.py | mononobi/charma-server | ed90f5ec0b5ff3996232d5fe49a4f77f96d82ced | [
"BSD-3-Clause"
] | 1 | 2020-12-20T05:29:04.000Z | 2020-12-20T05:29:04.000Z | # -*- coding: utf-8 -*-
"""
media info interface module.
"""
from threading import Lock
from abc import abstractmethod
from pyrin.core.structs import CoreObject, MultiSingletonMeta
from pyrin.core.exceptions import CoreNotImplementedError
class MediaInfoSingletonMeta(MultiSingletonMeta):
    """
    media info singleton meta class.

    this is a thread-safe implementation of singleton.
    """

    # registry of created singleton instances, shared by all classes using this metaclass
    _instances = dict()
    # guards _instances against concurrent first-time construction
    _lock = Lock()
class AbstractMediaInfoProvider(CoreObject, metaclass=MediaInfoSingletonMeta):
    """
    abstract media info provider class.

    concrete providers must subclass this and implement `get_info`.
    """

    @abstractmethod
    def get_info(self, file, **options):
        """
        gets a dict containing media info of given file.

        :param str file: absolute path of video file.

        :raises CoreNotImplementedError: core not implemented error.

        :returns: dict(int runtime,
                       int width,
                       int height)

        :rtype: dict
        """
        # Abstract: always raises until overridden by a concrete provider.
        raise CoreNotImplementedError()
7953ffa279b8649e92758d2ab5c797dcca767b53 | 5,956 | py | Python | webapp/models/l0_model.py | dushik/AdversarialDNN-Playground | cd960c37aeb610f01c30c296ac481606209ed9ee | [
"Apache-2.0"
] | 125 | 2017-05-03T23:56:48.000Z | 2022-03-31T09:49:23.000Z | webapp/models/l0_model.py | dushik/AdversarialDNN-Playground | cd960c37aeb610f01c30c296ac481606209ed9ee | [
"Apache-2.0"
] | 4 | 2017-06-05T15:16:48.000Z | 2022-02-09T23:26:25.000Z | webapp/models/l0_model.py | Qdata4Capstone/capstone18-AdversarialTexPlayground2 | 8feb6e1ef1d293dcb8869faa8a84216cdc3dd5ce | [
"Apache-2.0"
] | 29 | 2017-06-01T15:31:27.000Z | 2021-04-01T13:01:35.000Z | # ML includes
import tensorflow as tf
import numpy as np
import pandas as pd
# General python includes
import os
import math
import json
from itertools import permutations, combinations, product
# Plotting
import matplotlib
import matplotlib.pyplot as plt
matplotlib.style.use('fivethirtyeight')  # global plot theme for all figures in this module
def grad(F):
    """Build the Jacobian of the 10 class scores in *F* w.r.t. the input tensor.

    The input placeholder is fetched from the 'mnist' graph collection.
    """
    x = tf.get_collection('mnist')[0]
    per_class_grads = []
    for class_idx in range(10):
        per_class_grads.append(tf.gradients(F[:, class_idx], x)[0])
    # Stacked along a new last axis: shape (?, 784, 10).
    return tf.stack(per_class_grads, axis=2)
def slow_map(grad_F, X, t, feature_set):
    """Exhaustive two-pixel saliency search for a JSMA-style attack.

    Scans every pixel pair (p, q) in *feature_set* and returns the pair
    maximising -alpha*beta, where alpha is the summed gradient of target
    class *t* at the two pixels and beta is the summed gradient of all
    other classes.  Pairs satisfying the saliency constraints (alpha < 0,
    beta > 0) are preferred; if none exists, the best unconstrained pair
    is returned as a fallback.

    Bug fixes vs. the original:
    - the constrained branch did not compare against the running maximum,
      so it kept the *last* valid pair instead of the best one;
    - the fallback branch compared scores against the constrained maximum
      M instead of its own accumulator M_nolimits.
    """
    print('using slow map')
    # Placeholders stored in the 'mnist' graph collection by the model.
    x = tf.get_collection('mnist')[0]
    keep_prob = tf.get_collection('mnist')[2]
    M = 0                  # best constrained score so far
    p1 = None
    p2 = None
    M_nolimits = 0         # best unconstrained score (fallback)
    p1_nolimits = None
    p2_nolimits = None
    gF = grad_F.eval(feed_dict = {x:X, keep_prob:1.0})
    pixelSumGF = np.sum(gF, axis=2)  # per-pixel gradient summed over all classes
    for (p, q) in combinations(feature_set, 2):
        alpha = gF[:, p, t] + gF[:, q, t]                    # pull towards target class
        beta = pixelSumGF[:,p] + pixelSumGF[:,q] - alpha     # pull on the other classes
        score = -alpha * beta
        if alpha < 0 and beta > 0 and score > M:
            (p1, p2) = (p, q)
            M = score
        if score > M_nolimits:
            (p1_nolimits, p2_nolimits) = (p, q)
            M_nolimits = score
    if p1 is None or p2 is None:
        return p1_nolimits, p2_nolimits
    else:
        return p1, p2
# Uses the numpy functions for argmax like cleverhans
def faster_map(grad_F, x_adversary, t, feature_set):
    """Vectorised saliency-pair search in the style of cleverhans.

    Only the k pixels with the most-negative target gradient are paired
    against the full feature set, trading exhaustiveness for speed.
    Returns (p1, p2) or (None, None) when the argmax degenerates.
    """
    print('Using fastest map')
    x = tf.get_collection('mnist')[0]
    keep_prob = tf.get_collection('mnist')[2]
    # Feed keep_prob=1.0 so dropout is disabled, consistent with slow_map.
    gF = grad_F.eval(feed_dict={x: x_adversary, keep_prob: 1.0}).squeeze()
    num_raw_features = gF.shape[0]
    target_vector = gF[:, t].reshape(num_raw_features)
    # Sum of gradients over every class except the target ("all but target col").
    other_vector = np.sum(gF, axis=1).reshape(num_raw_features) - target_vector
    k = int(len(feature_set) * .25)  # top quarter of the feature set; magic number to match cleverhans
    ordered_feature_set = sorted(feature_set, key=lambda idx: target_vector[idx])
    best_pixels = ordered_feature_set[:k]
    num_features = len(feature_set)
    tV_best = target_vector[best_pixels].reshape((1, k))
    oV_best = other_vector[best_pixels].reshape((1, k))
    tV_features = target_vector[ordered_feature_set].reshape((num_features, 1))
    # Bug fix: this row previously read from target_vector (copy-paste error),
    # which made other_sum wrong for the non-"best" pixel of every pair.
    oV_features = other_vector[ordered_feature_set].reshape((num_features, 1))
    target_sum = tV_best + tV_features
    other_sum = oV_best + oV_features
    # heavily influenced by cleverhans: score each pair and zero out the
    # pairs that violate the saliency constraints.
    scores = -target_sum * other_sum
    np.fill_diagonal(scores, 0)
    scores_mask = ((target_sum < 0) & (other_sum > 0))
    scores *= scores_mask
    (p1_raw, p2_raw) = np.unravel_index(np.argmax(scores), scores.shape)
    p1 = ordered_feature_set[p1_raw]
    p2 = best_pixels[p2_raw]
    if (p1 != p2):
        return p1, p2
    else:
        return None, None
# Original FJSMA implementation; does not use numpy to fullest advantage
# Not used; preserved "just in case." Remove later.
def fast_map(grad_F, x_adversary, t, feature_set):
    """Legacy saliency search: pairs the top-10% target-gradient pixels
    against the whole feature set instead of scanning all pairs."""
    print('Using fast map')
    x = tf.get_collection('mnist')[0]
    M = 0 # Max -alpha*beta
    p1 = None
    p2 = None
    M_nolimits = 0
    p1_nolimits = None
    p2_nolimits = None
    gF = grad_F.eval(feed_dict = {x:x_adversary})
    pixelSumGF = np.sum(gF, axis=2) # sum along the innermost axis
    top_ct = int(len(feature_set)*.1) # consider the top tenth of the feature set
    best_p = sorted(feature_set, key=lambda p: gF[:, p, t])[:top_ct]
    for (p, q) in product(best_p, feature_set):
        if p==q:
            continue
        alpha = gF[:, p, t] + gF[:, q, t]
        beta = pixelSumGF[:,p] + pixelSumGF[:,q] - alpha
        if alpha < 0 and beta > 0 and -alpha*beta > M:
            (p1, p2) = (p, q)
            M = -alpha*beta
        # NOTE(review): this fallback compares against the constrained max M
        # rather than M_nolimits — presumably a bug, left as-is in this
        # dead code path.
        if -alpha*beta > M:
            (p1_nolimits, p2_nolimits) = (p, q)
            M_nolimits = -alpha*beta
    if p1 is None or p2 is None:
        return p1_nolimits, p2_nolimits
    else:
        return p1, p2
def attack(X, target_class, max_distortion, fast=False):
    """Run an L0 (JSMA-style) targeted attack on a flattened MNIST sample.

    :param X: (1, 784) input image; NOTE: modified in place.
    :param target_class: class index the attack tries to reach.
    :param max_distortion: fraction of the 784 pixels allowed to change.
    :param fast: use the vectorised faster_map instead of slow_map.
    :returns: (predicted_class, 28x28 adversarial image, class score vector)
    """
    # unpack the string parameters into non-string parameters, as needed
    max_distortion = float(max_distortion)
    # Placeholders / output tensor stored in the 'mnist' graph collection.
    x = tf.get_collection('mnist')[0]
    keep_prob = tf.get_collection('mnist')[2]
    F = tf.get_collection('mnist')[3]
    # Only pixels that are already non-zero may be decremented.
    feature_set = {i for i in range(X.shape[1]) if X[0, i] != 0}
    curr_iter = 0
    # Each iteration touches two pixels, so distortion budget / 2 iterations.
    max_iter = math.floor(784*max_distortion / 2)
    classify_op = tf.argmax(F,1)
    gradF = grad(F)
    source_class = classify_op.eval(feed_dict={x:X, keep_prob:1.0})
    print('Evaled first thing')
    saliency_map = faster_map if fast else slow_map
    while source_class != target_class and feature_set and curr_iter < max_iter:
        p1, p2 = saliency_map(gradF, X, target_class, feature_set)
        if p1 is None or p2 is None:
            # Robustness fix: faster_map can return (None, None); indexing
            # X[0, None] would crash, so stop the attack instead.
            break
        X[0, p1] = max(X[0, p1] - 1, 0)
        X[0, p2] = max(X[0, p2] - 1, 0)
        if X[0, p1] == 0:
            feature_set.remove(p1)
        if X[0, p2] == 0:
            feature_set.remove(p2)
        source_class = classify_op.eval(feed_dict={x:X, keep_prob:1.0})
        curr_iter += 1
        if (curr_iter % 10 == 0):
            print(curr_iter)
    print('Finished {} iterations.'.format(curr_iter))
    # Consistency fix: feed keep_prob=1.0 here as well, so the reported class
    # scores are computed with dropout disabled like every classify eval above.
    # (The original also computed unused adv_scaled/norm_scaled values; removed.)
    adv_probs = F.eval(feed_dict={x:X, keep_prob:1.0})[0]
    return source_class[0], np.reshape(X, (28, 28)), adv_probs
# Module-level cache populated by setup() from seeds.json.
mnist_data = None
def setup(mnist_filename):
    """Restore the saved MNIST TF graph/session and load the seed metadata.

    :param mnist_filename: path to the exported .meta graph file.
    """
    global mnist_data
    # Will run on import
    print('Setting up the L1 model with MNIST model at {}'.format(mnist_filename))
    sess = tf.InteractiveSession()
    new_saver = tf.train.import_meta_graph(mnist_filename)
    new_saver.restore(sess, tf.train.latest_checkpoint('./webapp/models'))
    with open('./webapp/models/seeds.json') as f:
        mnist_data = json.load(f)
| 31.183246 | 112 | 0.665883 |
7953ffbba29f0d026f8de5ea81967dddf76cd2c2 | 2,332 | py | Python | samples/cli/accelbyte_py_sdk_cli/ugc/_single_admin_get_group.py | AccelByte/accelbyte-python-sdk | dcd311fad111c59da828278975340fb92e0f26f7 | [
"MIT"
] | null | null | null | samples/cli/accelbyte_py_sdk_cli/ugc/_single_admin_get_group.py | AccelByte/accelbyte-python-sdk | dcd311fad111c59da828278975340fb92e0f26f7 | [
"MIT"
] | 1 | 2021-10-13T03:46:58.000Z | 2021-10-13T03:46:58.000Z | samples/cli/accelbyte_py_sdk_cli/ugc/_single_admin_get_group.py | AccelByte/accelbyte-python-sdk | dcd311fad111c59da828278975340fb92e0f26f7 | [
"MIT"
] | null | null | null | # Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template_file: python-cli-command.j2
# justice-ugc-service (2.1.0)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
import json
import yaml
from typing import Optional
import click
from .._utils import login_as as login_as_internal
from .._utils import to_dict
from accelbyte_py_sdk.api.ugc import single_admin_get_group as single_admin_get_group_internal
from accelbyte_py_sdk.api.ugc.models import ModelsCreateGroupResponse
from accelbyte_py_sdk.api.ugc.models import ResponseError
# CLI wrapper for the generated SingleAdminGetGroup UGC endpoint.
# This file is code-generated ("DO NOT EDIT!"); only comments are added here.
# No docstring is added on purpose: click would surface it as --help text.
@click.command()
@click.argument("group_id", type=str)
@click.option("--namespace", type=str)
@click.option("--login_as", type=click.Choice(["client", "user"], case_sensitive=False))
@click.option("--login_with_auth", type=str)
@click.option("--doc", type=bool)
def single_admin_get_group(
    group_id: str,
    namespace: Optional[str] = None,
    login_as: Optional[str] = None,
    login_with_auth: Optional[str] = None,
    doc: Optional[bool] = None,
):
    # --doc: print the wrapped SDK operation's docstring and exit.
    if doc:
        click.echo(single_admin_get_group_internal.__doc__)
        return
    x_additional_headers = None
    if login_with_auth:
        # An explicit bearer token bypasses the interactive login flow.
        x_additional_headers = {
            "Authorization": login_with_auth
        }
    else:
        login_as_internal(login_as)
    result, error = single_admin_get_group_internal(
        group_id=group_id,
        namespace=namespace,
        x_additional_headers=x_additional_headers,
    )
    if error:
        raise Exception(f"SingleAdminGetGroup failed: {str(error)}")
    # Dump the response model as YAML, preserving field order.
    click.echo(yaml.safe_dump(to_dict(result), sort_keys=False))
# Metadata attributes consumed by the CLI framework.
single_admin_get_group.operation_id = "SingleAdminGetGroup"
single_admin_get_group.is_deprecated = False
79540109ca08e895ba6ccd122a27747fadfbc57a | 1,227 | py | Python | pov/make_timelapse.py | ANaka/pov | 59edc1e6761baa45f197f3f8d9a79a7152eb9dc8 | [
"CC0-1.0"
] | null | null | null | pov/make_timelapse.py | ANaka/pov | 59edc1e6761baa45f197f3f8d9a79a7152eb9dc8 | [
"CC0-1.0"
] | null | null | null | pov/make_timelapse.py | ANaka/pov | 59edc1e6761baa45f197f3f8d9a79a7152eb9dc8 | [
"CC0-1.0"
] | null | null | null | import ffmpeg
import click
from pathlib import Path
import fn
ROOT_DIRECTORY = '/home/naka/Videos/pov'


@click.command()
@click.option('--input_dir', '-i', default=None)
@click.option('--suffix', '-s', default='.jpg')
@click.option('--output', '-o', default=None)
@click.option('--framerate', '-r', default=35)
@click.option('--crf', '-c', default=25)
def make_timelapse(
    input_dir=None,
    output=None,
    suffix='.jpg',
    framerate=35,
    crf=25,
):
    # Bug fix: click passes --input_dir / --output as plain strings, so the
    # original crashed on input_dir.joinpath(...) / output.as_posix() whenever
    # either option was supplied on the command line.  Normalise both to Path.
    # Also: identity comparisons with None use `is`, not `==`.
    if input_dir is None:
        plot_id = fn.get_current_plot_id()
        input_dir = Path(ROOT_DIRECTORY).joinpath(plot_id)
    else:
        input_dir = Path(input_dir)
    if output is None:
        output = input_dir.joinpath('movie.mp4')
    else:
        output = Path(output)
    try:
        (
            ffmpeg
            .input(
                f'{input_dir}/*{suffix}',
                pattern_type='glob',
                framerate=framerate,
                pix_fmt='yuv420p',
            )
            .output(output.as_posix(), crf=crf)
            .run(capture_stdout=True, capture_stderr=True)
        )
    except ffmpeg.Error as e:
        # Surface ffmpeg's own output before re-raising for the caller.
        print('stdout:', e.stdout.decode('utf8'))
        print('stderr:', e.stderr.decode('utf8'))
        raise e


if __name__ == '__main__':
    make_timelapse()
7954014a7c8fa87f1ee562fe3804f25b2adbddbd | 4,078 | py | Python | recipes/WHAMandWHAMR/meta/create_whamr_rirs.py | JasonSWFu/speechbrain | cb78ba2b33fceba273b055dc471535344c3053f0 | [
"Apache-2.0"
] | 3,913 | 2021-03-14T13:54:52.000Z | 2022-03-30T05:09:55.000Z | recipes/WHAMandWHAMR/meta/create_whamr_rirs.py | JasonSWFu/speechbrain | cb78ba2b33fceba273b055dc471535344c3053f0 | [
"Apache-2.0"
] | 667 | 2021-03-14T20:11:17.000Z | 2022-03-31T04:07:17.000Z | recipes/WHAMandWHAMR/meta/create_whamr_rirs.py | JasonSWFu/speechbrain | cb78ba2b33fceba273b055dc471535344c3053f0 | [
"Apache-2.0"
] | 785 | 2021-03-14T13:20:57.000Z | 2022-03-31T03:26:03.000Z | """
Adapted from the original WHAMR script to obtain the Room Impulse ResponsesRoom Impulse Responses
Authors
* Cem Subakan 2021
"""
import os
import pandas as pd
import argparse
import torchaudio
from recipes.WHAMandWHAMR.meta.wham_room import WhamRoom
from scipy.signal import resample_poly
import torch
from speechbrain.pretrained.fetching import fetch
from tqdm import tqdm
import pyroomacoustics
def create_rirs(output_dir, sr=8000):
    """
    This function creates the room impulse responses from the WHAMR! dataset
    The implementation is based on the scripts from http://wham.whisper.ai/
    Arguments:
    ------
    output_dir (str) : directory for saving the RIRs
    sr (int) : sampling rate with which we save
    """
    # Room simulation results differ between pyroomacoustics versions,
    # so pin the exact version used to generate the official WHAMR! data.
    assert (
        pyroomacoustics.__version__ == "0.3.1"
    ), "The pyroomacoustics version needs to be 0.3.1"
    os.makedirs(output_dir)
    metafilesdir = os.path.dirname(os.path.realpath(__file__))
    # Metadata CSVs fetched on demand from the HuggingFace model hub.
    filelist = [
        "mix_2_spk_filenames_tr.csv",
        "mix_2_spk_filenames_cv.csv",
        "mix_2_spk_filenames_tt.csv",
        "reverb_params_tr.csv",
        "reverb_params_cv.csv",
        "reverb_params_tt.csv",
    ]
    savedir = os.path.join(metafilesdir, "data")
    for fl in filelist:
        if not os.path.exists(os.path.join(savedir, fl)):
            fetch(
                "metadata/" + fl,
                "speechbrain/sepformer-whamr",
                savedir=savedir,
                save_filename=fl,
            )
    FILELIST_STUB = os.path.join(
        metafilesdir, "data", "mix_2_spk_filenames_{}.csv"
    )
    # Only the training split is generated here.
    SPLITS = ["tr"]
    reverb_param_stub = os.path.join(
        metafilesdir, "data", "reverb_params_{}.csv"
    )
    for splt in SPLITS:
        wsjmix_path = FILELIST_STUB.format(splt)
        wsjmix_df = pd.read_csv(wsjmix_path)
        reverb_param_path = reverb_param_stub.format(splt)
        reverb_param_df = pd.read_csv(reverb_param_path)
        utt_ids = wsjmix_df.output_filename.values
        for output_name in tqdm(utt_ids):
            # Room geometry / mic / source positions and T60 for this utterance.
            utt_row = reverb_param_df[
                reverb_param_df["utterance_id"] == output_name
            ]
            room = WhamRoom(
                [
                    utt_row["room_x"].iloc[0],
                    utt_row["room_y"].iloc[0],
                    utt_row["room_z"].iloc[0],
                ],
                [
                    [
                        utt_row["micL_x"].iloc[0],
                        utt_row["micL_y"].iloc[0],
                        utt_row["mic_z"].iloc[0],
                    ],
                    [
                        utt_row["micR_x"].iloc[0],
                        utt_row["micR_y"].iloc[0],
                        utt_row["mic_z"].iloc[0],
                    ],
                ],
                [
                    utt_row["s1_x"].iloc[0],
                    utt_row["s1_y"].iloc[0],
                    utt_row["s1_z"].iloc[0],
                ],
                [
                    utt_row["s2_x"].iloc[0],
                    utt_row["s2_y"].iloc[0],
                    utt_row["s2_z"].iloc[0],
                ],
                utt_row["T60"].iloc[0],
            )
            room.generate_rirs()
            rir = room.rir_reverberant
            # One RIR per (mic i, source j); simulated at 16 kHz, resampled
            # to *sr* and saved as "<mic>_<source>_<utterance>".
            for i, mics in enumerate(rir):
                for j, source in enumerate(mics):
                    h = resample_poly(source, sr, 16000)
                    h_torch = torch.from_numpy(h).float().unsqueeze(0)
                    torchaudio.save(
                        os.path.join(
                            output_dir, "{}_{}_".format(i, j) + output_name,
                        ),
                        h_torch,
                        sr,
                    )
parser = argparse.ArgumentParser()
parser.add_argument(
"--output-dir",
type=str,
required=True,
help="The output directory for saving the rirs for random augmentation style",
)
args = parser.parse_args()
create_rirs(args.output_dir)
| 28.71831 | 97 | 0.518391 |
79540179b72e0281f0ce55e7dbd1eaf23fed7889 | 8,953 | py | Python | forms-flow-api/tests/unit/api/test_application.py | sreehari-aot/forms-flow-ai | 11e2fdd6da792aaa9dd46c0cec38564fe5916b58 | [
"Apache-2.0"
] | null | null | null | forms-flow-api/tests/unit/api/test_application.py | sreehari-aot/forms-flow-ai | 11e2fdd6da792aaa9dd46c0cec38564fe5916b58 | [
"Apache-2.0"
] | null | null | null | forms-flow-api/tests/unit/api/test_application.py | sreehari-aot/forms-flow-ai | 11e2fdd6da792aaa9dd46c0cec38564fe5916b58 | [
"Apache-2.0"
] | null | null | null | """Test suite for application API endpoint."""
import pytest
from tests.utilities.base_test import (
factory_auth_header,
get_application_create_payload,
get_form_request_payload,
)
class TestApplicationResource:
    """Test suite for the application endpoint."""
    def test_application_no_auth_api(self, app, client, session):
        """Assert that API /application when passed with no token returns 401 status code."""
        # No Authorization header at all -> the API must reject the request.
        response = client.get("/application")
        assert response.status_code == 401
        assert response.json == {
            "type": "Invalid Token Error",
            "message": "Access to formsflow.ai API Denied. Check if the bearer token is passed for "
            "Authorization or has expired.",
        }
    def test_application_list(self, app, client, session):
        """Assert that API/application when passed with valid token returns 200 status code."""
        token = factory_auth_header()
        headers = {
            "Authorization": f"Bearer {token}",
            "content-type": "application/json",
        }
        response = client.get("/application", headers=headers)
        assert response.status_code == 200
    @pytest.mark.parametrize(("pageNo", "limit"), ((1, 5), (1, 10), (1, 20)))
    def test_application_paginated_list(self, app, client, session, pageNo, limit):
        """Tests the API/application endpoint with pageNo and limit query params."""
        token = factory_auth_header()
        headers = {
            "Authorization": f"Bearer {token}",
            "content-type": "application/json",
        }
        # Only the status code is checked; page contents are not asserted here.
        response = client.get(
            f"/application?pageNo={pageNo}&limit={limit}", headers=headers
        )
        assert response.status_code == 200
    @pytest.mark.parametrize(
        ("pageNo", "limit", "sortBy", "sortOrder"),
        ((1, 5, "id", "asc"), (1, 10, "id", "desc"), (1, 20, "id", "desc")),
    )
    def test_application_paginated_sorted_list(
        self, app, client, session, pageNo, limit, sortBy, sortOrder
    ):
        """Tests the API/application endpoint with pageNo, limit, sortBy and SortOrder params."""
        token = factory_auth_header()
        headers = {
            "Authorization": f"Bearer {token}",
            "content-type": "application/json",
        }
        response = client.get(
            f"/application?pageNo={pageNo}&limit={limit}&sortBy={sortBy}&sortOrder={sortOrder}",
            headers=headers,
        )
        assert response.status_code == 200
    @pytest.mark.parametrize(
        ("pageNo", "limit", "filters"),
        (
            (1, 5, "Id=1"),
            (1, 10, "applicationName=Free"),
            (1, 20, "applicationStatus=New"),
        ),
    )
    def test_application_paginated_filtered_list(
        self,
        app,
        client,
        session,
        pageNo,
        limit,
        filters,
    ):
        """Tests the API/application endpoint with filter params."""
        token = factory_auth_header()
        headers = {
            "Authorization": f"Bearer {token}",
            "content-type": "application/json",
        }
        # Seed the database with one form and one application so the
        # filtered listing has data to match against.
        rv = client.post("/form", headers=headers, json=get_form_request_payload())
        assert rv.status_code == 201
        form_id = rv.json.get("formId")
        rv = client.post(
            "/application/create",
            headers=headers,
            json=get_application_create_payload(form_id),
        )
        assert rv.status_code == 201
        response = client.get(
            f"/application?pageNo={pageNo}&limit={limit}&{filters}",
            headers=headers,
        )
        assert response.status_code == 200
class TestApplicationDetailView:
    """Test suite for the API/application/<id> endpoint."""
    def test_application_no_auth_api(self, app, client, session):
        """Tests the endpoint with no token."""
        # Request without an Authorization header must be rejected with 401.
        response = client.get("/application/1")
        assert response.status_code == 401
        assert response.json == {
            "type": "Invalid Token Error",
            "message": "Access to formsflow.ai API Denied. Check if the "
            "bearer token is passed for Authorization or has expired.",
        }
    def test_application_detailed_view(self, app, client, session):
        """Tests the endpoint with valid token."""
        token = factory_auth_header()
        headers = {
            "Authorization": f"Bearer {token}",
            "content-type": "application/json",
        }
        # Create a form, then an application against it, so there is a
        # concrete application id to fetch below.
        rv = client.post("/form", headers=headers, json=get_form_request_payload())
        assert rv.status_code == 201
        form_id = rv.json.get("formId")
        rv = client.post(
            "/application/create",
            headers=headers,
            json=get_application_create_payload(form_id),
        )
        assert rv.status_code == 201
        application_id = rv.json.get("id")
        response = client.get(f"/application/{application_id}", headers=headers)
        assert response.status_code == 200
        # Expected values presumably come from the payload helpers in
        # tests.utilities.base_test -- confirm if those fixtures change.
        assert response.json['applicationName'] == 'Sample form'
        assert response.json['processKey'] == 'oneStepApproval'
def test_application_resource_by_form_id(app, client, session):
    """Tests the application by formid endpoint with valid token."""
    # Shared auth headers for all three requests.
    auth_headers = {
        "Authorization": f"Bearer {factory_auth_header()}",
        "content-type": "application/json",
    }
    form_response = client.post(
        "/form", headers=auth_headers, json=get_form_request_payload()
    )
    assert form_response.status_code == 201
    form_id = form_response.json.get("formId")
    create_response = client.post(
        "/application/create",
        headers=auth_headers,
        json=get_application_create_payload(form_id),
    )
    assert create_response.status_code == 201
    list_response = client.get(f"/application/formid/{form_id}", headers=auth_headers)
    assert list_response.status_code == 200
def test_application_status_list(app, client, session):
    """Tests the application status list endpoint with valid payload."""
    token = factory_auth_header()
    headers = {
        "Authorization": f"Bearer {token}",
        "content-type": "application/json",
    }
    # Seed one form + application so at least one status exists.
    rv = client.post("/form", headers=headers, json=get_form_request_payload())
    assert rv.status_code == 201
    form_id = rv.json.get("formId")
    rv = client.post(
        "/application/create",
        headers=headers,
        json=get_application_create_payload(form_id),
    )
    assert rv.status_code == 201
    response = client.get("/application/status/list", headers=headers)
    assert response.status_code == 200
    # The status listing must be non-empty (truthy) after seeding.
    assert response.json["applicationStatus"]
def test_application_create_method(app, client, session):
    """Tests the application create method with valid payload."""
    # Build the auth headers once and reuse them for both POSTs.
    auth_headers = {
        "Authorization": f"Bearer {factory_auth_header()}",
        "content-type": "application/json",
    }
    form_response = client.post(
        "/form", headers=auth_headers, json=get_form_request_payload()
    )
    assert form_response.status_code == 201
    created = client.post(
        "/application/create",
        headers=auth_headers,
        json=get_application_create_payload(form_response.json.get("formId")),
    )
    assert created.status_code == 201
def test_application_payload(app, client, session):
    """Tests the application create endpoint with valid payload."""
    token = factory_auth_header()
    headers = {
        "Authorization": f"Bearer {token}",
        "content-type": "application/json",
    }
    rv = client.post("/form", headers=headers, json=get_form_request_payload())
    assert rv.status_code == 201
    form_id = rv.json.get("formId")
    rv = client.post(
        "/application/create",
        headers=headers,
        json=get_application_create_payload(form_id),
    )
    assert rv.status_code == 201
    application_response = rv.json
    # A freshly created application starts in the "New" state and its
    # formUrl embeds the form id it was created against.
    assert application_response["applicationStatus"] == "New"
    assert application_response["formUrl"] == f"http://sample.com/form/{form_id}/submission/1233432"
def test_application_update_details_api(app, client, session):
    """Tests the application update endpoint with valid payload.

    Creates a form and an application, then round-trips the application
    details through a GET + PUT and checks the update succeeds.
    """
    token = factory_auth_header()
    headers = {
        "Authorization": f"Bearer {token}",
        "content-type": "application/json",
    }
    rv = client.post("/form", headers=headers, json=get_form_request_payload())
    assert rv.status_code == 201
    form_id = rv.json.get("formId")
    rv = client.post(
        "/application/create",
        headers=headers,
        json=get_application_create_payload(form_id),
    )
    assert rv.status_code == 201
    application_id = rv.json.get("id")
    # Bug fix: the old check was ``assert rv != {}`` which compared the
    # Response object itself to a dict and could therefore never fail.
    # Assert on the JSON body instead.
    assert rv.json != {}
    rv = client.get(f"/application/{application_id}", headers=headers)
    payload = rv.json
    payload["applicationStatus"] = "New"
    rv = client.put(f"/application/{application_id}", headers=headers, json=payload)
    assert rv.status_code == 200
    assert rv.json == "Updated successfully"
| 34.302682 | 100 | 0.624707 |
795402c19ffcdb82de32560aa3e330ee034cc1c8 | 1,450 | py | Python | Datacamp Assignments/Data Engineer Track/4. Writing Efficient Python Code/21_gathering_unique_items.py | Ali-Parandeh/Data_Science_Playground | c529e9b3692381572de259e7c93938d6611d83da | [
"MIT"
] | null | null | null | Datacamp Assignments/Data Engineer Track/4. Writing Efficient Python Code/21_gathering_unique_items.py | Ali-Parandeh/Data_Science_Playground | c529e9b3692381572de259e7c93938d6611d83da | [
"MIT"
] | null | null | null | Datacamp Assignments/Data Engineer Track/4. Writing Efficient Python Code/21_gathering_unique_items.py | Ali-Parandeh/Data_Science_Playground | c529e9b3692381572de259e7c93938d6611d83da | [
"MIT"
] | 1 | 2021-03-10T09:40:05.000Z | 2021-03-10T09:40:05.000Z | # Use find_unique_items() to collect unique Pokémon names
uniq_names_func = find_unique_items(names)
print(len(uniq_names_func))
'''
368
'''
# Convert the names list to a set to collect unique Pokémon names
uniq_names_set = set(names)
print(len(uniq_names_set))
'''
368
'''
# Check that both unique collections are equivalent
print(sorted(uniq_names_func) == sorted(uniq_names_set))
'''
True
'''
# Use the best approach to collect unique primary types and generations
uniq_types = set(primary_types)
uniq_gens = set(generations)
print(uniq_types, uniq_gens, sep='\n')
'''
{'Dragon', 'Ice', 'Electric', 'Dark', 'Fighting', 'Ghost', 'Normal', 'Fire', 'Steel', 'Fairy', 'Poison', 'Water', 'Bug', 'Ground', 'Rock', 'Grass', 'Psychic'}
{1, 2, 3, 4, 5, 6}
'''
# In [1]: %timeit find_unique_items(names)
# 2.03 ms +- 285 us per loop (mean +- std. dev. of 7 runs, 1000 loops each)
# In [2]: %timeit set(names)
# 8.53 us +- 172 ns per loop (mean +- std. dev. of 7 runs, 100000 loops each)
'''
Using a set data type to collect unique values is much faster than using a for loop
(like in the find_unique_items() function). Since a set is defined as a collection of distinct elements,
it is an efficient way to collect unique items from an existing object. Here you took advantage of a set
to find the distinct Pokémon from the sample (eliminating duplicate Pokémon)
and saw what unique Pokémon types and generations were included in the sample.
''' | 30.851064 | 158 | 0.717931 |
795402d59431b803c332ac676a7b71ad83f40853 | 5,275 | py | Python | translate/instantiate.py | schultet/goa | b76af608109ff217a2b68ca4e66582850c0a47ea | [
"MIT"
] | 2 | 2019-05-13T11:59:18.000Z | 2019-05-13T12:37:51.000Z | translate/instantiate.py | schultet/goa | b76af608109ff217a2b68ca4e66582850c0a47ea | [
"MIT"
] | null | null | null | translate/instantiate.py | schultet/goa | b76af608109ff217a2b68ca4e66582850c0a47ea | [
"MIT"
] | null | null | null | #! /usr/bin/env python
from __future__ import print_function
from collections import defaultdict
import build_model
import pddl_to_prolog
import pddl
import timers
def get_fluent_facts(task, model):
    """Return the facts of *model* whose predicate is fluent.

    A predicate counts as fluent if it appears in some action effect or
    is derived by an axiom; all other predicates are static for the
    purposes of grounding.
    """
    # Collect fluent predicate names from action effects and axiom heads.
    fluent_predicates = {effect.literal.predicate
                         for action in task.actions
                         for effect in action.effects}
    fluent_predicates.update(axiom.name for axiom in task.axioms)
    # Set comprehension replaces the old set([...]) list-building idiom.
    return {fact for fact in model if fact.predicate in fluent_predicates}
def get_objects_by_type(typed_objects, types):
    """Map every type name to the objects of that type or any subtype."""
    # Pre-compute the supertype chain for each declared type.
    supertypes = {t.name: t.supertype_names for t in types}
    result = defaultdict(list)
    for obj in typed_objects:
        # Register the object under its own type first, then under each
        # of its supertypes, preserving the declared order.
        for type_name in [obj.type] + list(supertypes[obj.type]):
            result[type_name].append(obj.name)
    return result
def instantiate(task, model, add_fluents):
    """Ground the lifted task against the relaxed-reachability *model*.

    Args:
        task: the (normalized) PDDL task.
        model: relaxed-reachable atoms; includes pseudo-atoms whose
            "predicate" is a pddl.Action / pddl.Axiom schema, which drive
            the instantiation below.
        add_fluents: extra atoms to treat as fluent (used by the
            multi-agent exploration to inject other agents' public facts).

    Returns:
        ``(relaxed_reachable, fluent_facts, instantiated_actions,
        sorted_instantiated_axioms, reachable_action_parameters)``.
    """
    relaxed_reachable = False
    fluent_facts = get_fluent_facts(task, model)
    fluent_facts |= add_fluents
    init_facts = set(task.init)
    type_to_objects = get_objects_by_type(task.objects, task.types)
    instantiated_actions = []
    instantiated_axioms = []
    reachable_action_parameters = defaultdict(list)
    for atom in model:
        if isinstance(atom.predicate, pddl.Action):
            action = atom.predicate
            parameters = action.parameters
            inst_parameters = atom.args[:len(parameters)]
            # Note: It's important that we use the action object
            # itself as the key in reachable_action_parameters (rather
            # than action.name) since we can have multiple different
            # actions with the same name after normalization, and we
            # want to distinguish their instantiations.
            reachable_action_parameters[action].append(inst_parameters)
            variable_mapping = dict([(par.name, arg)
                                     for par, arg in zip(parameters, atom.args)])
            inst_action = action.instantiate(variable_mapping, init_facts,
                                             fluent_facts, type_to_objects)
            # instantiate() may return a falsy value for pruned groundings.
            if inst_action:
                instantiated_actions.append(inst_action)
        elif isinstance(atom.predicate, pddl.Axiom):
            axiom = atom.predicate
            variable_mapping = dict([(par.name, arg)
                                     for par, arg in zip(axiom.parameters, atom.args)])
            inst_axiom = axiom.instantiate(variable_mapping, init_facts, fluent_facts)
            if inst_axiom:
                instantiated_axioms.append(inst_axiom)
        elif atom.predicate == "@goal-reachable":
            # Sentinel atom produced by the exploration program when the
            # goal is reachable under the delete relaxation.
            relaxed_reachable = True
    return (relaxed_reachable, fluent_facts, instantiated_actions,
            sorted(instantiated_axioms), reachable_action_parameters)
def _explore(task, add_fluents = set()):
    """Translate *task* to a logic program, compute its model, and ground it.

    NOTE(review): the mutable default ``set()`` is shared across calls but is
    only read, never mutated, so it is harmless here -- confirm before reuse.
    """
    prog = pddl_to_prolog.translate(task, add_fluents)
    model = build_model.compute_model(prog)
    with timers.timing("Completing instantiation"):
        return instantiate(task, model, add_fluents)
def public_fluents(fluents):
    """Return ``(predicate, args)`` pairs for every non-private fluent.

    The pairs are plain tuples so they can be exchanged between agents
    during distributed exploration (see exploreMA).
    """
    # One comprehension replaces the old filter(lambda ...) + list-comp pair.
    return [(f.predicate, f.args) for f in fluents if not f.is_private]
def _exploreMA(task, add_pub_atoms = set()):
    """One local exploration step for the multi-agent protocol.

    Runs _explore with the public atoms received from other agents,
    marks which reachable atoms are private, and returns both the full
    exploration result and this agent's current public fluents.
    """
    result = _explore(task, add_pub_atoms)
    # result[1] is the set of reachable fluent facts.
    task.mark_private_atoms(result[1])
    pub_fluents = public_fluents(result[1])
    return result, pub_fluents
def exploreMA(task, comm):
    """Distributed grounding over a ring of agents.

    Agents repeatedly pass their public fluents around the ring and
    re-ground with the received atoms until the master observes a
    fixpoint (no new public fluents), at which point ``None`` is
    circulated to terminate all agents. Returns the last local result.

    NOTE(review): ``res`` is only assigned on the master before the loop;
    a non-master agent whose first message is ``None`` would hit an
    unbound ``res`` below -- confirm the protocol guarantees at least one
    fluent round before termination.
    """
    add_atoms = set()
    if comm.is_master:
        # Initial exploration done by the master agent
        res, pub_fluents = _exploreMA(task, add_atoms)
        comm.sendInRing(pub_fluents)
    while True:
        # receive all public fluents from the previous agent in ring
        pub_fluents = comm.recvInRing()
        if pub_fluents is None:
            # Detect end of the distributed exploration
            if not comm.is_master:
                comm.sendInRing(None)
            break
        if comm.agent_id == 0:
            # if the master agent has received the same set of fluents as
            # it already has, it means that the set cannot change anymore
            pub_cmp = public_fluents(res[1])
            if sorted(pub_fluents) == sorted(pub_cmp):
                comm.sendInRing(None)
                continue
        add_atoms = set([pddl.Atom(x[0], x[1]) for x in pub_fluents])
        res, pub_fluents = _exploreMA(task, add_atoms)
        comm.sendInRing(pub_fluents)
    if not res[0]:
        # Goal not relaxed-reachable: report which goal atoms are missing.
        not_reach = set(task.goal.parts) - res[1]
        print('Not reachable atoms: {0}'.format(' '.join([str(x) for x in not_reach])))
    return res
def explore(task, comm = None):
    """Ground *task*; use the multi-agent ring protocol when *comm* is given."""
    if comm is None:
        return _explore(task)
    return exploreMA(task, comm)
if __name__ == "__main__":
    # Stand-alone mode: parse the task from the default input files and
    # dump the full grounding result for inspection.
    task = pddl.open()
    relaxed_reachable, atoms, actions, axioms, _ = explore(task)
    print("goal relaxed reachable: %s" % relaxed_reachable)
    print("%d atoms:" % len(atoms))
    for atom in atoms:
        print(" ", atom)
    print()
    print("%d actions:" % len(actions))
    for action in actions:
        action.dump()
        print()
    print()
    print("%d axioms:" % len(axioms))
    for axiom in axioms:
        axiom.dump()
        print()
| 35.641892 | 87 | 0.640758 |
795403e1b2351da644ae486f1de11d26fbd4fda5 | 101 | py | Python | src/bucket_service/bucket_app/run.py | cinmoy98/bucket-list---microservices-restapi-flask-mongodb | 2f1f34c1e88ef3d7d06447042a3c1256dd789a4a | [
"MIT"
] | null | null | null | src/bucket_service/bucket_app/run.py | cinmoy98/bucket-list---microservices-restapi-flask-mongodb | 2f1f34c1e88ef3d7d06447042a3c1256dd789a4a | [
"MIT"
] | null | null | null | src/bucket_service/bucket_app/run.py | cinmoy98/bucket-list---microservices-restapi-flask-mongodb | 2f1f34c1e88ef3d7d06447042a3c1256dd789a4a | [
"MIT"
] | null | null | null | from app import bapp
if __name__ == '__main__':
bapp.run(host = '127.0.0.3', port=5000, debug=True) | 25.25 | 52 | 0.683168 |
795404e4e14faecb5237aee9e42759988b11d60b | 1,354 | py | Python | ooobuild/dyn/report/x_report_engine.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/dyn/report/x_report_engine.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/dyn/report/x_report_engine.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.report
from typing import TYPE_CHECKING
from ooo.oenv.env_const import UNO_ENVIRONMENT, UNO_RUNTIME
_DYNAMIC = False
if (not TYPE_CHECKING) and UNO_RUNTIME and UNO_ENVIRONMENT:
_DYNAMIC = True
if not TYPE_CHECKING and _DYNAMIC:
from com.sun.star.report import XReportEngine as XReportEngine
setattr(XReportEngine, '__ooo_ns__', 'com.sun.star.report')
setattr(XReportEngine, '__ooo_full_ns__', 'com.sun.star.report.XReportEngine')
setattr(XReportEngine, '__ooo_type_name__', 'interface')
else:
from ...lo.report.x_report_engine import XReportEngine as XReportEngine
__all__ = ['XReportEngine']
| 36.594595 | 82 | 0.767356 |
7954051e64c761a6473a4ae54e063c1b13269191 | 23,695 | py | Python | onmt/Models.py | mingchen62/im2text-pytorch | 9516be1aad70517603383a92670c296f8d7e343e | [
"MIT"
] | 1 | 2020-03-24T08:42:38.000Z | 2020-03-24T08:42:38.000Z | onmt/Models.py | mingchen62/im2text-pytorch | 9516be1aad70517603383a92670c296f8d7e343e | [
"MIT"
] | null | null | null | onmt/Models.py | mingchen62/im2text-pytorch | 9516be1aad70517603383a92670c296f8d7e343e | [
"MIT"
] | null | null | null | from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence as pack
from torch.nn.utils.rnn import pad_packed_sequence as unpack
import onmt
from onmt.Utils import aeq
def rnn_factory(rnn_type, **kwargs):
    """Build an RNN module of the requested type.

    Returns ``(rnn, no_pack_padded_seq)`` where the flag tells callers
    that ``PackedSequence`` inputs must be avoided (True only for SRU).
    """
    if rnn_type == "SRU":
        # onmt's SRU implementation cannot consume PackedSequence.
        return onmt.modules.SRU(**kwargs), True
    # For RNN/LSTM/GRU, dispatch to the matching torch.nn class.
    return getattr(nn, rnn_type)(**kwargs), False
class EncoderBase(nn.Module):
    """
    Base encoder class. Specifies the interface used by different encoder types
    and required by :obj:`onmt.Models.NMTModel`.

    .. mermaid::
       graph BT
          A[Input]
          subgraph RNN
            C[Pos 1]
            D[Pos 2]
            E[Pos N]
          end
          F[Memory_Bank]
          G[Final]
          A-->C
          A-->D
          A-->E
          C-->F
          D-->F
          E-->F
          E-->G
    """
    def _check_args(self, input, lengths=None, hidden=None):
        # Sanity check: `input` is expected to be [src_len x batch x nfeat];
        # aeq (from onmt.Utils, presumably "assert equal") verifies that
        # `lengths` has one entry per batch element.
        s_len, n_batch, n_feats = input.size()
        if lengths is not None:
            n_batch_, = lengths.size()
            aeq(n_batch, n_batch_)
    def forward(self, src, lengths=None, encoder_state=None):
        """
        Args:
            src (:obj:`LongTensor`):
               padded sequences of sparse indices `[src_len x batch x nfeat]`
            lengths (:obj:`LongTensor`): length of each sequence `[batch]`
            encoder_state (rnn-class specific):
               initial encoder_state state.

        Returns:
            (tuple of :obj:`FloatTensor`, :obj:`FloatTensor`):
                * final encoder state, used to initialize decoder
                * memory bank for attention, `[src_len x batch x hidden]`
        """
        # Subclasses must implement the actual encoding.
        raise NotImplementedError
class MeanEncoder(EncoderBase):
    """A trivial non-recurrent encoder. Simply applies mean pooling.

    Args:
       num_layers (int): number of replicated layers
       embeddings (:obj:`onmt.modules.Embeddings`): embedding module to use
    """
    def __init__(self, num_layers, embeddings):
        super(MeanEncoder, self).__init__()
        self.num_layers = num_layers
        self.embeddings = embeddings
    def forward(self, src, lengths=None, encoder_state=None):
        "See :obj:`EncoderBase.forward()`"
        self._check_args(src, lengths, encoder_state)
        emb = self.embeddings(src)
        s_len, batch, emb_dim = emb.size()
        # Mean over the time dimension, replicated once per decoder layer.
        mean = emb.mean(0).expand(self.num_layers, batch, emb_dim)
        # The raw embeddings double as the attention memory bank.
        memory_bank = emb
        # (mean, mean) mimics an LSTM's (h, c) final-state pair -- presumably
        # so the decoder can treat this like an RNN encoder; confirm.
        encoder_final = (mean, mean)
        return encoder_final, memory_bank
class RNNEncoder(EncoderBase):
    """ A generic recurrent neural network encoder.

    Args:
       rnn_type (:obj:`str`):
          style of recurrent unit to use, one of [RNN, LSTM, GRU, SRU]
       bidirectional (bool) : use a bidirectional RNN
       num_layers (int) : number of stacked layers
       hidden_size (int) : hidden size of each layer
       dropout (float) : dropout value for :obj:`nn.Dropout`
       embeddings (:obj:`onmt.modules.Embeddings`): embedding module to use
       use_bridge (bool) : pass the final encoder state through a learned
          linear "bridge" before returning it
    """
    def __init__(self, rnn_type, bidirectional, num_layers,
                 hidden_size, dropout=0.0, embeddings=None,
                 use_bridge=False):
        super(RNNEncoder, self).__init__()
        assert embeddings is not None
        num_directions = 2 if bidirectional else 1
        assert hidden_size % num_directions == 0
        # Split the hidden size across directions so the concatenated
        # bidirectional output still has `hidden_size` features.
        hidden_size = hidden_size // num_directions
        self.embeddings = embeddings
        self.rnn, self.no_pack_padded_seq = \
            rnn_factory(rnn_type,
                        input_size=embeddings.embedding_size,
                        hidden_size=hidden_size,
                        num_layers=num_layers,
                        dropout=dropout,
                        bidirectional=bidirectional)
        # Initialize the bridge layer
        self.use_bridge = use_bridge
        if self.use_bridge:
            self._initialize_bridge(rnn_type,
                                    hidden_size,
                                    num_layers)
    def forward(self, src, lengths=None, encoder_state=None):
        "See :obj:`EncoderBase.forward()`"
        self._check_args(src, lengths, encoder_state)
        emb = self.embeddings(src)
        s_len, batch, emb_dim = emb.size()
        packed_emb = emb
        if lengths is not None and not self.no_pack_padded_seq:
            # Lengths data is wrapped inside a Variable.
            lengths = lengths.view(-1).tolist()
            packed_emb = pack(emb, lengths)
        memory_bank, encoder_final = self.rnn(packed_emb, encoder_state)
        if lengths is not None and not self.no_pack_padded_seq:
            # Restore a padded tensor from the PackedSequence output.
            memory_bank = unpack(memory_bank)[0]
        if self.use_bridge:
            encoder_final = self._bridge(encoder_final)
        return encoder_final, memory_bank
    def _initialize_bridge(self, rnn_type,
                           hidden_size,
                           num_layers):
        # LSTM has hidden and cell state, other only one
        number_of_states = 2 if rnn_type == "LSTM" else 1
        # Total number of states
        self.total_hidden_dim = hidden_size * num_layers
        # Build a linear layer for each
        self.bridge = nn.ModuleList([nn.Linear(self.total_hidden_dim,
                                               self.total_hidden_dim,
                                               bias=True)
                                     for i in range(number_of_states)])
    def _bridge(self, hidden):
        """
        Forward hidden state through bridge
        """
        def bottle_hidden(linear, states):
            """
            Transform from 3D to 2D, apply linear and return initial size
            """
            size = states.size()
            result = linear(states.view(-1, self.total_hidden_dim))
            return F.relu(result).view(size)
        if isinstance(hidden, tuple):  # LSTM: transform (h, c) independently
            outs = tuple([bottle_hidden(layer, hidden[ix])
                          for ix, layer in enumerate(self.bridge)])
        else:
            outs = bottle_hidden(self.bridge[0], hidden)
        return outs
class RNNDecoderBase(nn.Module):
    """
    Base recurrent attention-based decoder class.
    Specifies the interface used by different decoder types
    and required by :obj:`onmt.Models.NMTModel`.

    .. mermaid::
       graph BT
          A[Input]
          subgraph RNN
             C[Pos 1]
             D[Pos 2]
             E[Pos N]
          end
          G[Decoder State]
          H[Decoder State]
          I[Outputs]
          F[Memory_Bank]
          A--emb-->C
          A--emb-->D
          A--emb-->E
          H-->C
          C-- attn --- F
          D-- attn --- F
          E-- attn --- F
          C-->I
          D-->I
          E-->I
          E-->G
          F---I

    Args:
       rnn_type (:obj:`str`):
          style of recurrent unit to use, one of [RNN, LSTM, GRU, SRU]
       bidirectional_encoder (bool) : use with a bidirectional encoder
       num_layers (int) : number of stacked layers
       hidden_size (int) : hidden size of each layer
       attn_type (str) : see :obj:`onmt.modules.GlobalAttention`
       coverage_attn (str): see :obj:`onmt.modules.GlobalAttention`
       context_gate (str): see :obj:`onmt.modules.ContextGate`
       copy_attn (bool): setup a separate copy attention mechanism
       dropout (float) : dropout value for :obj:`nn.Dropout`
       embeddings (:obj:`onmt.modules.Embeddings`): embedding module to use
       reuse_copy_attn (bool): reuse the standard attention for copying
    """
    def __init__(self, rnn_type, bidirectional_encoder, num_layers,
                 hidden_size, attn_type="general",
                 coverage_attn=False, context_gate=None,
                 copy_attn=False, dropout=0.0, embeddings=None,
                 reuse_copy_attn=False):
        super(RNNDecoderBase, self).__init__()
        # Basic attributes.
        self.decoder_type = 'rnn'
        self.bidirectional_encoder = bidirectional_encoder
        self.num_layers = num_layers
        self.hidden_size = hidden_size
        self.embeddings = embeddings
        self.dropout = nn.Dropout(dropout)
        # Build the RNN. _input_size is subclass-defined (input feeding
        # enlarges it by hidden_size).
        self.rnn = self._build_rnn(rnn_type,
                                   input_size=self._input_size,
                                   hidden_size=hidden_size,
                                   num_layers=num_layers,
                                   dropout=dropout)
        # Set up the context gate.
        self.context_gate = None
        if context_gate is not None:
            self.context_gate = onmt.modules.context_gate_factory(
                context_gate, self._input_size,
                hidden_size, hidden_size, hidden_size
            )
        # Set up the standard attention.
        self._coverage = coverage_attn
        self.attn = onmt.modules.GlobalAttention(
            hidden_size, coverage=coverage_attn,
            attn_type=attn_type
        )
        # Set up a separated copy attention layer, if needed.
        self._copy = False
        if copy_attn and not reuse_copy_attn:
            self.copy_attn = onmt.modules.GlobalAttention(
                hidden_size, attn_type=attn_type
            )
        if copy_attn:
            self._copy = True
        self._reuse_copy_attn = reuse_copy_attn
    def forward(self, tgt, memory_bank, state, memory_lengths=None):
        """
        Args:
            tgt (`LongTensor`): sequences of padded tokens
                                `[tgt_len x batch x nfeats]`.
            memory_bank (`FloatTensor`): vectors from the encoder
                 `[src_len x batch x hidden]`.
            state (:obj:`onmt.Models.DecoderState`):
                 decoder state object to initialize the decoder
            memory_lengths (`LongTensor`): the padded source lengths
                `[batch]`.
        Returns:
            (`FloatTensor`,:obj:`onmt.Models.DecoderState`,`FloatTensor`):
                * decoder_outputs: output from the decoder (after attn)
                  `[tgt_len x batch x hidden]`.
                * decoder_state: final hidden state from the decoder
                * attns: distribution over src at each tgt
                  `[tgt_len x batch x src_len]`.
        """
        # Check
        assert isinstance(state, RNNDecoderState)
        tgt_len, tgt_batch, _ = tgt.size()
        _, memory_batch, _ = memory_bank.size()
        aeq(tgt_batch, memory_batch)
        # END
        # Run the forward pass of the RNN (subclass-specific).
        decoder_final, decoder_outputs, attns = self._run_forward_pass(
            tgt, memory_bank, state, memory_lengths=memory_lengths)
        # Update the state with the result.
        final_output = decoder_outputs[-1]
        coverage = None
        if "coverage" in attns:
            coverage = attns["coverage"][-1].unsqueeze(0)
        state.update_state(decoder_final, final_output.unsqueeze(0), coverage)
        # Concatenates sequence of tensors along a new dimension.
        decoder_outputs = torch.stack(decoder_outputs)
        for k in attns:
            attns[k] = torch.stack(attns[k])
        return decoder_outputs, state, attns
    def init_decoder_state(self, src, memory_bank, encoder_final):
        # Build the initial decoder state from the encoder's final state.
        def _fix_enc_hidden(h):
            # The encoder hidden is  (layers*directions) x batch x dim.
            # We need to convert it to layers x batch x (directions*dim).
            if self.bidirectional_encoder:
                h = torch.cat([h[0:h.size(0):2], h[1:h.size(0):2]], 2)
            return h
        if isinstance(encoder_final, tuple):  # LSTM
            return RNNDecoderState(self.hidden_size,
                                   tuple([_fix_enc_hidden(enc_hid)
                                         for enc_hid in encoder_final]))
        else:  # GRU
            return RNNDecoderState(self.hidden_size,
                                   _fix_enc_hidden(encoder_final))
class StdRNNDecoder(RNNDecoderBase):
    """
    Standard fully batched RNN decoder with attention.
    Faster implementation, uses CuDNN for implementation.
    See :obj:`RNNDecoderBase` for options.

    Based around the approach from
    "Neural Machine Translation By Jointly Learning To Align and Translate"
    :cite:`Bahdanau2015`

    Implemented without input_feeding and currently with no `coverage_attn`
    or `copy_attn` support.
    """
    def _run_forward_pass(self, tgt, memory_bank, state, memory_lengths=None):
        """
        Private helper for running the specific RNN forward pass.
        Must be overriden by all subclasses.
        Args:
            tgt (LongTensor): a sequence of input tokens tensors
                                 [len x batch x nfeats].
            memory_bank (FloatTensor): output(tensor sequence) from the encoder
                        RNN of size (src_len x batch x hidden_size).
            state (FloatTensor): hidden state from the encoder RNN for
                                 initializing the decoder.
            memory_lengths (LongTensor): the source memory_bank lengths.
        Returns:
            decoder_final (Variable): final hidden state from the decoder.
            decoder_outputs ([FloatTensor]): an array of output of every time
                                     step from the decoder.
            attns (dict of (str, [FloatTensor]): a dictionary of different
                            type of attention Tensor array of every time
                            step from the decoder.
        """
        assert not self._copy  # TODO, no support yet.
        assert not self._coverage  # TODO, no support yet.
        # Initialize local and return variables.
        attns = {}
        emb = self.embeddings(tgt)
        # Run the forward pass of the RNN. GRU state is a single tensor,
        # so unwrap the (h,) tuple; LSTM takes the (h, c) pair directly.
        if isinstance(self.rnn, nn.GRU):
            rnn_output, decoder_final = self.rnn(emb, state.hidden[0])
        else:
            rnn_output, decoder_final = self.rnn(emb, state.hidden)
        # Check
        tgt_len, tgt_batch, _ = tgt.size()
        output_len, output_batch, _ = rnn_output.size()
        aeq(tgt_len, output_len)
        aeq(tgt_batch, output_batch)
        # END
        # Calculate the attention over the whole batched sequence at once.
        decoder_outputs, p_attn = self.attn(
            rnn_output.transpose(0, 1).contiguous(),
            memory_bank.transpose(0, 1),
            memory_lengths=memory_lengths
        )
        attns["std"] = p_attn
        # Calculate the context gate.
        if self.context_gate is not None:
            decoder_outputs = self.context_gate(
                emb.view(-1, emb.size(2)),
                rnn_output.view(-1, rnn_output.size(2)),
                decoder_outputs.view(-1, decoder_outputs.size(2))
            )
            decoder_outputs = \
                decoder_outputs.view(tgt_len, tgt_batch, self.hidden_size)
        decoder_outputs = self.dropout(decoder_outputs)
        return decoder_final, decoder_outputs, attns
    def _build_rnn(self, rnn_type, **kwargs):
        rnn, _ = rnn_factory(rnn_type, **kwargs)
        return rnn
    @property
    def _input_size(self):
        """
        Private helper returning the number of expected features.
        """
        return self.embeddings.embedding_size
class InputFeedRNNDecoder(RNNDecoderBase):
    """
    Input feeding based decoder. See :obj:`RNNDecoderBase` for options.

    Based around the input feeding approach from
    "Effective Approaches to Attention-based Neural Machine Translation"
    :cite:`Luong2015`

    .. mermaid::
       graph BT
          A[Input n-1]
          AB[Input n]
          subgraph RNN
            E[Pos n-1]
            F[Pos n]
            E --> F
          end
          G[Encoder]
          H[Memory_Bank n-1]
          A --> E
          AB --> F
          E --> H
          G --> H
    """
    def _run_forward_pass(self, tgt, memory_bank, state, memory_lengths=None):
        """
        See StdRNNDecoder._run_forward_pass() for description
        of arguments and return values.
        """
        # Additional args check.
        input_feed = state.input_feed.squeeze(0)
        input_feed_batch, _ = input_feed.size()
        tgt_len, tgt_batch, _ = tgt.size()
        aeq(tgt_batch, input_feed_batch)
        # END Additional args check.
        # Initialize local and return variables.
        decoder_outputs = []
        attns = {"std": []}
        if self._copy:
            attns["copy"] = []
        if self._coverage:
            attns["coverage"] = []
        emb = self.embeddings(tgt)
        assert emb.dim() == 3  # len x batch x embedding_dim
        hidden = state.hidden
        coverage = state.coverage.squeeze(0) \
            if state.coverage is not None else None
        # Input feed concatenates the previous attentional hidden state with
        # the embedding at every time step (Luong et al., 2015).
        for i, emb_t in enumerate(emb.split(1)):
            emb_t = emb_t.squeeze(0)
            decoder_input = torch.cat([emb_t, input_feed], 1)
            rnn_output, hidden = self.rnn(decoder_input, hidden)
            decoder_output, p_attn = self.attn(
                rnn_output,
                memory_bank.transpose(0, 1),
                memory_lengths=memory_lengths)
            if self.context_gate is not None:
                # TODO: context gate should be employed
                # instead of second RNN transform.
                decoder_output = self.context_gate(
                    decoder_input, rnn_output, decoder_output
                )
            decoder_output = self.dropout(decoder_output)
            # Bug fix: feed the current attentional output into the next
            # time step. Previously `input_feed` was never updated, so every
            # step reused the initial feed, defeating input feeding.
            input_feed = decoder_output
            decoder_outputs += [decoder_output]
            attns["std"] += [p_attn]
            # Update the coverage attention.
            if self._coverage:
                coverage = coverage + p_attn \
                    if coverage is not None else p_attn
                attns["coverage"] += [coverage]
            # Run the forward pass of the copy attention layer.
            if self._copy and not self._reuse_copy_attn:
                _, copy_attn = self.copy_attn(decoder_output,
                                              memory_bank.transpose(0, 1))
                attns["copy"] += [copy_attn]
            elif self._copy:
                # Reuse the standard attention distributions for copying.
                attns["copy"] = attns["std"]
        # Return result.
        return hidden, decoder_outputs, attns
    def _build_rnn(self, rnn_type, input_size,
                   hidden_size, num_layers, dropout):
        # Stacked cells are required because each time step is driven manually.
        assert not rnn_type == "SRU", "SRU doesn't support input feed! " \
            "Please set -input_feed 0!"
        if rnn_type == "LSTM":
            stacked_cell = onmt.modules.StackedLSTM
        else:
            stacked_cell = onmt.modules.StackedGRU
        return stacked_cell(num_layers, input_size,
                            hidden_size, dropout)
    @property
    def _input_size(self):
        """
        Using input feed by concatenating input with attention vectors.
        """
        return self.embeddings.embedding_size + self.hidden_size
class NMTModel(nn.Module):
    """
    Core trainable object in OpenNMT. Implements a trainable interface
    for a simple, generic encoder + decoder model.

    Args:
        encoder (:obj:`EncoderBase`): an encoder object
        decoder (:obj:`RNNDecoderBase`): a decoder object
        multigpu (bool): setup for multigpu support
    """

    def __init__(self, encoder, decoder, multigpu=False):
        # Initialize nn.Module FIRST: nn.Module.__setattr__ relies on the
        # internal registries (_parameters/_modules) set up by __init__, so
        # assigning attributes before calling it is fragile.
        super(NMTModel, self).__init__()
        self.multigpu = multigpu
        self.encoder = encoder
        self.decoder = decoder

    def forward(self, src, tgt, lengths, dec_state=None):
        """Forward propagate a `src` and `tgt` pair for training.
        Possible initialized with a beginning decoder state.

        Args:
            src (:obj:`Tensor`):
                a source sequence passed to encoder.
                typically for inputs this will be a padded :obj:`LongTensor`
                of size `[len x batch x features]`. however, may be an
                image or other generic input depending on encoder.
            tgt (:obj:`LongTensor`):
                a target sequence of size `[tgt_len x batch]`.
            lengths(:obj:`LongTensor`): the src lengths, pre-padding `[batch]`.
            dec_state (:obj:`DecoderState`, optional): initial decoder state
        Returns:
            (:obj:`FloatTensor`, `dict`, :obj:`onmt.Models.DecoderState`):

                 * decoder output `[tgt_len x batch x hidden]`
                 * dictionary attention dists of `[tgt_len x batch x src_len]`
                 * final decoder state
        """
        tgt = tgt[:-1]  # exclude last target from inputs
        enc_final, memory_bank = self.encoder(src, lengths)
        enc_state = \
            self.decoder.init_decoder_state(src, memory_bank, enc_final)
        decoder_outputs, dec_state, attns = \
            self.decoder(tgt, memory_bank,
                         enc_state if dec_state is None
                         else dec_state,
                         memory_lengths=lengths)
        if self.multigpu:
            # Not yet supported on multi-gpu
            dec_state = None
            attns = None
        return decoder_outputs, attns, dec_state
class DecoderState(object):
    """Interface for grouping together the current state of a recurrent
    decoder. In the simplest case just represents the hidden state of
    the model. But can also be used for implementing various forms of
    input_feeding and non-recurrent models.

    Modules need to implement this to utilize beam search decoding.
    """

    def detach(self):
        # Cut every state tensor out of the autograd graph, in place.
        for state in self._all:
            if state is not None:
                state.detach_()

    def beam_update(self, idx, positions, beam_size):
        # Reorder the beam dimension of every state tensor according to
        # `positions`, for the sentence at batch offset `idx`.
        for state in self._all:
            seq_len, batch_x_beam, dim = state.size()
            beamed = state.view(seq_len, beam_size,
                                batch_x_beam // beam_size, dim)
            sentence_states = beamed[:, :, idx]
            sentence_states.data.copy_(
                sentence_states.data.index_select(1, positions))
class RNNDecoderState(DecoderState):
    """Concrete DecoderState for RNN decoders: tracks the RNN hidden
    state(s), the input-feed vector, and (optionally) coverage."""

    def __init__(self, hidden_size, rnnstate):
        """
        Args:
            hidden_size (int): the size of hidden layer of the decoder.
            rnnstate: final hidden state from the encoder.
                transformed to shape: layers x batch x (directions*dim).
        """
        # LSTMs return (h, c); GRUs return a single tensor. Normalize to a
        # tuple so downstream code can treat both uniformly.
        if not isinstance(rnnstate, tuple):
            self.hidden = (rnnstate,)
        else:
            self.hidden = rnnstate
        self.coverage = None

        # Init the input feed to zeros of shape (1, batch, hidden_size).
        batch_size = self.hidden[0].size(1)
        h_size = (batch_size, hidden_size)
        # NOTE(review): this is the legacy (pre-0.4) PyTorch Variable API.
        self.input_feed = Variable(self.hidden[0].data.new(*h_size).zero_(),
                                   requires_grad=False).unsqueeze(0)

    @property
    def _all(self):
        # All state tensors, consumed by DecoderState.detach()/beam_update().
        return self.hidden + (self.input_feed,)

    def update_state(self, rnnstate, input_feed, coverage):
        # Replace the tracked state after a decoder forward pass.
        if not isinstance(rnnstate, tuple):
            self.hidden = (rnnstate,)
        else:
            self.hidden = rnnstate
        self.input_feed = input_feed
        self.coverage = coverage

    def repeat_beam_size_times(self, beam_size):
        """ Repeat beam_size times along batch dimension. """
        # `volatile=True` is the legacy mechanism for disabling autograd
        # tracking during inference (modern code uses torch.no_grad()).
        vars = [Variable(e.data.repeat(1, beam_size, 1), volatile=True)
                for e in self._all]
        self.hidden = tuple(vars[:-1])
        self.input_feed = vars[-1]
| 35.955994 | 79 | 0.578097 |
79540567aa7b78345ec78d7784122b87ed62854a | 17,453 | py | Python | marl/algorithms/masac/run_masac.py | Justin-Yuan/learn-to-interact | eb013bb3bab269bda8a8075e64fe3bcd2964d8ae | [
"MIT"
] | 1 | 2021-01-14T02:49:58.000Z | 2021-01-14T02:49:58.000Z | marl/algorithms/masac/run_masac.py | Justin-Yuan/learn-to-interact | eb013bb3bab269bda8a8075e64fe3bcd2964d8ae | [
"MIT"
] | 8 | 2020-09-25T21:36:55.000Z | 2022-02-10T01:17:25.000Z | marl/algorithms/masac/run_masac.py | Justin-Yuan/learn-to-interact | eb013bb3bab269bda8a8075e64fe3bcd2964d8ae | [
"MIT"
] | 1 | 2020-10-26T13:52:16.000Z | 2020-10-26T13:52:16.000Z | import os
import sys
# path at level marl/
sys.path.insert(0, os.path.abspath("."))
import time
import argparse
import numpy as np
from functools import partial
from collections import OrderedDict, defaultdict
import torch
# local
from algorithms.masac.utils import get_sample_scheme, dispatch_samples
from algorithms.masac.utils import make_parallel_env, log_results
from algorithms.masac import MASAC
from runners.make_env import ENV_MAP
from runners.sample_batch import EpisodeBatch
from runners.ctde_runner import CTDEEpisodeRunner
from runners.replay_buffer import EpisodeReplayBuffer
from utils.exp_utils import setup_experiment, ExperimentLogger, ExperimentState
from utils.exp_utils import time_left, time_str, merge_dict
#####################################################################################
### arguments
#####################################################################################
def parse_args():
    """Build and parse the command-line arguments for the MASAC experiment.

    Returns:
        argparse.Namespace: parsed arguments (experiment bookkeeping, env
        selection, training/exploration hyperparameters, evaluation and
        parallelism settings).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--exp", type=str, default="masac",
                        help="name of the experiment")
    parser.add_argument("--save_dir", type=str, default="./exps",
                        help="top level path to save experiment/training results")
    parser.add_argument("--sub_dir", type=str, nargs='+',
                        help="sub folders for experiment (hierarchical), e.g. sub=a b c --> local-dir/a/b/c")
    parser.add_argument("--tag", type=str, nargs='+',
                        help="additional info for experiment, i.e. hyperparameters")
    parser.add_argument("--seed", default=1, type=int,
                        help="Random seed, if 0, do not set seed")
    parser.add_argument("--restore", type=str, default=None,
                        help="directory in which training state and model are loaded")
    # if specified and not restore, will load model for experiment init
    # if also restore, will overwrite default path in restore_experiment
    parser.add_argument("--restore_model", type=str, default=None,
                        help="file in which model are loaded")

    ## NOTE: intervals are counted in env transitions (easier to log)
    parser.add_argument("--log_interval", default=25000, type=int,
                        help="frequency to log exploration/runner stats")
    parser.add_argument("--train_interval", default=0, type=int,
                        help="number of steps collected before each train")
    # parser.add_argument("--steps_per_update", default=100, type=int,
    #                     help="number of env steps collected before 1 training update")
    parser.add_argument("--target_update_interval", default=0, type=int,
                        help="syncing parameters with target networks")
    parser.add_argument("--train_log_interval", default=25000, type=int,
                        help="frequency to log training stats, e.g. losses")
    parser.add_argument("--eval_interval", default=25000, type=int,
                        help="number of steps collected before each evaluation")
    parser.add_argument("--save_interval", default=100000, type=int)

    # misc
    parser.add_argument("--cuda", default=False, action='store_true')
    parser.add_argument("--cluster", default=False, action='store_true',
                        help='if running in cluster (allow more resources)')
    parser.add_argument("--overwrite", type=str, nargs='+',
                        help="overwrite env config with format: nested_name nested_type value ...")
    parser.add_argument("--use_tensorboard", default=False, action='store_true',
                        help="if to use tensorboard for logging")
    parser.add_argument("--show_visual_range", default=False, action='store_true',
                        help='if to show agent visual range when rendering')

    # Environment
    parser.add_argument("--env", type=str, default="mpe_hier",
                        help="name of the environment", choices=["mpe", "mpe_hier"])
    parser.add_argument("--scenario", type=str, default="simple_spread",
                        help="name of the scenario script")
    parser.add_argument("--env_config", type=str, default="",
                        help="file to environment scenario config")
    ## max episode length for termination
    parser.add_argument("--episode_length", default=25, type=int,
                        help="max episode length")
    parser.add_argument("--agent_alg", default="MASAC", type=str,
                        help="agent model type", choices=['MASAC', 'SAC'])
    parser.add_argument("--adversary_alg", default="MASAC", type=str,
                        help="adversary model type", choices=['MASAC', 'SAC'])
    parser.add_argument("--discrete_action", action='store_true')

    # training
    parser.add_argument("--n_episodes", default=20000, type=int,
                        help="max number of episodes to sample")
    ## for non-early-terminated episodes, n_env_steps ~= n_episodes * episode_length
    parser.add_argument("--n_env_steps", default=500000, type=int,
                        help="max number of env steps to sample")
    ## NOTE: batch size is counted in episodes
    parser.add_argument("--batch_size", default=32, type=int,
                        help="Batch size for model training per update")
    ## in case train batch size too large, could use smaller batch size
    ## but multiple rounds of updates
    parser.add_argument("--n_updates_per_train", default=1, type=int,
                        help="number of updates per training round")
    parser.add_argument("--lr", default=0.01, type=float)
    parser.add_argument("--tau", default=0.01, type=float)
    parser.add_argument("--gamma", type=float, default=0.95,
                        help="discount factor")
    parser.add_argument("--sync_samples", default=False, action='store_true',
                        help="if to use synchronized samples for each agent training")
    # sac parameters
    parser.add_argument("--target_entropy", type=float, default=10.0,
                        help="constraint on SAC entropy target")

    # exploration/sampling
    ## NOTE: sampling is counted in episodes
    parser.add_argument("--sample_batch_size", default=8, type=int,
                        help="number of data points sampled () per run")
    parser.add_argument("--max_buffer_size", default=40000, type=int,
                        help="maximum number of samples (episodes) to save in replay buffer")
    # parser.add_argument("--max_buffer_size", default=int(1e6), type=int,
    #                     help="maximum number of samples (transitions) to save in replay buffer")
    # BUGFIX: replaced placeholder help text ("what is this ???") with the
    # actual meaning: exploration noise is annealed to final_noise_scale
    # over this many episodes (see the noise-scaling logic in run()).
    parser.add_argument("--n_exploration_eps", default=25000, type=int,
                        help="number of episodes over which exploration noise is annealed")
    parser.add_argument("--init_noise_scale", default=0.3, type=float)
    parser.add_argument("--final_noise_scale", default=0.0, type=float)
    parser.add_argument("--n_step", type=int, default=1,
                        help="length of multistep value backup")

    # model
    parser.add_argument("--hidden_dim", default=64, type=int)
    parser.add_argument("--critic", type=str, default="mlp",
                        help="type of critic network", choices=["mlp", "rnn", "gnn"])
    parser.add_argument("--actor", type=str, default="mlp",
                        help="type of actor network", choices=["mlp", "rnn", "gnn"])

    # evaluation
    parser.add_argument("--no_eval", default=False, action='store_true',
                        help="do evaluation during training")
    parser.add_argument("--no_render", default=False, action='store_true',
                        help='if to stop rendering in evaluation rollouts')
    parser.add_argument("--eval_n_episodes", default=10, type=int)
    parser.add_argument("--eval_batch_size", default=2, type=int,
                        help="number of data points evaluated () per run")

    # loggings
    parser.add_argument("--log_agent_returns", default=False, action='store_true',
                        help="if to log per agent returns on tensorboard")

    # parallelism
    parser.add_argument("--n_rollout_threads", default=4, type=int,
                        help="number of parallel sampling workers to use")
    parser.add_argument("--n_training_threads", default=4, type=int)

    args = parser.parse_args()
    return args
#####################################################################################
### main
####################################################################################
def run(args):
    """
    Main entry point: builds the experiment (envs, MASAC learner, runners,
    replay buffer), then drives the sample -> train -> eval -> checkpoint
    loop until the episode budget is exhausted.

    Args:
        args: parsed command-line arguments (see parse_args).
    """
    # NOTE: experiment setup
    config, is_restore = setup_experiment(args)
    logger = ExperimentLogger(config.save_dir, log_std_out=True, use_tensorboard=config.use_tensorboard)
    if not config.cuda:
        torch.set_num_threads(config.n_training_threads)

    # NOTE: init/load experiment state
    estate = ExperimentState()
    if is_restore:
        estate.load_state(config.restore_exp_state)
    # make counter copies to reduce writing ...
    episode = estate.episode  # total episodes so far
    t_env = estate.t_env  # total env interaction steps so far
    # t_max = config.n_env_steps  # max number of steps to run
    t_max = config.n_episodes * config.episode_length

    # NOTE: make vectorized env (one parallel batch for sampling, optionally
    # a second one for evaluation)
    env_func = ENV_MAP[config.env]
    p_env_func = partial(env_func, config.scenario, benchmark=False,
                         show_visual_range=config.show_visual_range)
    env = make_parallel_env(p_env_func, config.env_config, config.sample_batch_size,
                            config.n_rollout_threads, config.seed)
    if not config.no_eval:
        eval_env = make_parallel_env(p_env_func, config.env_config,
                                     config.eval_batch_size, 1, config.seed)

    # NOTE: make learner agent (restored from checkpoint or built fresh)
    if is_restore or config.restore_model is not None:
        learner = MASAC.init_from_save(config.restore_model)
    else:
        learner = MASAC.init_from_env(
            env,
            agent_alg=config.agent_alg,
            adversary_alg=config.adversary_alg,
            tau=config.tau,
            lr=config.lr,
            hidden_dim=config.hidden_dim,
            rnn_policy=(config.actor == "rnn"),
            rnn_critic=(config.critic == "rnn"),
            # sac stuff
            target_entropy=config.target_entropy
        )

    # NOTE: make sampling runner (env wrapper)
    scheme = get_sample_scheme(learner.nagents, env.observation_space, env.action_space)
    runner = CTDEEpisodeRunner(scheme, env, learner, logger, config.sample_batch_size,
                               config.episode_length, device=config.device, t_env=t_env,
                               ma_step_keys=["log_probs"], is_training=True)
    if not config.no_eval:
        eval_runner = CTDEEpisodeRunner(scheme, eval_env, learner, logger,
                                        config.eval_batch_size, config.episode_length,
                                        device=config.device, t_env=t_env,
                                        ma_step_keys=["log_probs"], is_training=False)
    buffer = EpisodeReplayBuffer(scheme, config.max_buffer_size,
                                 config.episode_length, device=config.device, prefill_num=2*config.batch_size)

    # NOTE: start training
    logger.info("Beginning training")
    start_time = time.time()
    last_time = start_time

    ############################################
    # while t_env <= t_max:
    while episode <= config.n_episodes:

        # NOTE: Run for a whole episode at a time; exploration noise is
        # linearly annealed from init_noise_scale to final_noise_scale
        # over the first n_exploration_eps episodes.
        learner.prep_rollouts(device=config.device)
        explr_pct_remaining = max(0, config.n_exploration_eps - episode) / config.n_exploration_eps
        learner.scale_noise(config.final_noise_scale + (config.init_noise_scale - config.final_noise_scale) * explr_pct_remaining)
        learner.reset_noise()

        episode_batch, _ = runner.run()
        buffer.insert_episode_batch(episode_batch)

        # update counters
        episode += config.sample_batch_size
        t_env = runner.t_env
        estate.episode = episode
        estate.t_env = t_env

        ############################################
        # NOTE: logging (exploration/sampling)
        if (estate.last_log_t == 0) or (t_env - estate.last_log_t >= config.log_interval):
            logger.info("\n")
            logger.info("*** sampling log ***")
            # timing
            logger.info("t_env: {} / {}, eps: {} / {}".format(
                t_env, t_max, episode, config.n_episodes))
            logger.info("Estimated time left: {}. Time passed: {}".format(
                time_left(last_time, estate.last_log_t, t_env, t_max),
                time_str(time.time() - start_time)
            ))
            last_time = time.time()

            # log collected episode stats
            results = runner.get_summaries()
            runner.reset_summaries()
            log_results(t_env, results, logger, mode="sample",
                        log_agent_returns=config.log_agent_returns)
            estate.last_log_t = t_env

        ############################################
        # NOTE: training updates
        ## change to batch_size * n_updates_per_train for n_updates > 1
        if buffer.can_sample(config.batch_size) and (t_env - estate.last_train_t >= config.train_interval):
            learner.prep_training(device=config.device)
            for _ in range(config.n_updates_per_train):
                episode_sample = None
                for a_i in range(learner.nagents):
                    if config.sync_samples:
                        # if not None, reuse episode_sample
                        if episode_sample is None:
                            episode_sample = buffer.sample(config.batch_size)
                    else:
                        # each agent can have different collective experience samples
                        episode_sample = buffer.sample(config.batch_size)

                    # Truncate batch to only filled timesteps
                    max_ep_t = episode_sample.max_t_filled()
                    episode_sample = episode_sample[:, :max_ep_t]
                    if episode_sample.device != config.device:
                        # NOTE(review): assumes EpisodeBatch.to moves the
                        # batch in place (return value unused) — confirm.
                        episode_sample.to(config.device)

                    # dispatch sample to per agent [(B,T,D)]*N
                    sample = dispatch_samples(episode_sample, scheme, learner.nagents)
                    learner.update(sample, a_i)  #, logger=logger)

            # sync target networks
            if t_env - estate.last_target_update_t >= config.target_update_interval:
                learner.update_all_targets()
                estate.last_target_update_t = t_env
            learner.prep_rollouts(device=config.device)
            estate.last_train_t = t_env

            # collect & log training stats
            if t_env - estate.last_train_log_t >= config.train_log_interval:
                train_results = learner.get_summaries()
                learner.reset_summaries()
                logger.info("\n")
                logger.info("*** training log ***")
                log_results(t_env, train_results, logger, mode="train")
                estate.last_train_log_t = t_env

        ############################################
        # NOTE: Execute test runs once in a while
        if not config.no_eval and ((estate.last_test_t == 0) or (t_env - estate.last_test_t >= config.eval_interval)):
            n_test_runs = max(1, config.eval_n_episodes // eval_runner.batch_size)
            eval_episodes = []
            for _ in range(n_test_runs):
                eval_bt, _ = eval_runner.run(render=(not config.no_render))
                eval_episodes.append(eval_bt)

            # collect evaluation stats
            eval_results = eval_runner.get_summaries()
            eval_runner.reset_summaries()
            eval_episodes = eval_episodes[0].concat(eval_episodes[1:])
            logger.info("\n")
            logger.info("*** evaluation log ***")
            log_results(t_env, eval_results, logger, mode="eval", episodes=eval_episodes,
                        log_agent_returns=config.log_agent_returns)
            estate.last_test_t = t_env

        ############################################
        # NOTE: checkpoint (model weights + experiment counters)
        if (estate.last_save_t == 0) or (t_env - estate.last_save_t >= config.save_interval):
            os.makedirs(config.save_dir + "/checkpoints", exist_ok=True)
            learner.save(config.save_dir + "/checkpoints" + "/model_{}.ckpt".format(t_env))
            learner.save(config.save_dir + "/model.ckpt")
            logger.info("\n")
            logger.info("*** checkpoint log ***")
            logger.info("Saving models to {}".format(
                "/checkpoints" + "/model_{}.ckpt".format(t_env)
            ))
            estate.last_save_t = t_env
            estate.save_state(config.save_dir + "/exp_state.pkl")

    ############################################
    # NOTE: clean up
    learner.save(config.save_dir + "/model.ckpt")  # final save
    estate.last_save_t = t_env
    estate.save_state(config.save_dir + "/exp_state.pkl")
    env.close()
    logger.export_scalars_to_json("summary.json")
    logger.info("Finished Training")
    logger.close()
if __name__ == '__main__':
    # Parse CLI arguments and launch the experiment.
    run(parse_args())
| 49.16338 | 130 | 0.600527 |
795406276555af9a490d25509ba6a7b134fbadbd | 3,155 | py | Python | great_expectations/render/renderer/page_renderer.py | orenovadia/great_expectations | 76ef0c4e066227f8b589a1ee6ac885618f65906e | [
"Apache-2.0"
] | null | null | null | great_expectations/render/renderer/page_renderer.py | orenovadia/great_expectations | 76ef0c4e066227f8b589a1ee6ac885618f65906e | [
"Apache-2.0"
] | null | null | null | great_expectations/render/renderer/page_renderer.py | orenovadia/great_expectations | 76ef0c4e066227f8b589a1ee6ac885618f65906e | [
"Apache-2.0"
] | null | null | null | from .renderer import Renderer
from .column_section_renderer import (
DescriptiveColumnSectionRenderer,
PrescriptiveColumnSectionRenderer,
)
from .other_section_renderer import (
DescriptiveOverviewSectionRenderer,
)
class PrescriptivePageRenderer(Renderer):
    """Renders a full page of prescriptive (expectation-suite) sections,
    one section per column."""

    @classmethod
    def render(cls, expectations):
        # Bucket the expectations by the column they refer to; table-level
        # expectations (no "column" kwarg) go under the "_nocolumn" key.
        grouped = {}
        column_order = None
        for exp in expectations["expectations"]:
            kwargs = exp["kwargs"]
            key = kwargs["column"] if "column" in kwargs else "_nocolumn"
            grouped.setdefault(key, []).append(exp)

            # If the suite declares an expected column ordering, adopt it
            # (the last such expectation wins).
            if exp["expectation_type"] == "expect_table_columns_to_match_ordered_list":
                declared = kwargs["column_list"]
                if declared and len(declared) > 0:
                    column_order = declared

        # No declared ordering: fall back to alphabetical.
        if not column_order:
            column_order = sorted(grouped)

        return {
            "renderer_type": "PrescriptivePageRenderer",
            "sections": [
                PrescriptiveColumnSectionRenderer.render(grouped[name])
                for name in column_order
            ]
        }
class DescriptivePageRenderer(Renderer):
    """Renders a full page of descriptive (validation-result) sections:
    an overview section followed by one section per column."""

    @classmethod
    def render(cls, validation_results):
        # Bucket the expectation validation results (EVRs) by column;
        # results without a "column" kwarg are table-level.
        evrs_by_column = {}
        for result in validation_results["results"]:
            kwargs = result["expectation_config"]["kwargs"]
            key = kwargs["column"] if "column" in kwargs else "Table-level Expectations"
            evrs_by_column.setdefault(key, []).append(result)

        ordered_columns = Renderer._get_column_list_from_evrs(validation_results)
        column_types = DescriptiveOverviewSectionRenderer._get_column_types(validation_results)

        # Display only the last path component of the data asset name.
        meta = validation_results["meta"]
        if "data_asset_name" in meta and meta["data_asset_name"]:
            data_asset_name = meta["data_asset_name"].split('/')[-1]
        else:
            data_asset_name = None

        overview_section = DescriptiveOverviewSectionRenderer.render(
            validation_results,
            section_name="Overview"
        )
        column_sections = [
            DescriptiveColumnSectionRenderer.render(
                evrs_by_column[column],
                section_name=column,
                column_type=column_types.get(column),
            )
            for column in ordered_columns
        ]
        return {
            "renderer_type": "DescriptivePageRenderer",
            "data_asset_name": data_asset_name,
            "sections": [overview_section] + column_sections
        }
| 36.264368 | 109 | 0.593344 |
795406d9d5d313c79ef7d034e8713db6265b2bd7 | 9,074 | py | Python | airflow/sensors/base_sensor_operator.py | j-y-matsubara/airflow | 94a7673e8b208af165a191d940034bfb8b8e0b7e | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | airflow/sensors/base_sensor_operator.py | j-y-matsubara/airflow | 94a7673e8b208af165a191d940034bfb8b8e0b7e | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | airflow/sensors/base_sensor_operator.py | j-y-matsubara/airflow | 94a7673e8b208af165a191d940034bfb8b8e0b7e | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2021-07-06T17:12:07.000Z | 2021-07-06T17:12:07.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import hashlib
import os
from datetime import timedelta
from time import sleep
from typing import Any, Dict, Iterable
from airflow.exceptions import (
AirflowException, AirflowRescheduleException, AirflowSensorTimeout, AirflowSkipException,
)
from airflow.models import BaseOperator, SkipMixin, TaskReschedule
from airflow.ti_deps.deps.ready_to_reschedule import ReadyToRescheduleDep
from airflow.utils import timezone
from airflow.utils.decorators import apply_defaults
class BaseSensorOperator(BaseOperator, SkipMixin):
    """
    Sensor operators are derived from this class and inherit these attributes.

    Sensor operators keep executing at a time interval and succeed when
    a criteria is met and fail if and when they time out.

    :param soft_fail: Set to true to mark the task as SKIPPED on failure
    :type soft_fail: bool
    :param poke_interval: Time in seconds that the job should wait in
        between each tries
    :type poke_interval: float
    :param timeout: Time, in seconds before the task times out and fails.
    :type timeout: float
    :param mode: How the sensor operates.
        Options are: ``{ poke | reschedule }``, default is ``poke``.
        When set to ``poke`` the sensor is taking up a worker slot for its
        whole execution time and sleeps between pokes. Use this mode if the
        expected runtime of the sensor is short or if a short poke interval
        is required. Note that the sensor will hold onto a worker slot and
        a pool slot for the duration of the sensor's runtime in this mode.
        When set to ``reschedule`` the sensor task frees the worker slot when
        the criteria is not yet met and it's rescheduled at a later time. Use
        this mode if the time before the criteria is met is expected to be
        quite long. The poke interval should be more than one minute to
        prevent too much load on the scheduler.
    :type mode: str
    :param exponential_backoff: allow progressive longer waits between
        pokes by using exponential backoff algorithm
    :type exponential_backoff: bool
    """
    ui_color = '#e6f1f2'  # type: str
    valid_modes = ['poke', 'reschedule']  # type: Iterable[str]

    @apply_defaults
    def __init__(self,
                 poke_interval: float = 60,
                 timeout: float = 60 * 60 * 24 * 7,
                 soft_fail: bool = False,
                 mode: str = 'poke',
                 exponential_backoff: bool = False,
                 *args,
                 **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.poke_interval = poke_interval
        self.soft_fail = soft_fail
        self.timeout = timeout
        self.mode = mode
        self.exponential_backoff = exponential_backoff
        # Fail fast at DAG-definition time on invalid parameters.
        self._validate_input_values()

    def _validate_input_values(self) -> None:
        # Reject negative or non-numeric intervals/timeouts and unknown modes.
        if not isinstance(self.poke_interval, (int, float)) or self.poke_interval < 0:
            raise AirflowException(
                "The poke_interval must be a non-negative number")
        if not isinstance(self.timeout, (int, float)) or self.timeout < 0:
            raise AirflowException(
                "The timeout must be a non-negative number")
        if self.mode not in self.valid_modes:
            raise AirflowException(
                "The mode must be one of {valid_modes},"
                "'{d}.{t}'; received '{m}'."
                .format(valid_modes=self.valid_modes,
                        d=self.dag.dag_id if self.dag else "",
                        t=self.task_id, m=self.mode))

    def poke(self, context: Dict) -> bool:
        """
        Function that the sensors defined while deriving this class should
        override. Return True when the sensed condition is met.
        """
        raise AirflowException('Override me.')

    def execute(self, context: Dict) -> Any:
        """Poke until the condition is met, the timeout elapses, or (in
        reschedule mode) the task is handed back to the scheduler."""
        started_at = timezone.utcnow()
        try_number = 1
        if self.reschedule:
            # If reschedule, use first start date of current try so the
            # timeout spans all reschedules of this try, not just this run.
            task_reschedules = TaskReschedule.find_for_task_instance(context['ti'])
            if task_reschedules:
                started_at = task_reschedules[0].start_date
                try_number = len(task_reschedules) + 1
        while not self.poke(context):
            if (timezone.utcnow() - started_at).total_seconds() > self.timeout:
                # If sensor is in soft fail mode but will be retried then
                # give it a chance and fail with timeout.
                # This gives the ability to set up non-blocking AND soft-fail sensors.
                if self.soft_fail and not context['ti'].is_eligible_to_retry():
                    self._do_skip_downstream_tasks(context)
                    raise AirflowSkipException('Snap. Time is OUT.')
                else:
                    raise AirflowSensorTimeout('Snap. Time is OUT.')
            if self.reschedule:
                # Free the worker slot and ask the scheduler to run us again
                # after the next poke interval.
                reschedule_date = timezone.utcnow() + timedelta(
                    seconds=self._get_next_poke_interval(started_at, try_number))
                raise AirflowRescheduleException(reschedule_date)
            else:
                # Poke mode: hold the slot and sleep in-process.
                sleep(self._get_next_poke_interval(started_at, try_number))
                try_number += 1
        self.log.info("Success criteria met. Exiting.")

    def _do_skip_downstream_tasks(self, context: Dict) -> None:
        # Mark all downstream tasks of this sensor as skipped.
        downstream_tasks = context['task'].get_flat_relatives(upstream=False)
        self.log.debug("Downstream task_ids %s", downstream_tasks)
        if downstream_tasks:
            self.skip(context['dag_run'], context['ti'].execution_date, downstream_tasks)

    def _get_next_poke_interval(self, started_at, try_number):
        """
        Using the similar logic which is used for exponential backoff retry delay for operators.
        """
        if self.exponential_backoff:
            min_backoff = int(self.poke_interval * (2 ** (try_number - 2)))
            current_time = timezone.utcnow()
            # Deterministic per-(dag, task, start, try) jitter so pokes from
            # different sensors don't synchronize.
            run_hash = int(hashlib.sha1("{}#{}#{}#{}".format(
                self.dag_id, self.task_id, started_at, try_number
            ).encode("utf-8")).hexdigest(), 16)
            modded_hash = min_backoff + run_hash % min_backoff
            # Cap below timedelta.max so the value stays representable.
            delay_backoff_in_seconds = min(
                modded_hash,
                timedelta.max.total_seconds() - 1
            )
            # Never sleep past the remaining timeout budget.
            new_interval = min(self.timeout - int((current_time - started_at).total_seconds()),
                               delay_backoff_in_seconds)
            self.log.info("new %s interval is %s", self.mode, new_interval)
            return new_interval
        else:
            return self.poke_interval

    @property
    def reschedule(self):
        """Define mode rescheduled sensors."""
        return self.mode == 'reschedule'

    # pylint: disable=no-member
    @property
    def deps(self):
        """
        Adds one additional dependency for all sensor operators that
        checks if a sensor task instance can be rescheduled.
        """
        if self.reschedule:
            return BaseOperator.deps.fget(self) | {ReadyToRescheduleDep()}
        return BaseOperator.deps.fget(self)
def poke_mode_only(cls):
    """
    Class Decorator for child classes of BaseSensorOperator to indicate
    that instances of this class are only safe to use poke mode.

    Will decorate all methods in the class to assert they did not change
    the mode from 'poke'.

    :param cls: BaseSensor class to enforce methods only use 'poke' mode.
    :type cls: type
    """
    def decorate(cls_type):
        def mode_getter(_):
            # Mode is hard-wired: decorated sensors only support 'poke'.
            return 'poke'

        def mode_setter(_, value):
            if value != 'poke':
                # BUGFIX: the original message read "cannot set mode to
                # 'poke'", which states the opposite of the constraint.
                raise ValueError(
                    f"Cannot set mode to '{value}'; "
                    f"only mode='poke' is supported by this sensor.")

        if not issubclass(cls_type, BaseSensorOperator):
            raise ValueError(f"poke_mode_only decorator should only be "
                             f"applied to subclasses of BaseSensorOperator,"
                             f" got:{cls_type}.")

        # Replace the plain attribute with a property that pins the mode.
        cls_type.mode = property(mode_getter, mode_setter)

        return cls_type

    return decorate(cls)
if 'BUILDING_AIRFLOW_DOCS' in os.environ:
    # flake8: noqa: F811
    # Monkey patch hook to get good function headers while building docs.
    # FIX: use a def instead of `apply_defaults = lambda x: x` (PEP 8 E731);
    # the behavior is identical — an identity decorator.
    def apply_defaults(func):  # noqa: F811
        """Identity replacement for apply_defaults during docs builds."""
        return func
| 41.623853 | 96 | 0.641724 |
79540885beac52316d128981f3ef0c3ec883d799 | 5,208 | py | Python | src/m7_summing.py | josephklaw/03-AccumulatorsAndFunctionsWithParameters | 7228fa5ab696737a4365f9ee3f600df503e1a645 | [
"MIT"
] | null | null | null | src/m7_summing.py | josephklaw/03-AccumulatorsAndFunctionsWithParameters | 7228fa5ab696737a4365f9ee3f600df503e1a645 | [
"MIT"
] | null | null | null | src/m7_summing.py | josephklaw/03-AccumulatorsAndFunctionsWithParameters | 7228fa5ab696737a4365f9ee3f600df503e1a645 | [
"MIT"
] | null | null | null | """
This module lets you practice the ACCUMULATOR pattern
in its simplest classic forms:
SUMMING: total = total + number
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher, Mark Hays,
Aaron Wilkin, their colleagues, and Joseph Law.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
import math
def main():
    """ Calls the TEST functions in this module. """
    for test_function in (run_test_sum_cosines, run_test_sum_square_roots):
        test_function()
def run_test_sum_cosines():
    """ Tests the sum_cosines function. """
    print()
    print('--------------------------------------------------')
    print('Testing the sum_cosines function:')
    print('--------------------------------------------------')

    # Run three test cases, computing the expected value independently
    # (cos(0) + cos(1) + ... + cos(n)) and printing it beside the actual.
    for test_number, n in enumerate((1, 2, 3), start=1):
        predicted = sum(math.cos(k) for k in range(n + 1))
        actual = sum_cosines(n)
        print('Test {} predicted:'.format(test_number), predicted)
        print(' Actual:', actual)
def sum_cosines(n):
    """
    What comes in: A non-negative integer n.
    What goes out: The sum of the cosines of the integers
        0, 1, 2, 3, ... n, inclusive, for the given n.
    Side effects: None.
    Example:
        If n is 3, this function returns
        cos(0) + cos(1) + cos(2) + cos(3) which is about 0.13416.
    """
    # Accumulate via a generator expression instead of an explicit loop;
    # sum() adds the terms in the same 0..n order.
    return sum(math.cos(k) for k in range(n + 1))
def run_test_sum_square_roots():
    """ Tests the sum_square_roots function. """
    print()
    print('--------------------------------------------------')
    print('Testing the sum_square_roots function:')
    print('--------------------------------------------------')
    # Each case compares sum_square_roots(n) against an independently
    # accumulated sqrt(0) + sqrt(2) + ... + sqrt(2n).
    for test_number, n in enumerate((2, 3, 4), start=1):
        expected = sum(math.sqrt(2 * k) for k in range(n + 1))
        actual = sum_square_roots(n)
        print('Test', test_number, 'predicted:', expected)
        print('      Actual:', actual)
def sum_square_roots(n):
    """
    What comes in: A non-negative integer n.
    What goes out: The sum of the square roots of the even integers
       2, 4, 6, 8, ... 2n inclusive, for the given n.
       So if n is 7, the last term of the sum is
       the square root of 14 (not 7).
    Side effects: None.
    Example:
      If n is 5, this function returns
      sqrt(2) + sqrt(4) + sqrt(6) + sqrt(8) + sqrt(10),
      which is about 11.854408.
    """
    # k = 0 contributes sqrt(0) == 0, so including it does not change
    # the sum; terms are added in the same order as a plain loop.
    return sum(math.sqrt(2 * k) for k in range(n + 1))
# -----------------------------------------------------------------------------
# Calls main to start the ball rolling.
# Guarded so that importing this module (e.g. from a grader or test runner)
# does not trigger the test run; executing the file directly still does.
# -----------------------------------------------------------------------------
if __name__ == "__main__":
    main()
| 39.454545 | 106 | 0.495392 |
795408edeb70aa5b743d84dbddcfefd1e48d0eb2 | 6,847 | py | Python | tests/test_utils.py | TobiasUhmann/pykeen | 82ca32a69f46f2e8a6255d7d2ce519eefbb3757e | [
"MIT"
] | 2 | 2020-11-25T14:16:39.000Z | 2021-04-15T18:36:46.000Z | tests/test_utils.py | TobiasUhmann/pykeen | 82ca32a69f46f2e8a6255d7d2ce519eefbb3757e | [
"MIT"
] | null | null | null | tests/test_utils.py | TobiasUhmann/pykeen | 82ca32a69f46f2e8a6255d7d2ce519eefbb3757e | [
"MIT"
] | 1 | 2021-04-02T13:15:58.000Z | 2021-04-02T13:15:58.000Z | # -*- coding: utf-8 -*-
"""Unittest for for global utilities."""
import string
import unittest
import numpy
import torch
from pykeen.nn import Embedding
from pykeen.utils import (
clamp_norm,
compact_mapping,
flatten_dictionary,
get_until_first_blank,
l2_regularization,
)
class L2RegularizationTest(unittest.TestCase):
    """Tests for the l2_regularization utility."""

    def test_one_tensor(self):
        """A single all-ones tensor regularizes to its element count."""
        tensor = torch.ones(1, 2, 3, 4)
        regularization = l2_regularization(tensor)
        self.assertAlmostEqual(float(regularization), float(tensor.numel()))

    def test_many_tensors(self):
        """Var-args: the result sums the squared L2 norms of all tensors."""
        tensors = []
        expected = 0.0
        for index, shape in enumerate([
            (1, 2, 3),
            (2, 3, 4),
            (3, 4, 5),
        ]):
            fill_value = index + 1
            tensor = torch.full(shape, float(fill_value))
            tensors.append(tensor)
            # Each constant tensor contributes numel * value**2.
            expected += tensor.numel() * fill_value ** 2
        regularization = l2_regularization(*tensors)
        self.assertAlmostEqual(float(regularization), expected)
class FlattenDictionaryTest(unittest.TestCase):
    """Tests for flatten_dictionary."""

    def _compare(self, observed_output, expected_output):
        """Check the result is fully flat and matches the expectation exactly."""
        assert not any(isinstance(value, dict) for value in expected_output.values())
        assert expected_output == observed_output

    def test_flatten_dictionary(self):
        """Nested string keys are joined with dots."""
        nested = {'a': {'b': {'c': 1, 'd': 2}, 'e': 3}}
        expected = {'a.b.c': 1, 'a.b.d': 2, 'a.e': 3}
        self._compare(flatten_dictionary(nested), expected)

    def test_flatten_dictionary_mixed_key_type(self):
        """Non-string keys are stringified before joining."""
        nested = {'a': {5: {'c': 1, 'd': 2}, 'e': 3}}
        expected = {'a.5.c': 1, 'a.5.d': 2, 'a.e': 3}
        self._compare(flatten_dictionary(nested), expected)

    def test_flatten_dictionary_prefix(self):
        """An explicit prefix is prepended to every flattened key."""
        nested = {'a': {'b': {'c': 1, 'd': 2}, 'e': 3}}
        expected = {'Test.a.b.c': 1, 'Test.a.b.d': 2, 'Test.a.e': 3}
        self._compare(flatten_dictionary(nested, prefix='Test'), expected)
class TestGetUntilFirstBlank(unittest.TestCase):
    """Test get_until_first_blank()."""
    def test_get_until_first_blank_trivial(self):
        """Test the trivial (empty) string: it should come back unchanged."""
        s = ''
        r = get_until_first_blank(s)
        self.assertEqual('', r)
    def test_regular(self):
        """Test a regular case: text up to the first blank line is joined."""
        # NOTE(review): this copy of the fixture appears to have lost the
        # blank line between "line." and "Now I continue." (blank lines seem
        # stripped throughout this file) -- confirm against the upstream
        # source before editing this literal.
        s = """Broken
        line.
        Now I continue.
        """
        r = get_until_first_blank(s)
        self.assertEqual("Broken line.", r)
class EmbeddingsInCanonicalShapeTests(unittest.TestCase):
    """Test get_embedding_in_canonical_shape()."""
    #: The number of embeddings in the fixture table
    num_embeddings: int = 3
    #: The embedding dimension of the fixture table
    embedding_dim: int = 2
    def setUp(self) -> None:
        """Initialize a small embedding table with seeded random weights."""
        self.embedding = Embedding(num_embeddings=self.num_embeddings, embedding_dim=self.embedding_dim)
        # Fixed seed so each test sees deterministic weights/indices.
        self.generator = torch.manual_seed(42)
        self.embedding._embeddings.weight.data = torch.rand(
            self.num_embeddings,
            self.embedding_dim,
            generator=self.generator,
        )
    def test_no_indices(self):
        """Test getting all embeddings (indices=None)."""
        emb = self.embedding.get_in_canonical_shape(indices=None)
        # check shape: with no indices the batch axis is a singleton
        assert emb.shape == (1, self.num_embeddings, self.embedding_dim)
        # check values against the raw forward pass, reshaped
        exp = self.embedding(indices=None).view(1, self.num_embeddings, self.embedding_dim)
        assert torch.allclose(emb, exp)
    def _test_with_indices(self, indices: torch.Tensor) -> None:
        """Helper: check shape and values for an explicit index tensor."""
        emb = self.embedding.get_in_canonical_shape(indices=indices)
        # check shape: one row per requested index, middle axis singleton
        num_ind = indices.shape[0]
        assert emb.shape == (num_ind, 1, self.embedding_dim)
        # check values: per-index lookups, stacked in request order
        exp = torch.stack([self.embedding(i) for i in indices], dim=0).view(num_ind, 1, self.embedding_dim)
        assert torch.allclose(emb, exp)
    def test_with_consecutive_indices(self):
        """Test to retrieve all embeddings with consecutive indices."""
        indices = torch.arange(self.num_embeddings, dtype=torch.long)
        self._test_with_indices(indices=indices)
    def test_with_indices_with_duplicates(self):
        """Test to retrieve embeddings at random positions with duplicate indices."""
        indices = torch.randint(
            self.num_embeddings,
            size=(2 * self.num_embeddings,),
            dtype=torch.long,
            generator=self.generator,
        )
        self._test_with_indices(indices=indices)
    def test_compact_mapping(self):
        """Test ``compact_mapping()``."""
        # NOTE(review): this test does not use the embedding fixture above;
        # it may belong in its own test case class.
        mapping = {
            letter: 2 * i
            for i, letter in enumerate(string.ascii_letters)
        }
        compacted_mapping, id_remapping = compact_mapping(mapping=mapping)
        # check correct value range: compacted ids are exactly 0..len-1
        self.assertEqual(set(compacted_mapping.values()), set(range(len(mapping))))
        self.assertEqual(set(id_remapping.keys()), set(mapping.values()))
        self.assertEqual(set(id_remapping.values()), set(compacted_mapping.values()))
def test_clamp_norm():
    """Test clamp_norm() for p in {1, 2, inf} over every tensor dimension."""
    max_norm = 1.0
    # Seeded generator so the random inputs are reproducible.
    gen = torch.manual_seed(42)
    # Float tolerance for the norm-bound check.
    eps = 1.0e-06
    for p in [1, 2, float('inf')]:
        for _ in range(10):
            x = torch.rand(10, 20, 30, generator=gen)
            for dim in range(x.ndimension()):
                x_c = clamp_norm(x, maxnorm=max_norm, p=p, dim=dim)
                # check maximum norm constraint
                assert (x_c.norm(p=p, dim=dim) <= max_norm + eps).all()
                # unchanged values for small norms: entries of slices that
                # were already under the bound must be returned verbatim
                norm = x.norm(p=p, dim=dim)
                mask = torch.stack([(norm < max_norm)] * x.shape[dim], dim=dim)
                assert (x_c[mask] == x[mask]).all()
| 31.122727 | 107 | 0.569154 |
7954094c29f9c06372b19a01e6d5c7f0b61dc5bb | 6,274 | py | Python | botstart.py | Mahas1/Guren | 6bb8947b0407435f15be1e5a12de6050bf7de95c | [
"MIT"
] | null | null | null | botstart.py | Mahas1/Guren | 6bb8947b0407435f15be1e5a12de6050bf7de95c | [
"MIT"
] | null | null | null | botstart.py | Mahas1/Guren | 6bb8947b0407435f15be1e5a12de6050bf7de95c | [
"MIT"
] | null | null | null | import asyncio
import os
import random
import logging
import contextlib
import sqlite3
import discord
from discord.ext import commands
from pathlib import Path
import motor.motor_asyncio
import io
import textwrap
import traceback
from traceback import format_exception
import utils.json_loader
from utils.mongo import Document
from utils.util import clean_code, Pag
from discord_slash import SlashCommand
cwd = Path(__file__).parents[0]
cwd = str(cwd)
print(f"{cwd}\n-----")
description = '''A clever discord bot written in python.'''
initial_extensions = ['cogs.leveling']
async def get_prefix(bot, message):
    """Resolve the command prefix for a message.

    Direct messages and guilds without a stored prefix fall back to the
    default "g$"; otherwise the guild's configured prefix is used.  The
    bot mention always works as a prefix (via when_mentioned_or).
    """
    default = commands.when_mentioned_or("g$")(bot, message)
    if not message.guild:
        return default
    try:
        data = await bot.config.find(message.guild.id)
    except Exception:
        # The database may not be initialised yet (on_ready attaches
        # bot.config); fall back to the default rather than crashing
        # dispatch.  The original bare ``except:`` also swallowed
        # BaseExceptions such as CancelledError, which must propagate.
        return default
    if not data or "prefix" not in data:
        return default
    return commands.when_mentioned_or(data["prefix"])(bot, message)
class NewHelpName(commands.MinimalHelpCommand):
    """Help command that renders each paginated help page inside an embed."""

    async def send_pages(self):
        """Send every help page as a randomly coloured embed."""
        destination = self.get_destination()
        for page_text in self.paginator.pages:
            help_embed = discord.Embed(
                description=page_text,
                color=discord.Color.random(),
            )
            help_embed.set_thumbnail(url=bot.user.avatar_url)
            help_embed.set_footer(text='')
            await destination.send(embed=help_embed)
# Load the bot token and database URL from the local secrets file.
secret_file = utils.json_loader.read_json('secrets')
# NOTE(review): this ``intents`` variable is never passed anywhere -- the
# Bot below builds its own ``discord.Intents.all()``; confirm and remove.
intents = discord.Intents.all()
bot = commands.Bot(
    command_prefix=get_prefix,
    description=description,
    owner_id=219410026631135232,
    case_insensitive=True,
    intents=discord.Intents.all(),
    help_command = NewHelpName()
)
# Slash-command handler; syncs registered commands on start and cog reload.
slash = SlashCommand(bot, sync_commands=True, sync_on_cog_reload=True)
bot.config_token = secret_file["token"]
logging.basicConfig(level=logging.INFO)
# User ids the bot should ignore entirely (checked in on_message).
bot.blacklisted_users = []
bot.connection_url = secret_file["mongo"]
# Cache of active mutes, keyed by user id; repopulated in on_ready.
bot.muted_users = {}
bot.cwd = cwd
bot.version = "1.0"
# Named embed colours (Discord palette) for use across cogs.
bot.colors = {
    "WHITE": 0xFFFFFF,
    "AQUA": 0x1ABC9C,
    "GREEN": 0x2ECC71,
    "BLUE": 0x3498DB,
    "PURPLE": 0x9B59B6,
    "LUMINOUS_VIVID_PINK": 0xE91E63,
    "GOLD": 0xF1C40F,
    "ORANGE": 0xE67E22,
    "RED": 0xE74C3C,
    "NAVY": 0x34495E,
    "DARK_AQUA": 0x11806A,
    "DARK_GREEN": 0x1F8B4C,
    "DARK_BLUE": 0x206694,
    "DARK_PURPLE": 0x71368A,
    "DARK_VIVID_PINK": 0xAD1457,
    "DARK_GOLD": 0xC27C0E,
    "DARK_ORANGE": 0xA84300,
    "DARK_RED": 0x992D22
}
# Flat list of the colour values, for random.choice-style picks.
bot.color_list = [c for c in bot.colors.values()]
@bot.event
async def on_ready():
    """Connect to MongoDB and restore persistent state once the bot is ready."""
    print('Logged in as', bot.user.name)
    print("Bot ID:", bot.user.id)
    # Fix: the original passed 2 as an extra print argument
    # (``print('Bot latency:', bot.latency * 1000, 2)``), which printed a
    # stray "2" instead of rounding to 2 decimal places.
    print('Bot latency:', round(bot.latency * 1000, 2))
    print('Running discord.py version ' + discord.__version__)
    bot.mongo = motor.motor_asyncio.AsyncIOMotorClient(str(bot.connection_url))
    bot.db = bot.mongo["Guren"]
    bot.config = Document(bot.db, "config")
    bot.warns = Document(bot.db, "warns")
    bot.mutes = Document(bot.db, "mutes")
    bot.command_usage = Document(bot.db, "command_usage")
    bot.reaction_roles = Document(bot.db, "reaction_roles")
    print("Initialized Database\n-----")
    for document in await bot.config.get_all():
        print(document)
    # Re-populate the in-memory mute cache so timers survive a restart.
    currentMutes = await bot.mutes.get_all()
    for mute in currentMutes:
        bot.muted_users[mute["_id"]] = mute
    print(bot.muted_users)
@bot.event
async def on_guild_join(guild):
    """Ensure the new guild has an enabled levelling row in the SQLite DB.

    Inserts a row if none exists, or flips a 'disabled' row back to
    'enabled'.  Guild ids are stored as TEXT, hence the str() conversions.
    """
    connection = sqlite3.connect('Leveling/main.db')
    try:
        cursor = connection.cursor()
        # Parameterised for consistency with the writes below (the original
        # interpolated guild.id into the SQL string with an f-string).
        cursor.execute(
            "SELECT enabled FROM glevel WHERE guild_id = ?",
            (str(guild.id),),
        )
        row = cursor.fetchone()
        if row is None:
            cursor.execute(
                "INSERT INTO glevel(guild_id, enabled) VALUES(?,?)",
                (str(guild.id), 'enabled'),
            )
            connection.commit()
        elif str(row[0]) == 'disabled':
            cursor.execute(
                "UPDATE glevel SET enabled = ? WHERE guild_id = ?",
                ('enabled', str(guild.id)),
            )
            connection.commit()
        cursor.close()
    finally:
        # Close even if a query raises, so the connection never leaks.
        connection.close()
@bot.command(name="eval", aliases=["exec"])
@commands.is_owner()
async def _eval(ctx, *, code):
    """Owner only: execute arbitrary Python and paginate the result.

    The submitted code is wrapped in an async function so ``await`` works;
    stdout produced while it runs is captured and included in the output.
    Restricted to the bot owner via ``commands.is_owner`` -- running
    arbitrary code for anyone else would be a critical vulnerability.
    """
    code = clean_code(code)
    # Names made available to the evaluated snippet.
    local_variables = {
        "discord": discord,
        "commands": commands,
        "bot": bot,
        "ctx": ctx,
        "channel": ctx.channel,
        "author": ctx.author,
        "guild": ctx.guild,
        "message": ctx.message
    }
    stdout = io.StringIO()
    try:
        with contextlib.redirect_stdout(stdout):
            # Compile the snippet as the body of an async function, then
            # retrieve and await it; exec() stores "func" in local_variables.
            exec(
                f"async def func():\n{textwrap.indent(code, '    ')}", local_variables,
            )
            obj = await local_variables["func"]()
            result = f"{stdout.getvalue()}\n-- {obj}\n"
    except Exception as e:
        # Report the full traceback back to the invoking channel.
        result = "".join(format_exception(e, e, e.__traceback__))
    # Paginate in 2000-character chunks (Discord's message size limit).
    pager = Pag(
        timeout=100,
        entries=[result[i: i + 2000] for i in range(0, len(result), 2000)],
        length=1,
        prefix="```py\n",
        suffix="```"
    )
    await pager.start(ctx)
@bot.event
async def on_message(message):
    """Filter bot/blacklisted authors, answer bare mentions, dispatch commands."""
    if message.author.bot:
        return
    if message.author.id in bot.blacklisted_users:
        return
    # A message that is exactly the bot's mention: reply with this guild's
    # prefix.  NOTE(review): only the nickname-mention form "<@!id>" is
    # matched, not the plain "<@id>" form -- confirm this is intended.
    # NOTE(review): this uses bot.config.get_by_id while get_prefix uses
    # bot.config.find -- verify both methods exist and agree.
    if message.content.startswith(f"<@!{bot.user.id}>") and \
            len(message.content) == len(f"<@!{bot.user.id}>"
                                        ):
        data = await bot.config.get_by_id(message.guild.id)
        if not data or "prefix" not in data:
            prefix = "g$"
        else:
            prefix = data["prefix"]
        await message.channel.send(f"My prefix here is `{prefix}`", delete_after=15)
    await bot.process_commands(message)
async def chng_pr():
    """Rotate the bot's "playing" status once a minute, forever."""
    await bot.wait_until_ready()
    status_pool = ["g$help", "with Yuichiro!", "with epic lines of code", "getting fancy"]
    while not bot.is_closed():
        chosen_status = random.choice(status_pool)
        await bot.change_presence(activity=discord.Game(chosen_status))
        await asyncio.sleep(60)
if __name__ == "__main__":
    # Load every cog module in ./cogs (skipping private "_" files) before
    # starting the bot.
    for file in os.listdir(cwd + "/cogs"):
        if file.endswith(".py") and not file.startswith("_"):
            bot.load_extension(f"cogs.{file[:-3]}")
    # jishaku: debugging/owner-utility extension.
    bot.load_extension("jishaku")
    # Background task that cycles the presence text.
    bot.loop.create_task(chng_pr())
    bot.run(bot.config_token)
| 28.261261 | 87 | 0.639624 |
79540a9b1fb92958e29f28edbb9a1a559986a092 | 5,813 | py | Python | tests/modules/seq2vec_encoders/pytorch_seq2vec_wrapper_test.py | MSLars/allennlp | 2cdb8742c8c8c3c38ace4bdfadbdc750a1aa2475 | [
"Apache-2.0"
] | 11,433 | 2017-06-27T03:08:46.000Z | 2022-03-31T18:14:33.000Z | tests/modules/seq2vec_encoders/pytorch_seq2vec_wrapper_test.py | MSLars/allennlp | 2cdb8742c8c8c3c38ace4bdfadbdc750a1aa2475 | [
"Apache-2.0"
] | 4,006 | 2017-06-26T21:45:43.000Z | 2022-03-31T02:11:10.000Z | tests/modules/seq2vec_encoders/pytorch_seq2vec_wrapper_test.py | MSLars/allennlp | 2cdb8742c8c8c3c38ace4bdfadbdc750a1aa2475 | [
"Apache-2.0"
] | 2,560 | 2017-06-26T21:16:53.000Z | 2022-03-30T07:55:46.000Z | import pytest
from numpy.testing import assert_almost_equal
import torch
from torch.nn import LSTM
from torch.nn.utils.rnn import pack_padded_sequence
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.modules.seq2vec_encoders import PytorchSeq2VecWrapper
from allennlp.nn.util import sort_batch_by_length, get_lengths_from_binary_sequence_mask
from allennlp.modules.stacked_alternating_lstm import StackedAlternatingLstm
class TestPytorchSeq2VecWrapper(AllenNlpTestCase):
    """Tests for PytorchSeq2VecWrapper around (bi)directional LSTMs."""
    def test_get_dimensions_is_correct(self):
        """Output dim is 2*hidden for bidirectional, hidden otherwise."""
        lstm = LSTM(bidirectional=True, num_layers=3, input_size=2, hidden_size=7, batch_first=True)
        encoder = PytorchSeq2VecWrapper(lstm)
        assert encoder.get_output_dim() == 14
        assert encoder.get_input_dim() == 2
        lstm = LSTM(
            bidirectional=False, num_layers=3, input_size=2, hidden_size=7, batch_first=True
        )
        encoder = PytorchSeq2VecWrapper(lstm)
        assert encoder.get_output_dim() == 7
        assert encoder.get_input_dim() == 2
    def test_forward_pulls_out_correct_tensor_without_sequence_lengths(self):
        """With no mask, the encoder returns the LSTM output at the last step."""
        lstm = LSTM(bidirectional=True, num_layers=3, input_size=2, hidden_size=7, batch_first=True)
        encoder = PytorchSeq2VecWrapper(lstm)
        input_tensor = torch.FloatTensor([[[0.7, 0.8], [0.1, 1.5]]])
        lstm_output = lstm(input_tensor)
        encoder_output = encoder(input_tensor, None)
        assert_almost_equal(encoder_output.data.numpy(), lstm_output[0].data.numpy()[:, -1, :])
    def test_forward_pulls_out_correct_tensor_with_sequence_lengths(self):
        """With a mask, the result matches the packed-sequence final states."""
        lstm = LSTM(bidirectional=True, num_layers=3, input_size=3, hidden_size=7, batch_first=True)
        encoder = PytorchSeq2VecWrapper(lstm)
        # Zero the padding positions; mask marks the same positions False.
        input_tensor = torch.rand([5, 7, 3])
        input_tensor[1, 6:, :] = 0
        input_tensor[2, 4:, :] = 0
        input_tensor[3, 2:, :] = 0
        input_tensor[4, 1:, :] = 0
        mask = torch.ones(5, 7).bool()
        mask[1, 6:] = False
        mask[2, 4:] = False
        mask[3, 2:] = False
        mask[4, 1:] = False
        sequence_lengths = get_lengths_from_binary_sequence_mask(mask)
        packed_sequence = pack_padded_sequence(
            input_tensor, sequence_lengths.tolist(), batch_first=True
        )
        _, state = lstm(packed_sequence)
        # Transpose output state, extract the last forward and backward states and
        # reshape to be of dimension (batch_size, 2 * hidden_size).
        reshaped_state = state[0].transpose(0, 1)[:, -2:, :].contiguous()
        explicitly_concatenated_state = torch.cat(
            [reshaped_state[:, 0, :].squeeze(1), reshaped_state[:, 1, :].squeeze(1)], -1
        )
        encoder_output = encoder(input_tensor, mask)
        assert_almost_equal(encoder_output.data.numpy(), explicitly_concatenated_state.data.numpy())
    def test_forward_works_even_with_empty_sequences(self):
        """Rows whose mask is all-False must come back as all-zero vectors."""
        lstm = LSTM(
            bidirectional=True, num_layers=3, input_size=3, hidden_size=11, batch_first=True
        )
        encoder = PytorchSeq2VecWrapper(lstm)
        # Rows 2 and 4 are entirely empty (fully masked).
        tensor = torch.rand([5, 7, 3])
        tensor[1, 6:, :] = 0
        tensor[2, :, :] = 0
        tensor[3, 2:, :] = 0
        tensor[4, :, :] = 0
        mask = torch.ones(5, 7).bool()
        mask[1, 6:] = False
        mask[2, :] = False
        mask[3, 2:] = False
        mask[4, :] = False
        results = encoder(tensor, mask)
        for i in (0, 1, 3):
            assert not (results[i] == 0.0).data.all()
        for i in (2, 4):
            assert (results[i] == 0.0).data.all()
    def test_forward_pulls_out_correct_tensor_with_unsorted_batches(self):
        """Batches not sorted by length must still map back to original order."""
        lstm = LSTM(bidirectional=True, num_layers=3, input_size=3, hidden_size=7, batch_first=True)
        encoder = PytorchSeq2VecWrapper(lstm)
        # Lengths (3, 4, 2, 6, 7) are deliberately unsorted.
        input_tensor = torch.rand([5, 7, 3])
        input_tensor[0, 3:, :] = 0
        input_tensor[1, 4:, :] = 0
        input_tensor[2, 2:, :] = 0
        input_tensor[3, 6:, :] = 0
        mask = torch.ones(5, 7).bool()
        mask[0, 3:] = False
        mask[1, 4:] = False
        mask[2, 2:] = False
        mask[3, 6:] = False
        sequence_lengths = get_lengths_from_binary_sequence_mask(mask)
        sorted_inputs, sorted_sequence_lengths, restoration_indices, _ = sort_batch_by_length(
            input_tensor, sequence_lengths
        )
        packed_sequence = pack_padded_sequence(
            sorted_inputs, sorted_sequence_lengths.tolist(), batch_first=True
        )
        _, state = lstm(packed_sequence)
        # Transpose output state, extract the last forward and backward states and
        # reshape to be of dimension (batch_size, 2 * hidden_size).
        sorted_transposed_state = state[0].transpose(0, 1).index_select(0, restoration_indices)
        reshaped_state = sorted_transposed_state[:, -2:, :].contiguous()
        explicitly_concatenated_state = torch.cat(
            [reshaped_state[:, 0, :].squeeze(1), reshaped_state[:, 1, :].squeeze(1)], -1
        )
        encoder_output = encoder(input_tensor, mask)
        assert_almost_equal(encoder_output.data.numpy(), explicitly_concatenated_state.data.numpy())
    def test_wrapper_raises_if_batch_first_is_false(self):
        """The wrapper rejects modules constructed without batch_first=True."""
        with pytest.raises(ConfigurationError):
            lstm = LSTM(bidirectional=True, num_layers=3, input_size=3, hidden_size=7)
            _ = PytorchSeq2VecWrapper(lstm)
    def test_wrapper_works_with_alternating_lstm(self):
        """The wrapper also accepts AllenNLP's StackedAlternatingLstm."""
        model = PytorchSeq2VecWrapper(
            StackedAlternatingLstm(input_size=4, hidden_size=5, num_layers=3)
        )
        input_tensor = torch.randn(2, 3, 4)
        mask = torch.ones(2, 3).bool()
        output = model(input_tensor, mask)
        assert tuple(output.size()) == (2, 5)
| 43.059259 | 100 | 0.652675 |
79540c107b095117e67e61936eb603b7d19b3cec | 1,018 | py | Python | src/events/EventMessageBuilder.py | ITAnalyst-JU/process-logger | a51d4604b2dc3047dec9adfec96334ff20a3782f | [
"MIT"
] | null | null | null | src/events/EventMessageBuilder.py | ITAnalyst-JU/process-logger | a51d4604b2dc3047dec9adfec96334ff20a3782f | [
"MIT"
] | null | null | null | src/events/EventMessageBuilder.py | ITAnalyst-JU/process-logger | a51d4604b2dc3047dec9adfec96334ff20a3782f | [
"MIT"
] | null | null | null | import html
import json
def _str(s): return html.escape(str(s))
class EventMessageBuilder:
    """Builder for event messages renderable as XML or JSON.

    ``attributes(**attrs)`` returns a *new* builder with the merged
    attribute set; attributes whose value is ``None`` are skipped by
    both renderers.
    """

    def __init__(self, event_name, content=None):
        # event_name doubles as the XML tag name and (upper-cased) JSON type.
        self.event_name = str(event_name)
        self.content = str(content) if content is not None else ''
        self.attrs = {}

    def attributes(self, **attrs):
        """Return a copy of this builder with *attrs* merged over its own."""
        merged = EventMessageBuilder(self.event_name, self.content)
        merged.attrs = {**self.attrs, **attrs}
        return merged

    def to_xml(self):
        """Render as a single XML element followed by a newline."""
        parts = [f'<{self.event_name}']
        for key, value in self.attrs.items():
            if value is not None:
                # Fix: escape the attribute value.  The original
                # interpolated it raw, so a value containing '"' (or '<')
                # produced invalid XML.
                parts.append(f' {str(key)}="{html.escape(str(value))}"')
        parts.append(f'>{html.escape(str(self.content))}</{self.event_name}>\n')
        return ''.join(parts)

    def to_json(self):
        """Render as JSON; content is HTML-escaped for parity with to_xml."""
        payload = {
            'type': self.event_name.upper(),
            'content': html.escape(str(self.content)),
            'attributes': [
                {'name': str(key), 'value': str(value)}
                for key, value in self.attrs.items()
                if value is not None
            ],
        }
        return json.dumps(payload)
| 28.277778 | 73 | 0.543222 |
79540db7343cd37c04169f2c2a9534f0c0ea7d5c | 1,187 | py | Python | code/math_examples.py | rustam-fork/ml-course-uz | e1554d4c69bf0e421aa596d77aab65639df1ff73 | [
"MIT"
] | 21 | 2018-01-05T09:24:49.000Z | 2021-04-24T03:25:25.000Z | code/math_examples.py | rustam-fork/ml-course-uz | e1554d4c69bf0e421aa596d77aab65639df1ff73 | [
"MIT"
] | 1 | 2019-11-11T18:34:53.000Z | 2019-11-13T15:56:10.000Z | code/math_examples.py | rustam-fork/ml-course-uz | e1554d4c69bf0e421aa596d77aab65639df1ff73 | [
"MIT"
] | 13 | 2018-01-05T10:26:47.000Z | 2022-01-25T07:48:33.000Z | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
def draw_parabola(steps=50):
    """Plot y = x**2 on [-4, 4] plus a dashed vertical line at x = 0."""
    sample_points = np.linspace(-4, 4, steps)
    plt.plot(sample_points, np.square(sample_points))
    plt.axvline(x=0, color='b', linestyle='dashed')
def draw_paraboloid(steps=50):
    """Surface plot of the paraboloid z = x**2 + y**2 over [-1, 1]^2."""
    figure = plt.figure(figsize=(10, 10))
    axes = figure.gca(projection='3d')
    # Both axes share the same sampling grid.
    samples = np.linspace(-1, 1, steps)
    X, Y = np.meshgrid(samples, samples)
    axes.plot_surface(X, Y, X ** 2 + Y ** 2, cmap=cm.coolwarm)
def draw_mishra_bird():
    """Surface plot of a Mishra's-Bird-style function on x in [-10, 1], y in [-6, 0.5]."""
    fig = plt.figure(figsize=(14, 10))
    x = np.arange(-10, 1, 0.1)
    y = np.arange(-6, 0.5, 0.1)
    X, Y = np.meshgrid(x, y)
    ax = plt.gca(projection='3d')
    # NOTE(review): the middle term multiplies np.cos(X) twice; the usual
    # Mishra Bird definition has a single cos(x) factor before the
    # exponential -- confirm whether cos**2 is intentional here.
    Z = np.sin(Y) * np.exp((1 - np.cos(X)) ** 2) + np.cos(X) * np.cos(X) * np.exp((1 - np.sin(Y)) ** 2) + (X - Y) ** 2
    ax.plot_surface(X, Y, Z, cmap=cm.coolwarm)
    # Tilt the camera for a clearer view of the valley.
    ax.view_init(20, -60)
def draw_hyperbolic_paraboloid(steps=50):
    """Surface plot of the saddle z = x**2 - y**2 over [-1, 1]^2.

    steps: number of grid samples along each axis (default 50, matching
    the other draw_* helpers; previously hard-coded).
    """
    fig = plt.figure(figsize=(10, 10))
    ax = fig.gca(projection='3d')
    x = np.linspace(-1, 1, steps)
    y = np.linspace(-1, 1, steps)
    X, Y = np.meshgrid(x, y)
    Z = X ** 2 - Y ** 2
    ax.plot_surface(X, Y, Z, cmap=cm.coolwarm)
79540e2cd3e0b225ae8438ea4529e9123555eb1f | 76 | py | Python | tests/fixtures/.marten/manual.py | nick-allen/python-marten | 0351ba590311ca09bc3b3184678a2e6cf00aa2d9 | [
"MIT"
] | null | null | null | tests/fixtures/.marten/manual.py | nick-allen/python-marten | 0351ba590311ca09bc3b3184678a2e6cf00aa2d9 | [
"MIT"
] | null | null | null | tests/fixtures/.marten/manual.py | nick-allen/python-marten | 0351ba590311ca09bc3b3184678a2e6cf00aa2d9 | [
"MIT"
] | null | null | null | __author__ = 'Nick Allen <nick.allen.cse@gmail.com>'
MARTEN_FIXTURE = False | 25.333333 | 52 | 0.763158 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.