input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
col + 1, key['enabled'], cell_format_center)
xls_settings.write(row, col + 2, key['learn'], cell_format_center)
row += 1
row += 3
##-------------- Print Evasion ----------------#
xls_settings.write(row, col, 'Evasion Settings')
row += 2
xls_settings.write(row, col, 'Evasion Techniques')
xls_settings.write(row, col + 1, 'Enabled', cell_format_center)
xls_settings.write(row, col + 2, 'Learn', cell_format_center)
for key in evasions:
xls_settings.write(row, col, key['name'])
xls_settings.write(row, col + 1, key['enabled'], cell_format_center)
xls_settings.write(row, col + 2, key['learn'], cell_format_center)
row += 1
#---------------- Print IP Intelligence ----------------#
row += 3
xls_settings.write(row, col, 'IP Intelligence Settings')
row += 1
xls_settings.write(row, col, 'IPI Enabled')
xls_settings.write(row, col + 1, ipi)
row += 2
xls_settings.write(row, col, 'Name')
xls_settings.write(row, col + 1, 'Alarm', cell_format_center)
xls_settings.write(row, col + 2, 'Block', cell_format_center)
row += 1
for key in ipi_categories:
xls_settings.write(row, col, key['name'])
xls_settings.write(row, col + 1, key['alarm'], cell_format_center)
xls_settings.write(row, col + 2, key['block'], cell_format_center)
row += 1
workbook.close()
def word_file_results(document, results, customer_name, overview):
    """Append the policy heading and the entity-count overview table to the report.

    Writes a 3-column table (entity type, total configured, not enforced)
    summarizing the ASM policy, then saves the document.

    document -- python-docx Document to append to
    results  -- dict with '<prefix>_total' and '<prefix>_not_enforced' counts
    customer_name -- unused here; kept for signature parity with siblings
    overview -- dict; only overview['name'] (policy name) is used
    """
    document.add_heading(overview['name'], level=2)
    document.add_paragraph('The following section analyzing the configuration of the ASM policy')
    document.add_heading('Overview', level=3)
    document.add_paragraph('The following table provides a quick view on the entities configured this policy and their enforced status')
    table = document.add_table(rows=8, cols=3)
    table.style = 'Table Grid'
    table.cell(0, 0).text = 'Entities'
    table.cell(0, 1).text = 'Total Configured'
    table.cell(0, 2).text = 'Not Enforced'
    # (row label, key prefix into results) for each entity class.
    entity_rows = [
        ('File Types', 'file_type'),
        ('URLs', 'urls'),
        ('Parameters', 'param'),
        ('Signatures', 'sig'),
        ('Cookies', 'cookies'),
        ('HTTP Compliance', 'compliance'),
        ('Evasion', 'evasion'),
    ]
    for row, (label, prefix) in enumerate(entity_rows, start=1):
        table.cell(row, 0).text = label
        table.cell(row, 1).text = str(results[prefix + '_total'])
        table.cell(row, 2).text = str(results[prefix + '_not_enforced'])
    document.add_paragraph()
    document.add_paragraph()
    document.save("reports/F5 ASM - Config Review.docx")
def word_file_overview (document, overview, customer_name, suggestions):
    """Append the 'General Settings' table and related suggestions to the report.

    Writes a label/value table of the policy's general settings, then one
    "Intense Quote" paragraph per matching suggestion (errors first, then
    warnings, then info, then blocking-settings hits), and saves the document.

    document    -- python-docx Document to append to
    overview    -- dict of policy overview fields (see keys used below)
    customer_name -- unused here; kept for signature parity with siblings
    suggestions -- list of dicts with 'severity', 'section', 'txt' keys
    """
    # Blocking-settings violation names that belong in this section.
    violations = ["IP is blacklisted","Malformed XML data","Malformed JSON data","Disallowed file upload content detected","Virus detected","Brute Force: Maximum login attempts are exceeded"]
    num_of_suggestions = 0
    document.add_heading('General Settings', level=3)
    table = document.add_table(rows=17, cols=2)
    table.style = 'Table Grid'
    labels = [
        'Settings',
        'Policy Name',
        'Partition',
        'Enforcement mode',
        'Applied to vServers',
        'Application Language',
        'Brute force Protection',
        'DataGuard',
        'Antivirus',
        'Created By',
        'Created Date',
        'Last Updated',
        'Policy is case sensitive',
        'Mask Credit Card Numbers in Request Log',
        'Trust XFF',
        'Custom XFF',
        'Trigger ASM iRule Events',
    ]
    values = [
        'Values',
        overview['name'],
        overview['partition'],
        overview['enforcementMode'],
        '\n'.join(overview['virtualServers']),
        overview['applicationLanguage'],
        overview['brute_enabled'] + " (on " + str(overview['Login_pages_totalItems']) + " login pages)",
        overview['data_guard_enabled'],
        overview['inspectHttpUploads'],
        overview['creatorName'],
        overview['createdDatetime'],
        overview['lastUpdateMicros'],
        overview['caseInsensitive'],
        overview['maskCreditCardNumbersInRequest'],
        overview['trustXff'],
        '\n'.join(overview['customXffHeaders']),
        overview['triggerAsmIruleEvent'],
    ]
    for row, (label, value) in enumerate(zip(labels, values)):
        table.cell(row, 0).text = label
        table.cell(row, 1).text = value
    document.add_paragraph()
    document.add_paragraph()

    def add_suggestion(suggestion):
        # One "Intense Quote" paragraph per suggestion: severity icon + text.
        p = document.add_paragraph(style="Intense Quote")
        r = p.add_run()
        r.add_picture(suggestion['severity'] + '.png', width=Inches(.25), height=Inches(.25))
        r.add_text(suggestion['txt'])

    # Section suggestions, grouped by severity (errors, then warnings, then info).
    for severity in ("error", "warning", "info"):
        for suggestion in suggestions:
            if suggestion['severity'] == severity and suggestion['section'] == "Overview":
                add_suggestion(suggestion)
                num_of_suggestions += 1
    # Blocking-settings suggestions that reference this section's violations.
    for suggestion in suggestions:
        if suggestion['txt'] in violations and suggestion['section'] == "Blocking Settings":
            add_suggestion(suggestion)
            num_of_suggestions += 1
    if num_of_suggestions == 0:
        p = document.add_paragraph(style="Intense Quote")
        r = p.add_run()
        r.add_picture('low.png', width=Inches(.25), height=Inches(.25))
        r.add_text(' No suggestions found.')
    document.add_paragraph()
    document.add_paragraph()
    document.save("reports/F5 ASM - Config Review.docx")
def word_file_learning (document, customer_name, policy_builder, whitelist, suggestions):
    """Append the 'Learning Configuration' section to the report.

    Writes a label/value table of the policy-builder (learning) settings,
    then one "Intense Quote" paragraph per matching suggestion (errors,
    warnings, info), and saves the document.

    document       -- python-docx Document to append to
    customer_name  -- unused here; kept for signature parity with siblings
    policy_builder -- dict of policy-builder fields (see keys used below)
    whitelist      -- unused here; kept for signature parity
    suggestions    -- list of dicts with 'severity', 'section', 'txt' keys
    """
    num_of_suggestions = 0
    document.add_heading('Learning Configuration', level=3)
    document.add_paragraph('The following table shows the Learning configuration of the ASM Policy')
    table = document.add_table(rows=21, cols=2)
    table.style = 'Table Grid'
    labels = [
        'Settings',
        'Learning Mode',
        'Trust All IPs',
        'Trusted sources for learning',
        'Trusted hours for learning',
        'Untrusted sources for learning',
        'Untrusted hours for learning',
        'Full Inspection',
        'Learn Inactive Entities',
        'Learn New File Types',
        'Max Learned File Types',
        'Learn New URLs',
        'Max Learned URLs',
        'Learn New Parameters',
        'Max Learned Parameters',
        'Parameter Learning Level',
        'Learn Integer Values',
        'Classify Value Content',
        'Learn New Cookies',
        'Max Learned Cookies',
        'Learn Redirection Domains',
    ]
    values = [
        'Values',
        policy_builder['learningMode'],
        policy_builder['trustAllIps'],
        str(policy_builder['trusted_loosen_source']),
        str(policy_builder['trusted_loosen_hours']),
        str(policy_builder['untrusted_loosen_source']),
        str(policy_builder['untrusted_loosen_hours']),
        policy_builder['enableFullPolicyInspection'],
        policy_builder['learnInactiveEntities'],
        policy_builder['learnExplicitFiletypes'],
        str(policy_builder['maximumFileTypes']),
        policy_builder['learnExplicitUrls'],
        str(policy_builder['maximumUrls']),
        policy_builder['learnExplicitParameters'],
        str(policy_builder['maximumParameters']),
        policy_builder['parameterLearningLevel'],
        policy_builder['parametersIntegerValue'],
        policy_builder['classifyParameters'],
        policy_builder['learnExplicitCookies'],
        str(policy_builder['maximumCookies']),
        policy_builder['learnExplicitRedirectionDomains'],
    ]
    for row, (label, value) in enumerate(zip(labels, values)):
        table.cell(row, 0).text = label
        table.cell(row, 1).text = value
    document.add_paragraph()
    document.add_paragraph()

    def add_suggestion(suggestion):
        # One "Intense Quote" paragraph per suggestion: severity icon + text.
        p = document.add_paragraph(style="Intense Quote")
        r = p.add_run()
        r.add_picture(suggestion['severity'] + '.png', width=Inches(.25), height=Inches(.25))
        r.add_text(suggestion['txt'])

    # Section suggestions, grouped by severity (errors, then warnings, then info).
    for severity in ("error", "warning", "info"):
        for suggestion in suggestions:
            if suggestion['severity'] == severity and suggestion['section'] == "Policy Builder":
                add_suggestion(suggestion)
                num_of_suggestions += 1
    if num_of_suggestions == 0:
        p = document.add_paragraph(style="Intense Quote")
        r = p.add_run()
        r.add_picture('low.png', width=Inches(.25), height=Inches(.25))
        r.add_text(' No suggestions found.')
    document.add_paragraph()
    document.add_paragraph()
    document.save("reports/F5 ASM - Config Review.docx")
def word_file_compliance (document, customer_name, blocking_settings, compliance, suggestions):
    """Append the 'Compliance Violations' section to the report.

    Writes the blocking-settings rows for HTTP-compliance violations, a
    summary table of per-check HTTP Compliance configuration, then any
    matching suggestions, and saves the document.

    document         -- python-docx Document to append to
    customer_name    -- unused here; kept for signature parity with siblings
    blocking_settings-- list of dicts with 'name', 'learn', 'alarm', 'block'
    compliance       -- list of dicts with 'name', 'enabled', 'learn'
    suggestions      -- list of dicts with 'severity', 'section', 'txt' keys
    """
    # Blocking-settings violation names that belong in this section.
    violations = ["HTTP protocol compliance failed"]
    num_of_suggestions = 0
    document.add_heading('Compliance Violations', level=3)
    document.add_paragraph('The following section shows the blocking settings configuration for HTTP Compliance protection.')
    table = document.add_table(rows=1, cols=4)
    table.style = 'Table_column'
    table.cell(0, 0).text = 'Violation Name'
    table.cell(0, 1).text = 'Learn'
    table.cell(0, 2).text = 'Alarm'
    table.cell(0, 3).text = 'Block'
    for setting in blocking_settings:
        if setting['name'] in violations:
            cells = table.add_row().cells
            cells[0].text = setting['name']
            cells[1].text = setting['learn']
            cells[2].text = setting['alarm']
            cells[3].text = setting['block']
    set_column_width(table.columns[0], Cm(9.5))
    document.add_paragraph()
    document.add_paragraph('The following section summarizes the configuration for HTTP Compliance protection.')
    table = document.add_table(rows=1, cols=3)
    table.style = 'Table_rows'
    table.cell(0, 0).text = 'HTTP Compliance Violation'
    table.cell(0, 1).text = 'Enabled'
    table.cell(0, 2).text = 'Learn'
    for check in compliance:
        cells = table.add_row().cells
        cells[0].text = check['name']
        cells[1].text = check['enabled']
        cells[2].text = check['learn']
    set_column_width(table.columns[0], Cm(12.5))
    document.add_paragraph()
    document.add_paragraph()

    def add_suggestion(suggestion):
        # One "Intense Quote" paragraph per suggestion: severity icon + text.
        p = document.add_paragraph(style="Intense Quote")
        r = p.add_run()
        r.add_picture(suggestion['severity'] + '.png', width=Inches(.25), height=Inches(.25))
        r.add_text(suggestion['txt'])

    # Section suggestions, grouped by severity (errors, then warnings, then info).
    for severity in ("error", "warning", "info"):
        for suggestion in suggestions:
            if suggestion['severity'] == severity and suggestion['section'] == "HTTP Compliance":
                add_suggestion(suggestion)
                num_of_suggestions += 1
    # Blocking-settings suggestions that reference this section's violations.
    for suggestion in suggestions:
        if suggestion['txt'] in violations and suggestion['section'] == "Blocking Settings":
            add_suggestion(suggestion)
            num_of_suggestions += 1
    if num_of_suggestions == 0:
        p = document.add_paragraph(style="Intense Quote")
        r = p.add_run()
        r.add_picture('low.png', width=Inches(.25), height=Inches(.25))
        r.add_text(' No suggestions found.')
    document.add_paragraph()
    document.add_paragraph()
    document.save("reports/F5 ASM - Config Review.docx")
def word_file_evasion (document, customer_name, blocking_settings, evasions, suggestions):
    """Append the 'Evasion Techniques' section to the report.

    Writes the blocking-settings rows for the evasion violation, a summary
    table of per-technique evasion configuration, then any matching
    suggestions, and saves the document.

    document         -- python-docx Document to append to
    customer_name    -- unused here; kept for signature parity with siblings
    blocking_settings-- list of dicts with 'name', 'learn', 'alarm', 'block'
    evasions         -- list of dicts with 'name', 'enabled', 'learn'
    suggestions      -- list of dicts with 'severity', 'section', 'txt' keys
    """
    # Blocking-settings violation names that belong in this section.
    violations = ["Evasion technique detected"]
    num_of_suggestions = 0
    document.add_heading('Evasion Techniques', level=3)
    document.add_paragraph('The Evasion technique detected violation is triggered when the BIG-IP ASM system fails to normalize requests. Normalization is the process of decoding requests that are encoded. The system needs to perform normalization because some applications send requests that contain different types of encoded escapes that the BIG-IP ASM system needs to interpret before doing any further processing')
    document.add_paragraph('The following section shows the blocking settings configuration for Evasion protection.')
    table = document.add_table(rows=1, cols=4)
    table.style = 'Table_column'
    table.cell(0, 0).text = 'Violation Name'
    table.cell(0, 1).text = 'Learn'
    table.cell(0, 2).text = 'Alarm'
    table.cell(0, 3).text = 'Block'
    for setting in blocking_settings:
        if setting['name'] in violations:
            cells = table.add_row().cells
            cells[0].text = setting['name']
            cells[1].text = setting['learn']
            cells[2].text = setting['alarm']
            cells[3].text = setting['block']
    set_column_width(table.columns[0], Cm(9.5))
    document.add_paragraph()
    # Typo fix in report text: "techiques" -> "techniques".
    document.add_paragraph('The following section summarizes the configuration for Evasion techniques protection.')
    table = document.add_table(rows=1, cols=3)
    table.style = 'Table_rows'
    table.cell(0, 0).text = 'Evasion Techniques'
    table.cell(0, 1).text = 'Enabled'
    table.cell(0, 2).text = 'Learn'
    for technique in evasions:
        cells = table.add_row().cells
        cells[0].text = technique['name']
        cells[1].text = technique['enabled']
        cells[2].text = technique['learn']
    set_column_width(table.columns[0], Cm(12.5))
    document.add_paragraph()
    document.add_paragraph()

    def add_suggestion(suggestion):
        # One "Intense Quote" paragraph per suggestion: severity icon + text.
        p = document.add_paragraph(style="Intense Quote")
        r = p.add_run()
        r.add_picture(suggestion['severity'] + '.png', width=Inches(.25), height=Inches(.25))
        r.add_text(suggestion['txt'])

    # Section suggestions, grouped by severity (errors, then warnings, then info).
    for severity in ("error", "warning", "info"):
        for suggestion in suggestions:
            if suggestion['severity'] == severity and suggestion['section'] == "Evasion":
                add_suggestion(suggestion)
                num_of_suggestions += 1
    # Blocking-settings suggestions that reference this section's violations.
    for suggestion in suggestions:
        if suggestion['txt'] in violations and suggestion['section'] == "Blocking Settings":
            add_suggestion(suggestion)
            num_of_suggestions += 1
    if num_of_suggestions == 0:
        p = document.add_paragraph(style="Intense Quote")
        r = p.add_run()
        r.add_picture('low.png', width=Inches(.25), height=Inches(.25))
        r.add_text(' No suggestions found.')
    document.add_paragraph()
    document.add_paragraph()
    document.save("reports/F5 ASM - Config Review.docx")
def word_file_signatures (document, customer_name, signatures_overview, signature_sets, urls, headers, parameters, cookies, suggestions):
    """Append the 'Signatures' section to the report.

    Writes signature staging settings, the signature-set table, signature
    counts, the per-entity signature-override table, then any matching
    suggestions, and saves the document.

    document            -- python-docx Document to append to
    customer_name       -- unused here; kept for signature parity with siblings
    signatures_overview -- dict of overall signature settings/counts
    signature_sets      -- list of dicts with 'name', 'learn', 'alarm', 'block'
    urls/headers/parameters/cookies -- entity lists, each dict carrying
        'name', 'signatureOverrides' and a per-type signature-check flag
    suggestions         -- list of dicts with 'severity', 'section', 'txt' keys
    """
    num_of_suggestions = 0
    document.add_heading('Signatures', level=3)
    document.add_paragraph('This section of the report summarizes the configuration done for Attack Signature protection.')

    def add_bold_label(label, value):
        # "Label: value" paragraph with the label in bold.
        p = document.add_paragraph()
        runner = p.add_run(label)
        runner.bold = True
        p.add_run(value)

    add_bold_label("Signature Staging: ", signatures_overview['signatureStaging'])
    add_bold_label("Place New Signature in Staging: ", signatures_overview['placeSignaturesInStaging'])
    document.add_paragraph()
    table = document.add_table(rows=1, cols=4)
    table.style = 'Table_column'
    table.cell(0, 0).text = 'Signature Set Name'
    table.cell(0, 1).text = 'Learn'
    table.cell(0, 2).text = 'Alarm'
    table.cell(0, 3).text = 'Block'
    for sig_set in signature_sets:
        cells = table.add_row().cells
        cells[0].text = sig_set['name']
        cells[1].text = sig_set['learn']
        cells[2].text = sig_set['alarm']
        cells[3].text = sig_set['block']
    document.add_paragraph()
    add_bold_label("Total Signatures: ", str(signatures_overview['total']))
    add_bold_label("Staging Signatures: ", str(signatures_overview['staging']))
    # NOTE(review): label says 'Disabled' but the value is read from the
    # 'enabled' key — confirm which field was intended.
    add_bold_label("Disabled Signatures: ", str(signatures_overview['enabled']))
    add_bold_label("Latest Signature update: ", signatures_overview['latest_sig_update'])
    document.add_paragraph()
    document.add_heading('Signature Overrides', level=4)
    document.add_paragraph('This section shows all entities that have Signatures Overrides and therefore these attack signatures are not being applied.')
    table = document.add_table(rows=1, cols=3)
    table.style = 'Table_rows'
    table.cell(0, 0).text = 'Type'
    table.cell(0, 1).text = 'Name'
    table.cell(0, 2).text = 'Signature Overrides'

    def add_override_rows(entity_type, entities, check_field):
        # One row per entity whose signature checking is enabled and whose
        # override list is non-trivial: more than one entry, or a single
        # entry other than the "None" placeholder.
        for entity in entities:
            overrides = entity['signatureOverrides']
            if entity[check_field] == "Yes" and (len(overrides) > 1 or overrides[0] != "None"):
                cells = table.add_row().cells
                cells[0].text = entity_type
                cells[1].text = entity['name']
                cells[2].text = '\n'.join(overrides)

    add_override_rows("URL", urls, 'attackSignaturesCheck')
    add_override_rows("Parameter", parameters, 'attackSignaturesCheck')
    add_override_rows("Header", headers, 'checkSignatures')
    add_override_rows("Cookie", cookies, 'attackSignaturesCheck')
    set_column_width(table.columns[0], Cm(2))
    # Bug fix: the original sized columns[2] twice (7.5 cm then 9 cm),
    # leaving the Name column unsized; size columns[1] instead.
    set_column_width(table.columns[1], Cm(7.5))
    set_column_width(table.columns[2], Cm(9))
    document.add_paragraph()

    def add_suggestion(suggestion):
        # One "Intense Quote" paragraph per suggestion: severity icon + text.
        p = document.add_paragraph(style="Intense Quote")
        r = p.add_run()
        r.add_picture(suggestion['severity'] + '.png', width=Inches(.25), height=Inches(.25))
        r.add_text(suggestion['txt'])

    # Section suggestions, grouped by severity (errors, then warnings, then info).
    for severity in ("error", "warning", "info"):
        for suggestion in suggestions:
            if suggestion['severity'] == severity and suggestion['section'] == "Signatures":
                add_suggestion(suggestion)
                num_of_suggestions += 1
    if num_of_suggestions == 0:
        p = document.add_paragraph(style="Intense Quote")
        r = p.add_run()
        r.add_picture('low.png', width=Inches(.25), height=Inches(.25))
        r.add_text(' No suggestions found.')
    document.add_paragraph()
    document.add_paragraph()
    document.save("reports/F5 ASM - Config Review.docx")
def | |
<reponame>zxkjack123/pypact<gh_stars>10-100
import os
import pypact as pp
from pypact.printlib.printlib5 import SpectralData
from tests.testerbase import Tester, REFERENCE_DIR
class PrintLib5UnitTest(Tester):
def setUp(self):
    """Resolve the reference FISPACT printlib5 output file used by the tests."""
    # The original wrapped REFERENCE_DIR in a redundant os.path.join;
    # a single join is equivalent.
    self.filename = os.path.join(REFERENCE_DIR, "printlib5.out")
def tearDown(self):
    # No per-test cleanup needed; defined to mirror setUp.
    pass
def test_default(self):
    """A freshly constructed PrintLib5 holds no entries at all."""
    printlib = pp.PrintLib5()
    self.assertEqual(0, len(printlib), "Assert no data")
    self.assertEqual(0, len(printlib.spectral_data), "Assert no mean data")
def test_fispact_deserialize(self):
    """Deserializing the reference file record populates all entry counts."""
    printlib = pp.PrintLib5()
    self.assertEqual(0, len(printlib), "Assert no data")
    self.assertEqual(0, len(printlib.spectral_data), "Assert no mean data")
    # due to a bug in FISPACT-II and/or nuclear data
    # it sometimes prints out 0 for number of spectra
    # in mean section but then has no line entry - hence the mismatch
    # between filerecord nr_of_entries and len(pl.spectral_data)
    record = pp.PrintLib5FileRecord(self.filename)
    self.assertEqual(8422, record.nr_of_entries, "Assert number of mean entries")
    printlib = pp.PrintLib5()
    printlib.fispact_deserialize(record)
    self.assertEqual(8422, len(printlib), "Assert data")
    self.assertEqual(8422, len(printlib.spectral_data), "Assert mean data")
    self.assertEqual(3875, printlib.nr_of_zais, "Assert number of ZAIs")
def test_reader(self):
    """Open the reference file with PrintLib5Reader and spot-check entries."""
    with pp.PrintLib5Reader(self.filename) as reader:
        self.assertEqual(8422, len(reader), "Assert data")
        self.assertEqual(8422, len(reader.spectral_data), "Assert mean data")
        self.assertEqual(3875, reader.nr_of_zais, "Assert number of ZAIs")
        # (index, name, zai, number, type, nr_of_lines,
        #  mean_energy, mean_energy_unc, normalisation, normalisation_unc)
        expected_entries = [
            (0, "H 3", 10030, 3, "beta", 1,
             5.70740E+03, 1.84397E+00, 1.0, 0.0),
            (130, "F 21", 90210, 106, "gamma", 15,
             5.47120E+05, 3.77412E+03, 8.95500E-01, 6.00000E-04),
            (131, "F 21", 90210, 106, "beta", 7,
             2.34181E+06, 1.08633E+05, 1.00000E+00, 0.0),
            (132, "F 21", 90210, 106, "e-", 4,
             2.07265E+01, 2.07265E+00, 1.00000E+00, 0.0),
            (133, "F 21", 90210, 106, "x", 1,
             6.31343E-04, 6.31343E-05, 1.00000E+00, 0.0),
        ]
        for (index, name, zai, number, dtype, nlines,
             energy, energy_unc, norm, norm_unc) in expected_entries:
            entry = reader.spectral_data[index]
            self.assertEqual(name, entry.name, "Assert name")
            self.assertEqual(zai, entry.zai, "Assert zai")
            self.assertEqual(number, entry.number, "Assert number")
            self.assertEqual(dtype, entry.type, "Assert type")
            self.assertEqual(nlines, entry.nr_of_lines, "Assert nr_of_lines")
            self.assertEqual(energy, entry.mean_energy, "Assert mean_energy")
            self.assertEqual(energy_unc, entry.mean_energy_unc, "Assert mean_energy_unc")
            self.assertEqual(norm, entry.mean_normalisation, "Assert normalisation")
            self.assertEqual(norm_unc, entry.mean_normalisation_unc, "Assert normalisation_unc")
        # last entry
        entry = reader.spectral_data[-1]
        self.assertEqual("Rg272", entry.name, "Assert name")
        self.assertEqual(1112720, entry.zai, "Assert zai")
        self.assertEqual(3875, entry.number, "Assert number")
        self.assertEqual("no spectral data", entry.type, "Assert type")
        self.assertEqual(0, entry.nr_of_lines, "Assert default")
        self.assertEqual(0.0, entry.mean_energy, "Assert default")
        self.assertEqual(0.0, entry.mean_energy_unc, "Assert default")
        self.assertEqual(0.0, entry.mean_normalisation, "Assert default")
        self.assertEqual(0.0, entry.mean_normalisation_unc, "Assert default")
def test_reader_spectral_lines(self):
with pp.PrintLib5Reader(self.filename) as pl:
self.assertEqual(8422, len(pl), "Assert data")
self.assertEqual(8422, len(pl.spectral_data), "Assert mean data")
self.assertEqual(3875, pl.nr_of_zais, "Assert number of ZAIs")
s = pl.spectral_data[0]
self.assertEqual("H 3", s.name, "Assert name")
self.assertEqual(10030, s.zai, "Assert zai")
self.assertEqual(3, s.number, "Assert number")
self.assertEqual("beta", s.type, "Assert type")
self.assertEqual(1, s.nr_of_lines, "Assert nr_of_lines")
self.assertEqual(5.70740E+03, s.mean_energy, "Assert mean_energy")
self.assertEqual(1.84397E+00, s.mean_energy_unc, "Assert mean_energy_unc")
self.assertEqual(1.0, s.mean_normalisation, "Assert normalisation")
self.assertEqual(0.0, s.mean_normalisation_unc, "Assert normalisation_unc")
self.assertEqual([1.85710E+04], s.lines.energies, "Assert line energies")
self.assertEqual([6.00000E+00], s.lines.energies_unc, "Assert line energies uncert")
self.assertEqual([1.00000E+00], s.lines.intensities, "Assert line intensities")
self.assertEqual([0.0], s.lines.intensities_unc, "Assert line intensities uncert")
self.assertEqual([1.00000E+00], s.lines.norms, "Assert line norms")
self.assertEqual([0.0], s.lines.norms_unc, "Assert line norms uncert")
s = pl.spectral_data[130]
self.assertEqual("F 21", s.name, "Assert name")
self.assertEqual(90210, s.zai, "Assert zai")
self.assertEqual(106, s.number, "Assert number")
self.assertEqual("gamma", s.type, "Assert type")
self.assertEqual(15, s.nr_of_lines, "Assert nr_of_lines")
self.assertEqual(5.47120E+05, s.mean_energy, "Assert mean_energy")
self.assertEqual(3.77412E+03, s.mean_energy_unc, "Assert mean_energy_unc")
self.assertEqual(8.95500E-01, s.mean_normalisation, "Assert normalisation")
self.assertEqual(6.00000E-04, s.mean_normalisation_unc, "Assert normalisation_unc")
line_energies = [
3.50730E+05,
1.39518E+06,
1.74591E+06,
1.89040E+06,
1.98970E+06,
2.78000E+06,
2.79416E+06,
3.38490E+06,
3.53330E+06,
3.73560E+06,
3.88400E+06,
4.17510E+06,
4.33390E+06,
4.52590E+06,
4.68460E+06
]
line_energies_unc = [
1.00000E+01,
3.00000E+01,
2.00000E+01,
2.00000E+02,
2.00000E+02,
2.00000E+02,
4.00000E+01,
2.00000E+02,
2.00000E+02,
2.00000E+02,
2.00000E+02,
2.00000E+02,
2.00000E+02,
2.00000E+02,
2.00000E+02
]
line_intensities = [
1.00000E+00,
1.71300E-01,
8.64000E-03,
2.03000E-05,
2.50000E-06,
1.77000E-05,
2.03000E-05,
4.20000E-06,
3.26000E-05,
2.78000E-05,
1.07000E-05,
3.56500E-04,
5.30600E-04,
1.06000E-04,
3.12700E-04
]
line_intensities_unc = [
0.00000E+00,
3.00000E-03,
1.50000E-04,
3.00000E-06,
3.00000E-07,
1.70000E-06,
3.00000E-06,
4.00000E-07,
1.70000E-06,
2.60000E-06,
1.40000E-06,
6.50000E-06,
1.35000E-05,
3.30000E-06,
1.09000E-05
]
line_norms = [
8.95500E-01,
8.95500E-01,
8.95500E-01,
8.95500E-01,
8.95500E-01,
8.95500E-01,
8.95500E-01,
8.95500E-01,
8.95500E-01,
8.95500E-01,
8.95500E-01,
8.95500E-01,
8.95500E-01,
8.95500E-01,
8.95500E-01
]
line_norms_unc = [
6.00000E-04,
6.00000E-04,
6.00000E-04,
6.00000E-04,
6.00000E-04,
6.00000E-04,
6.00000E-04,
6.00000E-04,
6.00000E-04,
6.00000E-04,
6.00000E-04,
6.00000E-04,
6.00000E-04,
6.00000E-04,
6.00000E-04
]
self.assertEqual(line_energies, s.lines.energies, "Assert line energies")
self.assertEqual(line_energies_unc, s.lines.energies_unc, "Assert line energies uncert")
self.assertEqual(line_intensities, s.lines.intensities, "Assert line intensities")
self.assertEqual(line_intensities_unc, s.lines.intensities_unc, "Assert line intensities uncert")
self.assertEqual(line_norms, s.lines.norms, "Assert line norms")
self.assertEqual(line_norms_unc, s.lines.norms_unc, "Assert line norms uncert")
s = pl.spectral_data[131]
self.assertEqual("F 21", s.name, "Assert name")
self.assertEqual(90210, s.zai, "Assert zai")
self.assertEqual(106, s.number, "Assert number")
self.assertEqual("beta", s.type, "Assert type")
self.assertEqual(7, s.nr_of_lines, "Assert nr_of_lines")
self.assertEqual(2.34181E+06, s.mean_energy, "Assert mean_energy")
self.assertEqual(1.08633E+05, s.mean_energy_unc, "Assert mean_energy_unc")
self.assertEqual(1.00000E+00, s.mean_normalisation, "Assert normalisation")
self.assertEqual(0.0, s.mean_normalisation_unc, "Assert normalisation_unc")
line_energies = [
9.99600E+05,
1.15830E+06,
1.80020E+06,
1.94860E+06,
3.93830E+06,
5.33350E+06,
5.68420E+06
]
line_energies_unc = [
2.00000E+03,
2.00000E+03,
2.00000E+03,
2.00000E+03,
1.80000E+03,
1.80000E+03,
1.80000E+03
]
line_intensities = [
7.70000E-04,
4.30000E-04,
3.90000E-05,
3.10000E-05,
1.61000E-01,
7.41000E-01,
9.60000E-02
]
line_intensities_unc = [
2.00000E-05,
1.00000E-05,
3.00000E-06,
3.00000E-06,
1.00000E-02,
3.00000E-02,
3.00000E-02
]
line_norms = [
1.00000E+00,
1.00000E+00,
1.00000E+00,
1.00000E+00,
1.00000E+00,
1.00000E+00,
1.00000E+00
]
line_norms_unc = [
0.00000E+00,
0.00000E+00,
0.00000E+00,
0.00000E+00,
0.00000E+00,
0.00000E+00,
0.00000E+00
]
self.assertEqual(line_energies, s.lines.energies, "Assert line energies")
self.assertEqual(line_energies_unc, s.lines.energies_unc, "Assert line energies uncert")
self.assertEqual(line_intensities, s.lines.intensities, "Assert line intensities")
self.assertEqual(line_intensities_unc, s.lines.intensities_unc, "Assert line intensities uncert")
self.assertEqual(line_norms, s.lines.norms, "Assert line norms")
self.assertEqual(line_norms_unc, s.lines.norms_unc, "Assert line norms uncert")
s = pl.spectral_data[132]
self.assertEqual("F 21", s.name, "Assert name")
self.assertEqual(90210, s.zai, "Assert zai")
self.assertEqual(106, s.number, "Assert number")
self.assertEqual("e-", s.type, "Assert type")
self.assertEqual(4, s.nr_of_lines, "Assert nr_of_lines")
self.assertEqual(2.07265E+01, s.mean_energy, "Assert mean_energy")
self.assertEqual(2.07265E+00, s.mean_energy_unc, "Assert mean_energy_unc")
self.assertEqual(1.00000E+00, s.mean_normalisation, "Assert normalisation")
self.assertEqual(0.0, s.mean_normalisation_unc, "Assert normalisation_unc")
line_energies = [
8.30000E+02,
3.49863E+05,
3.50712E+05,
3.50730E+05
]
line_energies_unc = [
4.15000E+01,
1.74932E+04,
1.75356E+04,
1.75365E+04
]
line_intensities = [
5.47770E-05,
5.55210E-05,
3.13425E-06,
4.47750E-07
]
line_intensities_unc = [
5.47770E-06,
5.55210E-06,
3.13425E-07,
4.47750E-08
]
line_norms = [
1.00000E+00,
1.00000E+00,
1.00000E+00,
1.00000E+00
]
line_norms_unc = [
0.00000E+00,
0.00000E+00,
0.00000E+00,
0.00000E+00
]
self.assertEqual(line_energies, s.lines.energies, "Assert line energies")
self.assertEqual(line_energies_unc, s.lines.energies_unc, "Assert line energies uncert")
self.assertEqual(line_intensities, s.lines.intensities, "Assert line intensities")
self.assertEqual(line_intensities_unc, s.lines.intensities_unc, "Assert line intensities uncert")
self.assertEqual(line_norms, s.lines.norms, "Assert line norms")
self.assertEqual(line_norms_unc, s.lines.norms_unc, "Assert line norms uncert")
s = pl.spectral_data[133]
self.assertEqual("F 21", s.name, "Assert name")
self.assertEqual(90210, s.zai, "Assert zai")
self.assertEqual(106, s.number, "Assert number")
self.assertEqual("x", s.type, "Assert type")
self.assertEqual(1, s.nr_of_lines, "Assert nr_of_lines")
self.assertEqual(6.31343E-04, s.mean_energy, "Assert mean_energy")
self.assertEqual(6.31343E-05, s.mean_energy_unc, "Assert mean_energy_unc")
self.assertEqual(1.00000E+00, s.mean_normalisation, "Assert normalisation")
self.assertEqual(0.0, s.mean_normalisation_unc, "Assert normalisation_unc")
self.assertEqual([8.48600E+02], s.lines.energies, "Assert line energies")
self.assertEqual([1.69720E+01], s.lines.energies_unc, "Assert line energies uncert")
self.assertEqual([7.43981E-07], s.lines.intensities, "Assert line intensities")
self.assertEqual([7.43981E-08], s.lines.intensities_unc, "Assert line intensities uncert")
self.assertEqual([1.0], s.lines.norms, "Assert line norms")
self.assertEqual([0.0], s.lines.norms_unc, "Assert line norms uncert")
# last non zero entry
s = pl.spectral_data[8333]
self.assertEqual("Lr257", s.name, "Assert name")
self.assertEqual(1032570, s.zai, "Assert zai")
self.assertEqual(3787, s.number, "Assert number")
self.assertEqual("alpha", s.type, "Assert type")
self.assertEqual(2, s.nr_of_lines, "Assert default")
self.assertEqual(8.84930E+06, s.mean_energy, "Assert default")
self.assertEqual(1.01144E+04, s.mean_energy_unc, "Assert default")
self.assertEqual(1.00000E-02, s.mean_normalisation, "Assert default")
self.assertEqual(0.0, s.mean_normalisation_unc, "Assert default")
self.assertEqual([8.79600E+06, 8.86100E+06], s.lines.energies, "Assert line energies")
self.assertEqual([1.30000E+04, 1.20000E+04], s.lines.energies_unc, "Assert line energies uncert")
self.assertEqual([1.80000E+01, 8.20000E+01], s.lines.intensities, "Assert line intensities")
self.assertEqual([2.00000E+00, 2.00000E+00], s.lines.intensities_unc, "Assert line intensities uncert")
self.assertEqual([1.00000E-02, 1.00000E-02], s.lines.norms, "Assert line norms")
self.assertEqual([0.0, 0.0], s.lines.norms_unc, "Assert line norms uncert")
# last entry
s = pl.spectral_data[-1]
self.assertEqual("Rg272", s.name, "Assert name")
self.assertEqual(1112720, s.zai, "Assert zai")
self.assertEqual(3875, s.number, "Assert number")
self.assertEqual("no spectral data", s.type, "Assert type")
self.assertEqual(0, s.nr_of_lines, "Assert default")
self.assertEqual(0.0, s.mean_energy, "Assert default")
self.assertEqual(0.0, s.mean_energy_unc, "Assert default")
self.assertEqual(0.0, s.mean_normalisation, "Assert default")
self.assertEqual(0.0, s.mean_normalisation_unc, "Assert default")
self.assertEqual([], s.lines.energies, "Assert line energies")
self.assertEqual([], s.lines.energies_unc, "Assert line energies uncert")
self.assertEqual([], s.lines.intensities, "Assert line intensities")
self.assertEqual([], s.lines.intensities_unc, "Assert line intensities uncert")
self.assertEqual([], s.lines.norms, "Assert line norms")
self.assertEqual([], s.lines.norms_unc, "Assert line norms uncert")
def test_default_spectra_data(self):
    """A freshly constructed SpectralData carries empty/zero defaults."""
    s = SpectralData()
    expected_defaults = [
        ("name", ""),
        ("zai", 0),
        ("number", 0),
        ("type", ""),
        ("nr_of_lines", 0),
        ("mean_energy", 0.0),
        ("mean_energy_unc", 0.0),
        ("mean_normalisation", 0.0),
        ("mean_normalisation_unc", 0.0),
    ]
    for attr, value in expected_defaults:
        self.assertEqual(value, getattr(s, attr), "Assert default")
    self.assertEqual(0, len(s.lines), "Assert no line data")
def test_default_spectra_data_deserialize(self):
s = SpectralData()
linedump = [' He 8 20080 13 gamma 1 8.63104E+05 +- 9.80839E+03 8.80000E-01 +- 0.00000E+00\n']
s.fispact_deserialize(linedump)
self.assertEqual("He 8", s.name, "Assert name")
self.assertEqual(20080, s.zai, "Assert zai")
self.assertEqual(13, s.number, "Assert number")
self.assertEqual("gamma", s.type, "Assert type")
self.assertEqual(1, s.nr_of_lines, "Assert nr_of_lines")
self.assertEqual(8.63104E+05, s.mean_energy, "Assert mean_energy")
self.assertEqual(9.80839E+03, s.mean_energy_unc, "Assert mean_energy_unc")
self.assertEqual(8.80000E-01, s.mean_normalisation, "Assert normalisation")
self.assertEqual(0.0, s.mean_normalisation_unc, "Assert normalisation_unc")
self.assertEqual(0, len(s.lines), "Assert no line data")
linedump = [' Be 5 40050 | |
<filename>scikits/gpu/shader.py<gh_stars>1-10
"""
Copyright (c) 2009, <NAME> <<EMAIL>>
This module was originally based on code from
http://swiftcoder.wordpress.com/2008/12/19/simple-glsl-wrapper-for-pyglet/
which is
Copyright (c) 2008, <NAME>
Permission is hereby granted, free of charge, to any person or organization
obtaining a copy of the software and accompanying documentation covered by
this license (the "Software") to use, reproduce, display, distribute,
execute, and transmit the Software, and to prepare derivative works of the
Software, and to permit third-parties to whom the Software is furnished to
do so, all subject to the following:
The copyright notices in the Software and this entire statement, including
the above license grant, this restriction and the following disclaimer,
must be included in all copies of the Software, in whole or in part, and
all derivative works of the Software, unless such copies or derivative
works are solely in the form of machine-executable object code generated by
a source language processor.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
__all__ = ['Program', 'VertexShader', 'FragmentShader', 'Shader',
'default_vertex_shader']
import functools
from ctypes import pointer, POINTER, c_char_p, byref, cast, c_char, c_int, \
     create_string_buffer

import numpy as np
import pyglet.gl as gl

from scikits.gpu.config import require_extension, GLSLError
class Shader:
    """A compiled GLSL shader object.

    Parameters
    ----------
    source : string or list
        String or list of strings.  The GLSL source code for the shader.
    type : {'vertex', 'fragment'}
        Type of shader ('geometry' is not supported yet).

    Raises
    ------
    GLSLError
        If the shader type is unknown, no source is provided, or
        compilation fails (the GL compiler log becomes the message).
    """
    def __init__(self, source="", type='vertex'):
        shader_type = {'vertex': gl.GL_VERTEX_SHADER,
                       'fragment': gl.GL_FRAGMENT_SHADER,}
##                     'geometry': gl.GL_GEOMETRY_SHADER}

        if type not in shader_type:
            # Previously this surfaced as a bare KeyError further down;
            # raise a descriptive error up front instead.
            raise GLSLError("Unknown shader type: '%s'." % type)

        # Accept a single source string as well as a list of strings.
        # ``basestring`` only exists on Python 2; fall back to
        # (str, bytes) on Python 3.
        try:
            string_types = basestring
        except NameError:
            string_types = (str, bytes)
        if isinstance(source, string_types):
            source = [source]

        count = len(source)
        # if we have no source code, ignore this shader
        if count < 1:
            raise GLSLError("No GLSL source provided.")

        # create the shader handle
        shader = gl.glCreateShader(shader_type[type])

        # Convert the source strings into a ctypes pointer-to-char array
        # and upload them.  ``c_char_p`` requires bytes, so encode any
        # text strings first (a no-op on Python 2, where str is bytes).
        raw = [s if isinstance(s, bytes) else s.encode('utf-8')
               for s in source]
        src = (c_char_p * count)(*raw)
        gl.glShaderSource(shader, count,
                          cast(pointer(src), POINTER(POINTER(c_char))),
                          None)

        # compile the shader
        gl.glCompileShader(shader)

        temp = c_int(0)
        # retrieve the compile status
        gl.glGetShaderiv(shader, gl.GL_COMPILE_STATUS, byref(temp))

        # if compilation failed, raise the info log as the error message
        if not temp:
            # retrieve the log length
            gl.glGetShaderiv(shader, gl.GL_INFO_LOG_LENGTH, byref(temp))
            # create a buffer for the log
            buffer = create_string_buffer(temp.value)
            # retrieve the log text
            gl.glGetShaderInfoLog(shader, temp, None, buffer)
            raise GLSLError(buffer.value)

        # OpenGL shader handle
        self.handle = shader
        # concatenated source text, kept for later uniform-type parsing
        self.source = "\n".join(source)
class VertexShader(Shader):
    """A Shader compiled as a GL vertex shader."""
    def __init__(self, source):
        Shader.__init__(self, source, type='vertex')
class FragmentShader(Shader):
    """A Shader compiled as a GL fragment shader."""
    def __init__(self, source):
        Shader.__init__(self, source, type='fragment')
## Not supported yet
## class GeometryShader(Shader):
## def __init__(self, source):
## Shader.__init__(self, source, type='geometry')
def if_in_use(f):
    """Decorator: execute *f* if and only if the program is in use.

    Parameters
    ----------
    f : callable
        A method of an object exposing a boolean ``bound`` attribute.

    Raises
    ------
    GLSLError
        If the program is not bound into the rendering pipeline when
        the wrapped method is called.
    """
    def execute_if_in_use(self, *args, **kwargs):
        if not self.bound:
            raise GLSLError("Shader is not bound. Cannot execute assignment.")
        # The original wrapper dropped f's return value; propagate it.
        return f(self, *args, **kwargs)

    # functools.wraps copies the metadata portably.  The original
    # hand-rolled loop copied the Python-2-only ``func_name`` attribute,
    # which raises AttributeError on Python 3.
    return functools.wraps(f)(execute_if_in_use)
class Program(list):
"""A program contains one or more Shader.
"""
def __init__(self, shaders):
    """Build a Program from a single Shader or an iterable of Shaders."""
    try:
        items = list(shaders)
    except TypeError:
        # A lone shader was passed rather than a sequence.
        items = [shaders]
    list.__init__(self, items)

    self.handle = gl.glCreateProgram()
    # not bound yet (i.e. not in rendering pipeline)
    self.bound = False
    # per-uniform type descriptions, filled in by _update_uniform_types
    self._uniform_type_info = {}

    self._link()
def append(self, shader):
    """Append a Shader to the Program.

    If the program is currently bound, it is re-bound so the new
    shader takes effect.
    """
    list.append(self, shader)
    if self.bound:
        # NOTE(review): ``bind`` is not defined in the visible part of
        # this class (binding is done by ``use``) -- confirm this is not
        # a stale API call.
        self.bind()
@property
def linked(self):
    """Whether the program is currently linked successfully."""
    status = c_int(0)
    gl.glGetProgramiv(self.handle, gl.GL_LINK_STATUS, byref(status))
    return bool(status)
def _link(self):
    """Attach all shaders, link the program and cache uniform info.

    Raises
    ------
    GLSLError
        If linking fails; the GL info log is used as the message.
    """
    for shader in self:
        gl.glAttachShader(self.handle, shader.handle);

    # link the program
    gl.glLinkProgram(self.handle)

    temp = c_int(0)
    # retrieve the link status
    gl.glGetProgramiv(self.handle, gl.GL_LINK_STATUS, byref(temp))

    # if linking failed, raise the info log as the error
    if not temp:
        # retrieve the log length
        gl.glGetProgramiv(self.handle, gl.GL_INFO_LOG_LENGTH, byref(temp))
        # create a buffer for the log
        buffer = create_string_buffer(temp.value)
        # retrieve the log text
        gl.glGetProgramInfoLog(self.handle, temp, None, buffer)
        raise GLSLError(buffer.value)

    # Cache the length of the longest active uniform name; used when
    # sizing name buffers for uniform queries.
    AUL = gl.GLint()
    gl.glGetProgramiv(self.handle, gl.GL_ACTIVE_UNIFORM_MAX_LENGTH,
                      byref(AUL))
    self._ACTIVE_UNIFORM_MAX_LENGTH = AUL.value

    # Re-parse uniform declarations from the attached shaders' source.
    self._update_uniform_types()
@property
def active_uniforms(self):
    """Query OpenGL for a list of active uniform names.

    This is needed, because we are only allowed to set and query the
    values of active uniforms.
    """
    # Query number of active uniforms
    nr_uniforms = gl.GLint()
    gl.glGetProgramiv(self.handle, gl.GL_ACTIVE_UNIFORMS,
                      byref(nr_uniforms))
    nr_uniforms = nr_uniforms.value

    length = gl.GLsizei()
    size = gl.GLsizei()
    enum = gl.GLenum()
    name = create_string_buffer(self._ACTIVE_UNIFORM_MAX_LENGTH)

    uniforms = []
    for i in range(nr_uniforms):
        # Pass the real buffer size here.  The original hard-coded 20,
        # which silently truncated uniform names longer than 19 chars
        # even though the buffer was sized correctly.
        gl.glGetActiveUniform(self.handle, i,
                              self._ACTIVE_UNIFORM_MAX_LENGTH,
                              byref(length), byref(size),
                              byref(enum), name)
        uniforms.append(name.value)

    return uniforms
def _update_uniform_types(self):
    """Determine the numeric types of uniform variables.

    Updates the internal dictionary _uniform_type_info[var] with:

    kind : {'mat', 'vec', 'int', 'float'}
        The kind of numeric type.
    size : int
        Number of elements, e.g., 4 for vec4, 4 for mat2, 1 for scalar.
    array : int
        Declared array length, e.g., ``uniform vec4 x[12];`` gives 12;
        1 for a non-array variable.

    Raises
    ------
    RuntimeError
        If an array is declared without an explicit size.
    GLSLError
        If the same uniform is declared twice with different types.
    """
    source = ";".join([s.source for s in self])

    # And look at each statement individually
    statements = [s.strip() for s in source.split(';')]

    # Now look only at uniform declarations, dropping the
    # 'uniform ' prefix
    decls = [s[len('uniform') + 1:] for s in statements
             if s.startswith('uniform')]
    types = [desc_name.split(' ')[:2] for desc_name in decls]

    # Handle declarations of the form uniform float f=3.0
    types = [(desc, name.split('=')[0]) for (desc, name) in types]

    type_info = {}
    for desc, name in types:
        # Check for array declaration, e.g. float x[12]
        name_array = name.split('[')
        var_name = name_array[0]

        # If array size is specified, see what it is
        if len(name_array) > 1:
            array_size = name_array[1].split(']')[0].strip()
            if not array_size:
                raise RuntimeError("Array declaration without size is not "
                                   "supported.")
            array_size = int(array_size)
        else:
            array_size = 1

        # Check if type is, e.g., vec3
        vec_param = desc[-1]
        if vec_param.isdigit():
            size = int(vec_param)
            desc = desc[:-1]
        else:
            size = 1

        # For a square matrix, we have the side dimension. To get
        # the size, we need to square that.
        if desc == 'mat':
            size *= size

        var_info = {
            'kind': desc,
            'size': size,
            'array': array_size}

        # ``dict.has_key`` was removed in Python 3; use ``in`` instead.
        if var_name in type_info and type_info[var_name] != var_info:
            raise GLSLError("Inconsistent definition of variable '%s'." %
                            var_name)
        else:
            type_info[var_name] = var_info

    self._uniform_type_info = type_info
def use(self):
    """Bind the program into the rendering pipeline.

    Re-links first if the program is not currently linked.
    """
    if not self.linked:
        self._link()
    gl.glUseProgram(self.handle)
    self.bound = True
def disable(self):
    """Unbind all programs (program 0) from the rendering pipeline."""
    gl.glUseProgram(0)
    self.bound = False
def __del__(self):
    # Unbind and free the GL program object when garbage-collected.
    # NOTE(review): this runs at GC time; if the GL context is already
    # gone at interpreter shutdown these calls may fail -- confirm
    # whether explicit cleanup is expected instead.
    self.disable()
    gl.glDeleteProgram(self.handle)
def _uniform_loc_storage_and_type(self, var):
    """Return the uniform location and containers that can store its value.

    Parameters
    ----------
    var : string
        Uniform name.

    Returns
    -------
    loc : int
        Uniform location in the program.
    storage : ctypes array type
        Flat container for ``array * size`` values.
    storage_nested : ctypes array type
        Nested container: ``array`` inner arrays of ``size`` values.
    data_type : {gl.GLint, gl.GLfloat}
        Element type derived from the declared uniform kind.

    Raises
    ------
    GLSLError
        If the uniform is not active.
    ValueError
        If the uniform is not declared in the shader source.
    RuntimeError
        If the uniform location cannot be queried.
    """
    if var not in self.active_uniforms:
        raise GLSLError("Uniform '%s' is not active. Make sure the "
                        "variable is used in the source code." % var)

    try:
        var_info = self._uniform_type_info[var]
    except KeyError:
        raise ValueError("Uniform variable '%s' is not defined in "
                         "shader source." % var)

    # If this is an array, how many values are involved?
    count = var_info['array']

    if var_info['kind'] in ['int']:
        data_type = gl.GLint
    else:
        data_type = gl.GLfloat

    # Internal invariants only (stripped under -O); idiomatic truthiness
    # instead of comparing against True.
    assert gl.glIsProgram(self.handle)
    assert self.linked

    loc = gl.glGetUniformLocation(self.handle, var)
    if loc == -1:
        raise RuntimeError("Could not query uniform location "
                           "for '%s'." % var)

    storage = data_type * (count * var_info['size'])
    storage_nested = count * (data_type * var_info['size'])

    return loc, storage, storage_nested, data_type
@if_in_use
def __setitem__(self, var, value):
"""Set uniform variable value.
Please note that matrices must be specified in row-major format.
"""
loc, container, container_nested, dtype = \
self._uniform_loc_storage_and_type(var)
var_info = self._uniform_type_info[var]
count, kind, size = [var_info[k] for k in 'array', 'kind', 'size']
# Ensure the value is given as a list
try:
value = list(value)
except TypeError:
value = [value]
expected_size = var_info['size'] * var_info['array']
if len(value) != var_info['size'] * var_info['array']:
varname = var
if var_info['array'] > 0:
varname += '[%d]' % var_info['array']
raise ValueError("Invalid input size (%s) for (%s) size '%s'." \
% (len(value), expected_size, varname))
if var_info['kind'] == 'mat':
set_func_name = 'glUniformMatrix%dfv' % np.sqrt(var_info['size'])
set_func = getattr(gl, set_func_name)
set_func(loc, count, | |
keys
class AioRedisBackend(RedisBackend):
"""Redis Backend logic that constructed by asyncio-redis.
"""
def __init__(self, poolsize=10, loop=None):
    """Store version, event loop and pool size; no connection is made yet."""
    self.version = dfconf['mark_version']
    # fall back to the running loop when none is supplied
    self._loop = loop or asyncio.get_event_loop()
    self.poolsize = poolsize
async def init_connection(self):
    """Open the redis connection configured by ``rdconf``.

    NOTE(review): although ``__init__`` accepts a ``poolsize``, a single
    connection (not a pool) is created here -- confirm intended.
    """
    self.rdb = await aiords.create_redis(
        (rdconf['host'], rdconf['port']),
        db=int(rdconf['db'])
    )
async def set_collection_index(self, name, instance):
    """Record ``name -> collection class name`` in the index hash."""
    class_name = instance.__class__.__name__
    return await self.rdb.hset(self.colls_index_fmt, name, class_name)
async def get_collection_index(self, name):
    """Return ``[name, info]`` for the collection, or None when absent."""
    raw = await self.rdb.hget(self.colls_index_fmt, name)
    if not raw:
        return None
    return [name, decode(raw)]
async def get_collection_indexes(self):
    """Return ``{name: info}`` for all collections; None when none exist."""
    entries = await self.rdb.hgetall(self.colls_index_fmt)
    if not entries:
        return None
    return {decode(name): decode(info) for name, info in entries.items()}
async def delete_collection_keys(self, coll, klass=''):
    """Danger! Erase every value stored under the collection's keys.

    Only use this when you really know what you are doing; it exists
    mainly so tests can clean up their environment.
    """
    await self.rdb.delete(self.metadata_fmt.format(name=coll.name))
    if klass == 'IncreaseCollection':
        await self.rdb.delete(
            self.inc_coll_cache_fmt.format(name=coll.name))
async def get_collection_length(self, coll, klass=''):
    """Return ``[metadata_len]``, plus ``[cache_len]`` for IncreaseCollection."""
    klass = klass or coll.__class__.__name__
    md_key = self.metadata_fmt.format(name=coll.name)
    lengths = [await self.rdb.zcard(md_key)]
    if klass == 'IncreaseCollection':
        cache_key = self.inc_coll_cache_fmt.format(name=coll.name)
        # length of *all* items stored under the cache key
        lengths.append(await self.rdb.hlen(cache_key))
    return lengths
async def set_collection_metadata(self, coll, tagging, expts, ts, *args):
    """ Insert data into the metadata structure if no entry exists for
    the timestamp yet. Note that the metadata structure includes two
    types, timeline and expire.

    :param coll: collection class
    :param tagging: specific tagging string
    :param ts: the timestamp of the data
    :param expts: the expired timestamp of the data
    """
    md_key = self.metadata_fmt.format(name=coll.name)
    # Check whether an item already exists at exactly score `ts`.
    element = await self.rdb.zrangebyscore(md_key, ts, ts)
    if element:
        info = unpackb(element[0])
        if tagging in info:
            # the tagging info already exists, so do nothing
            return
        info[tagging] = [expts] + list(args)
        # remove the old member and store the updated one atomically
        tr = self.rdb.multi_exec()
        tr.zremrangebyscore(md_key, ts, ts)
        tr.zadd(md_key, ts, packb(info))
        await tr.execute()
    else:
        # first entry for this timestamp
        info = {tagging: [expts] + list(args)}
        await self.rdb.zadd(md_key, ts, packb(info))
async def del_collection_metadata_by_items(self, coll, tagging, items):
    """Delete the items of the metadata with the provided timestamp list.

    :param items: the items queried from metadata.  NOTE(review):
        ``_del_collection_metadata`` expects the flat
        ``[member, score, member, score, ...]`` form returned by
        ``zrangebyscore(..., withscores=True)`` -- confirm callers pass
        that shape and not the paired query result.

    :TODO: add unittest case for the method!
    """
    md_key = self.metadata_fmt.format(name=coll.name)
    await self._del_collection_metadata(md_key, tagging, items)
async def del_collection_metadata_by_range(self, coll, tagging, start, end):
    """Strip *tagging* from every metadata item scored within [start, end]."""
    md_key = self.metadata_fmt.format(name=coll.name)
    found = await self.rdb.zrangebyscore(md_key, start, end,
                                         withscores=True)
    if found:
        await self._del_collection_metadata(md_key, tagging, found)
async def _del_collection_metadata(self, key, tagging, elements):
    """Remove *tagging* from each metadata member in *elements*.

    :param key: the metadata zset key
    :param tagging: the tagging to strip from each member
    :param elements: flat ``[member, score, member, score, ...]`` list
        as returned by ``zrangebyscore(..., withscores=True)``
    """
    del_info_todos = []
    del_key_todos = []
    # regroup the flat list into `(member, score)` pairs
    elements = [(elements[i], elements[i+1])
                for i in range(0, len(elements), 2)]
    # decide what to do with each element
    for info, ts in elements:
        if isinstance(info, bytes):
            info = unpackb(info)
        if tagging not in info:
            continue
        info.pop(tagging)
        # when info has no element left, the ts key itself is removed;
        # otherwise the updated value is stored back
        if info:
            del_info_todos.append((info, ts))
        else:
            del_key_todos.append(ts)
    # update keys one by one, each remove+add done atomically
    for info, ts in del_info_todos:
        tr = self.rdb.multi_exec()
        tr.zremrangebyscore(key, ts, ts)
        tr.zadd(key, ts, packb(info))
        await tr.execute()
    # remove all emptied keys in a single atomic transaction
    tr = self.rdb.multi_exec()
    for ts in del_key_todos:
        tr.zremrangebyscore(key, ts, ts)
    await tr.execute()
async def query_collection_metadata(self, coll, tagging, start, end, ret_whold=False):
    """Query metadata items that carry *tagging* within [start, end]."""
    return await self._query_collection_metadata(
        coll, start, end, tagging=tagging, ret_whold=ret_whold)
async def query_collection_metadata_tagging(self, coll, start, end):
    """Query only the tagging names per timestamp within [start, end]."""
    return await self._query_collection_metadata(
        coll, start, end, tagging='__taggings__')
async def query_collection_metadata_all(self, coll, start, end):
    """Query the full tagging info per timestamp within [start, end]."""
    return await self._query_collection_metadata(
        coll, start, end, tagging='__all__')
async def _query_collection_metadata(self, coll, start, end, tagging='', ret_whold=False):
    """ Do the real operations for querying metadata from redis.

    :param coll: the collection class used to fetch the name
    :param start: the start time of the query
    :param end: the end time of the query
    :param tagging: the tagging for the query
    :param ret_whold: whether to return all the info when a specific
        tagging is given (note: the parameter name is kept for
        backward compatibility)
    :ret: return None if no data exists.

    If tagging is '__taggings__', the return value only contains the taggings:
        # ts: all_tagging
        {
            ts1: [tagging1, tagging2, ..., targetN],
            ts2: [tagging1, tagging2, ..., targetN],
            ...
            tsN: [tagging1, tagging2, ..., targetN],
        }
    If tagging is '__all__', the return value includes all the info:
        # ts: all_tagging_info
        {
            ts1: {tagging1: info1, tagging2: info2, ...},
            ts2: {tagging1: info1, tagging2: info2, ...},
            ...
            tsN: {tagging1: info1, tagging2: info2, ...},
        }
    Otherwise, the return value is the info that matches the tagging:
        # value, score if ret_whold == False
        [(info1, ts1), (info2, ts2), ... (infoN, tsN)]
        # else:
        [
            ({tagging1: info1, tagging2: info2, ...}, ts1),
            ({tagging1: info1, tagging2: info2, ...}, ts2),
            ...
            ({tagging1: info1, tagging2: info2, ...}, tsN),
        ]
    """
    md_key = self.metadata_fmt.format(name=coll.name)
    elements = await self.rdb.zrangebyscore(md_key, start,
                                            end, withscores=True)
    if not elements:
        return
    else:
        # regroup the flat reply into `(member, score)` pairs
        elements = [(elements[i], elements[i+1])
                    for i in range(0, len(elements), 2)]
    if tagging == '__taggings__' or tagging == '__all__':
        rv = {}
    else:
        rv = []
    # collect the elements that match the requested tagging mode
    for info, ts in elements:
        info = unpackb(info)
        if tagging == '__taggings__':
            rv[ts] = list(info.keys())
        elif tagging == '__all__':
            rv[ts] = info
        elif tagging in info:
            if ret_whold:
                rv.append((info, ts))
            else:
                rv.append((info[tagging], ts))
    return rv
async def inc_coll_cache_set(self, coll, field, value):
    """Pack *value* and store it under *field* in the collection cache."""
    cache_key = self.inc_coll_cache_fmt.format(name=coll.name)
    await self.rdb.hset(cache_key, field, packb(value))
async def inc_coll_caches_get(self, coll, *fields):
    """Fetch and unpack the cached values for *fields*.

    :ret: ``[value1, value2, ..., valueN]``; ``[]`` when no fields are
        given or nothing is stored.
    """
    if not fields:
        return []
    cache_key = self.inc_coll_cache_fmt.format(name=coll.name)
    raw = await self.rdb.hmget(cache_key, *fields)
    return [unpackb(item) for item in raw if item]
async def inc_coll_caches_del(self, coll, *fields):
    """Drop *fields* from the collection cache, forwarding hdel's result."""
    cache_key = self.inc_coll_cache_fmt.format(name=coll.name)
    return await self.rdb.hdel(cache_key, *fields)
async def uniq_count_coll_cache_set(self, coll, ts, tagging, values):
    """Add the packed members to the unique-count set.

    :param values: an iterable object containing the members
    """
    packed = {packb(v) for v in values}
    key = self.unique_count_coll_cache_fmt.format(
        name=coll.name, tagging=tagging, ts=ts)
    return await self.rdb.sadd(key, *packed)
async def uniq_count_coll_cache_get(self, coll, tagging, timestamps, count_only=False):
    """For each timestamp, return the member set or (count_only) its size."""
    fmt = self.unique_count_coll_cache_fmt
    results = []
    for ts in timestamps:
        key = fmt.format(name=coll.name, tagging=tagging, ts=ts)
        if count_only:
            results.append(await self.rdb.scard(key))
        else:
            raw = await self.rdb.smembers(key)
            results.append({unpackb(m) for m in raw})
    return results
async def uniq_count_coll_cache_pop(self, coll, tagging, timestamps, number):
    """Pop up to *number* random members from each timestamp's set.

    :note: Redis `SPOP key [count]` command: the count argument will be
        available in a later version and is not available in 2.6, 2.8,
        3.0.  SRANDMEMBER plus SREM is used to mimic `SPOP count`.
    :ret: a list with one set of unpacked members per timestamp.
    """
    key_fmt = self.unique_count_coll_cache_fmt
    rv = []
    for ts in timestamps:
        key = key_fmt.format(name=coll.name, tagging=tagging, ts=ts)
        # :: srandmember + srem == spop(key, number)
        members = await self.rdb.srandmember(key, number)
        # Guard against empty/missing sets: SREM requires at least one
        # member argument, so calling it with none is a protocol error.
        if members:
            await self.rdb.srem(key, *members)
        rv.append({unpackb(m) for m in members})
    return rv
async def uniq_count_coll_cache_del(self, coll, tagging, timestamps):
    """Delete every unique-count set for the given timestamps."""
    count_keys = self._gen_count_keys(coll.name, tagging,
                                      'unique_count', timestamps)
    return await self.rdb.delete(*count_keys)
async def sorted_count_coll_cache_set(self, coll, ts, tagging, values):
    """Store a ``{member: score}`` mapping into the sorted-count zset.

    :param values: should be a dict of <member: score> pairs
    """
    key = self.sorted_count_coll_cache_fmt.format(
        name=coll.name, tagging=tagging, ts=ts)
    args = []
    for member, score in values.items():
        # zadd expects score first, then the packed member
        args.extend((score, packb(member)))
    return await self.rdb.zadd(key, *args)
async def sorted_count_coll_cache_get(self, coll, tagging, timestamps, topN=None):
key_fmt = self.sorted_count_coll_cache_fmt
rv = []
for ts in timestamps:
key = key_fmt.format(name=coll.name, tagging=tagging, ts=ts)
if topN:
elements = await self.rdb.zrange(key, -topN, -1, withscores=True)
else:
elements = await self.rdb.zrange(key, 0, -1, withscores=True)
# know that there is `(member, score) pair`
elements = [(unpackb(elements[i]), elements[i+1])
for i in | |
id=None,
max_alloc=None,
max_stdalloc=None,
name=None,
padded_size=None,
pool_misses=None,
type=None,
):
super(GlusterMemoryPool, self).__init__(
comment=comment,
description=description,
id=id,
name=name,
)
self.alloc_count = alloc_count
self.cold_count = cold_count
self.hot_count = hot_count
self.max_alloc = max_alloc
self.max_stdalloc = max_stdalloc
self.padded_size = padded_size
self.pool_misses = pool_misses
self.type = type
@property
def cold_count(self):
    """Current value of the ``cold_count`` attribute."""
    return self._cold_count

@cold_count.setter
def cold_count(self, value):
    """Assign a new ``cold_count`` value (stored without validation)."""
    self._cold_count = value
@property
def pool_misses(self):
    """Current value of the ``pool_misses`` attribute."""
    return self._pool_misses

@pool_misses.setter
def pool_misses(self, value):
    """Assign a new ``pool_misses`` value (stored without validation)."""
    self._pool_misses = value
@property
def padded_size(self):
    """Current value of the ``padded_size`` attribute."""
    return self._padded_size

@padded_size.setter
def padded_size(self, value):
    """Assign a new ``padded_size`` value (stored without validation)."""
    self._padded_size = value
@property
def max_stdalloc(self):
    """Current value of the ``max_stdalloc`` attribute."""
    return self._max_stdalloc

@max_stdalloc.setter
def max_stdalloc(self, value):
    """Assign a new ``max_stdalloc`` value (stored without validation)."""
    self._max_stdalloc = value
@property
def alloc_count(self):
    """Current value of the ``alloc_count`` attribute."""
    return self._alloc_count

@alloc_count.setter
def alloc_count(self, value):
    """Assign a new ``alloc_count`` value (stored without validation)."""
    self._alloc_count = value
@property
def hot_count(self):
    """Current value of the ``hot_count`` attribute."""
    return self._hot_count

@hot_count.setter
def hot_count(self, value):
    """Assign a new ``hot_count`` value (stored without validation)."""
    self._hot_count = value
@property
def max_alloc(self):
    """Current value of the ``max_alloc`` attribute."""
    return self._max_alloc

@max_alloc.setter
def max_alloc(self, value):
    """Assign a new ``max_alloc`` value (stored without validation)."""
    self._max_alloc = value
@property
def type(self):
    """Current value of the ``type`` attribute (name kept for API compat,
    although it shadows the ``type`` builtin inside this class body)."""
    return self._type

@type.setter
def type(self, value):
    """Assign a new ``type`` value (stored without validation)."""
    self._type = value
class GlusterServerHook(Identified):
    """Per-server view of a Gluster hook (checksum, content type, status)."""

    def __init__(
        self,
        checksum=None,
        comment=None,
        content_type=None,
        description=None,
        host=None,
        id=None,
        name=None,
        status=None,
    ):
        super(GlusterServerHook, self).__init__(
            comment=comment,
            description=description,
            id=id,
            name=name,
        )
        # Assign through the property setters so type checks apply.
        self.checksum = checksum
        self.content_type = content_type
        self.host = host
        self.status = status

    @property
    def checksum(self):
        """Current value of the ``checksum`` attribute."""
        return self._checksum

    @checksum.setter
    def checksum(self, value):
        """Assign a new ``checksum`` value (stored without validation)."""
        self._checksum = value

    @property
    def content_type(self):
        """Current value of the ``content_type`` attribute."""
        return self._content_type

    @content_type.setter
    def content_type(self, value):
        """Assign ``content_type`` after validating it is a HookContentType."""
        Struct._check_type('content_type', value, HookContentType)
        self._content_type = value

    @property
    def host(self):
        """Current value of the ``host`` attribute."""
        return self._host

    @host.setter
    def host(self, value):
        """Assign ``host`` after validating it is a Host."""
        Struct._check_type('host', value, Host)
        self._host = value

    @property
    def status(self):
        """Current value of the ``status`` attribute."""
        return self._status

    @status.setter
    def status(self, value):
        """Assign ``status`` after validating it is a GlusterHookStatus."""
        Struct._check_type('status', value, GlusterHookStatus)
        self._status = value
class GlusterVolume(Identified):
    """A Gluster volume: its bricks, replication/stripe layout and status."""

    def __init__(
        self,
        bricks=None,
        cluster=None,
        comment=None,
        description=None,
        disperse_count=None,
        id=None,
        name=None,
        options=None,
        redundancy_count=None,
        replica_count=None,
        statistics=None,
        status=None,
        stripe_count=None,
        transport_types=None,
        volume_type=None,
    ):
        super(GlusterVolume, self).__init__(
            comment=comment,
            description=description,
            id=id,
            name=name,
        )
        # Assign through the property setters so type checks apply.
        self.bricks = bricks
        self.cluster = cluster
        self.disperse_count = disperse_count
        self.options = options
        self.redundancy_count = redundancy_count
        self.replica_count = replica_count
        self.statistics = statistics
        self.status = status
        self.stripe_count = stripe_count
        self.transport_types = transport_types
        self.volume_type = volume_type

    @property
    def bricks(self):
        """Current value of the ``bricks`` attribute."""
        return self._bricks

    @bricks.setter
    def bricks(self, value):
        """Assign a new ``bricks`` value (stored without validation)."""
        self._bricks = value

    @property
    def cluster(self):
        """Current value of the ``cluster`` attribute."""
        return self._cluster

    @cluster.setter
    def cluster(self, value):
        """Assign ``cluster`` after validating it is a Cluster."""
        Struct._check_type('cluster', value, Cluster)
        self._cluster = value

    @property
    def disperse_count(self):
        """Current value of the ``disperse_count`` attribute."""
        return self._disperse_count

    @disperse_count.setter
    def disperse_count(self, value):
        """Assign a new ``disperse_count`` value (stored without validation)."""
        self._disperse_count = value

    @property
    def options(self):
        """Current value of the ``options`` attribute."""
        return self._options

    @options.setter
    def options(self, value):
        """Assign a new ``options`` value (stored without validation)."""
        self._options = value

    @property
    def redundancy_count(self):
        """Current value of the ``redundancy_count`` attribute."""
        return self._redundancy_count

    @redundancy_count.setter
    def redundancy_count(self, value):
        """Assign a new ``redundancy_count`` value (stored without validation)."""
        self._redundancy_count = value

    @property
    def replica_count(self):
        """Current value of the ``replica_count`` attribute."""
        return self._replica_count

    @replica_count.setter
    def replica_count(self, value):
        """Assign a new ``replica_count`` value (stored without validation)."""
        self._replica_count = value

    @property
    def statistics(self):
        """Current value of the ``statistics`` attribute."""
        return self._statistics

    @statistics.setter
    def statistics(self, value):
        """Assign a new ``statistics`` value (stored without validation)."""
        self._statistics = value

    @property
    def status(self):
        """Current value of the ``status`` attribute."""
        return self._status

    @status.setter
    def status(self, value):
        """Assign ``status`` after validating it is a GlusterVolumeStatus."""
        Struct._check_type('status', value, GlusterVolumeStatus)
        self._status = value

    @property
    def stripe_count(self):
        """Current value of the ``stripe_count`` attribute."""
        return self._stripe_count

    @stripe_count.setter
    def stripe_count(self, value):
        """Assign a new ``stripe_count`` value (stored without validation)."""
        self._stripe_count = value

    @property
    def transport_types(self):
        """Current value of the ``transport_types`` attribute."""
        return self._transport_types

    @transport_types.setter
    def transport_types(self, value):
        """Assign a new ``transport_types`` value (stored without validation)."""
        self._transport_types = value

    @property
    def volume_type(self):
        """Current value of the ``volume_type`` attribute."""
        return self._volume_type

    @volume_type.setter
    def volume_type(self, value):
        """Assign ``volume_type`` after validating it is a GlusterVolumeType."""
        Struct._check_type('volume_type', value, GlusterVolumeType)
        self._volume_type = value
class GlusterVolumeProfileDetails(Identified):
    """Profiling details (per-brick and NFS) reported for a Gluster volume."""

    def __init__(
        self,
        brick_profile_details=None,
        comment=None,
        description=None,
        id=None,
        name=None,
        nfs_profile_details=None,
    ):
        # The shared identity attributes are handled by the Identified base.
        super(GlusterVolumeProfileDetails, self).__init__(
            comment=comment,
            description=description,
            id=id,
            name=name,
        )
        self.brick_profile_details = brick_profile_details
        self.nfs_profile_details = nfs_profile_details

    @property
    def brick_profile_details(self):
        """Read back the stored `brick_profile_details` value."""
        return self._brick_profile_details

    @brick_profile_details.setter
    def brick_profile_details(self, new_value):
        """Record `new_value` as the per-brick profile details."""
        self._brick_profile_details = new_value

    @property
    def nfs_profile_details(self):
        """Read back the stored `nfs_profile_details` value."""
        return self._nfs_profile_details

    @nfs_profile_details.setter
    def nfs_profile_details(self, new_value):
        """Record `new_value` as the NFS profile details."""
        self._nfs_profile_details = new_value
class GraphicsConsole(Identified):
    """A graphics console attached to a VM, template, or instance type."""

    def __init__(
        self,
        address=None,
        comment=None,
        description=None,
        id=None,
        instance_type=None,
        name=None,
        port=None,
        protocol=None,
        template=None,
        tls_port=None,
        vm=None,
    ):
        # The shared identity attributes are handled by the Identified base.
        super(GraphicsConsole, self).__init__(
            comment=comment,
            description=description,
            id=id,
            name=name,
        )
        self.address = address
        self.instance_type = instance_type
        self.port = port
        self.protocol = protocol
        self.template = template
        self.tls_port = tls_port
        self.vm = vm

    @property
    def address(self):
        """Read back the stored `address` value."""
        return self._address

    @address.setter
    def address(self, new_value):
        """Record `new_value` as the console's `address`."""
        self._address = new_value

    @property
    def instance_type(self):
        """Read back the stored `instance_type` value."""
        return self._instance_type

    @instance_type.setter
    def instance_type(self, new_value):
        """Record the owning instance type (must be an InstanceType)."""
        # Type is enforced so bad assignments fail at the call site.
        Struct._check_type('instance_type', new_value, InstanceType)
        self._instance_type = new_value

    @property
    def port(self):
        """Read back the stored `port` value."""
        return self._port

    @port.setter
    def port(self, new_value):
        """Record `new_value` as the console's `port`."""
        self._port = new_value

    @property
    def protocol(self):
        """Read back the stored `protocol` value."""
        return self._protocol

    @protocol.setter
    def protocol(self, new_value):
        """Record the console protocol (must be a GraphicsType)."""
        # Type is enforced so bad assignments fail at the call site.
        Struct._check_type('protocol', new_value, GraphicsType)
        self._protocol = new_value

    @property
    def template(self):
        """Read back the stored `template` value."""
        return self._template

    @template.setter
    def template(self, new_value):
        """Record the owning template (must be a Template)."""
        # Type is enforced so bad assignments fail at the call site.
        Struct._check_type('template', new_value, Template)
        self._template = new_value

    @property
    def tls_port(self):
        """Read back the stored `tls_port` value."""
        return self._tls_port

    @tls_port.setter
    def tls_port(self, new_value):
        """Record `new_value` as the console's `tls_port`."""
        self._tls_port = new_value

    @property
    def vm(self):
        """Read back the stored `vm` value."""
        return self._vm

    @vm.setter
    def vm(self, new_value):
        """Record the owning virtual machine (must be a Vm)."""
        # Type is enforced so bad assignments fail at the call site.
        Struct._check_type('vm', new_value, Vm)
        self._vm = new_value
class Group(Identified):
def __init__(
self,
comment=None,
description=None,
domain=None,
domain_entry_id=None,
id=None,
name=None,
namespace=None,
permissions=None,
roles=None,
tags=None,
):
super(Group, self).__init__(
comment=comment,
description=description,
id=id,
name=name,
)
self.domain = domain
self.domain_entry_id = domain_entry_id
self.namespace = namespace
self.permissions = permissions
self.roles = roles
self.tags = tags
@property
def domain(self):
"""
Returns the value of the `domain` property.
"""
return self._domain
@domain.setter
def domain(self, value):
"""
Sets the value of the `domain` property.
"""
Struct._check_type('domain', value, Domain)
self._domain = value
@property
def permissions(self):
"""
Returns the value of the `permissions` property.
"""
return self._permissions
@permissions.setter
def permissions(self, value):
| |
<filename>src/workqueue/__init__.py
import contextlib
import logging
import os
import selectors
import subprocess
import tempfile
import threading
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass, field
from pathlib import Path
from typing import ContextManager, Dict, Iterator, NamedTuple, Optional
logger = logging.getLogger(__name__)
POLL_INTERVAL = 0.25 # seconds
class OverviewViewParams(NamedTuple):
    """Identifies one document set on one Overview API server."""

    server: str
    """Base URL of the Overview API server, e.g. `https://www.overviewdocs.com`."""

    document_set_id: str
    """Identifier of the DocumentSet on that server."""

    api_token: str
    """Credential granting access to the document set."""
class Progress(NamedTuple):
    """A point-in-time snapshot of a Job's status."""

    n_ahead_in_queue: int = 0
    """How many jobs must run before this one (0 means it is running now)."""

    fraction: float = 0.0
    """Completion ratio in the range 0.0 through 1.0."""

    message: Optional[str] = None
    """Free-form progress message the program reported with `fraction`."""

    returncode: Optional[int] = None
    """Exit status once finished: 0 on success, anything else on failure."""

    error: Optional[str] = None
    """Human-readable failure description; only present when `returncode` is set."""
def _run_and_log_exceptions(fn, *args, **kwargs):
try:
fn(*args, **kwargs)
except Exception:
logger.exception("Exception in workqueue module")
raise
@dataclass
class Job:
    """One queued/running/finished invocation of the worker program.

    Fields are mutated by the worker thread; readers coordinate through the
    two Events below.
    """

    params: OverviewViewParams

    was_started: threading.Event = field(default_factory=threading.Event)

    # Last complete line of progress from the subprocess, e.g. b"0.23" or
    # b"0.23\tworking...". Do not read until .was_started.is_set().
    last_stdout_line: Optional[bytes] = None

    # When set, the job is completed.
    was_completed: threading.Event = field(default_factory=threading.Event)

    # Process returncode; 0 means success.
    # Do not read until .was_completed.is_set().
    returncode: Optional[int] = None

    # Raw process stderr. Do not read until .was_completed.is_set().
    stderr: Optional[bytes] = None

    @property
    def current_progress(self) -> Progress:
        """
        Progress, if the job is running or completed. Otherwise 0.0.
        """
        if self.returncode is not None:
            if self.returncode == 0:
                error = None
            else:
                # The worker thread may not have assigned stderr yet when a
                # reader races in right after returncode is published; treat
                # missing output as empty instead of crashing on None.decode.
                stderr = (self.stderr or b"").decode("utf-8", errors="replace")
                error = f"Exited with code {self.returncode}\nstderr:\n{stderr}"
            return Progress(fraction=1.0, returncode=self.returncode, error=error)
        if self.last_stdout_line is not None:
            # Protocol: "<fraction>" or "<fraction>\t<message>".
            parts = self.last_stdout_line.decode("utf-8", errors="replace").split(
                "\t", 1
            )
            try:
                fraction = float(parts[0])
                if fraction < 0.0 or fraction > 1.0:
                    raise ValueError(
                        "Fraction must be between 0.0 and 1.0; got %f" % fraction
                    )
            except ValueError:
                # Malformed progress line: report "no progress" rather than fail.
                logger.warning(
                    "invalid program: stdout must look like '0.25\\tmessage'; got %r",
                    self.last_stdout_line.decode("utf-8", errors="replace"),
                )
                return Progress()
            if len(parts) == 2:
                message = parts[1]
            else:
                message = None
            return Progress(fraction=fraction, message=message)
        return Progress()
@dataclass
class State:
    """Mutable registry of queued work.

    Not thread-safe on its own: callers must hold a lock around every access.
    """

    # Jobs accepted but not yet started, keyed by their params.
    pending: Dict[OverviewViewParams, Job] = field(default_factory=dict)

    # Jobs that have started and not yet completed, keyed by their params.
    running: Dict[OverviewViewParams, Job] = field(default_factory=dict)
@contextlib.contextmanager
def _tempfile_context(**kwargs) -> ContextManager[Path]:
fd, tempfile_name = tempfile.mkstemp(**kwargs)
try:
yield Path(tempfile_name)
finally:
try:
os.unlink(tempfile_name)
except FileNotFoundError:
pass
@dataclass(frozen=True)
class WorkQueue:
"""Debouncer of huge jobs.
We never run two jobs with the same `params`.
To the caller, this monolithic object can be used as follows:
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
from workqueue import WorkQueue, OverviewViewParams
# once, ever:
work = WorkQueue(
program_path=Path(__file__).parent / "do_work.py",
executor=ThreadPoolExecutor(2, thread_name_prefix="my-work")
)
# and once per, say, HTTP request in our hypothetical HTTP framework:
def serve(req, res):
params = OverviewViewParams(
req.query_string["server"],
req.query_string["document_set_id"],
req.auth_header["username"], # api_token
)
maybe_job = work.ensure_run(params)
if maybe_job is None:
# the job was already finished with these params.
res.send(204) # No Content
return
else:
res.send(200) # OK (we presume -- gotta send a header!)
res.send_header("Content-Type", "text/plain; charset=utf-8")
for progress in work.report_job_progress_until_completed(job):
# Simple logic: send one line of progress info.
#
# Better logic would be to "debounce": don't send _every_
# event, but only send events when the HTTP send buffer is
# empty. This will get events to the user sooner.
res.send(json.dumps(progress._asdict()).encode("utf-8"))
# Now the job is completed.
return
If a job completes with an error no state will be stored. The next call to
`.ensure_run()` will re-start from scratch.
This class can be used as a context manager:
with WorkQueue(
program_path=Path(__file__).parent / "do_work.py",
executor=ThreadPoolExecutor(2, thread_name_prefix="my-work")
) as work:
# ...
# and when we exit, the ThreadPoolExecutor will be shut down.
"""
program_path: Path
"""Path to a program with 4 positional params. Run as:
/path/to/program server document_set_id api_token output_path
The program must:
* Write only progress events to stdout. A progress event looks
like "0.24\n" or "0.24\tdoing something...\n".
* Exit with returncode 0 on success and write to `output_path`.
* Exit with returncode !=0 on failure, optionally writing to stderr.
"""
executor: ThreadPoolExecutor
storage_dir: Path
"""Path where we store data."""
state_lock: threading.Lock = field(default_factory=threading.Lock)
state: State = field(default_factory=State)
def _run_one_job(self, job) -> None:
"""Called by self.executor.
Execute one job, updating state and job.current_progress.
"""
destination_path = self.destination_path_for_params(job.params)
destination_path.parent.mkdir(parents=True, exist_ok=True)
# Set to running
with self.state_lock:
del self.state.pending[job.params]
self.state.running[job.params] = job
job.was_started.set()
with _tempfile_context(
dir=self.storage_dir, prefix="building-model-", suffix=".tmp"
) as tempfile_path:
# Run the subprocess. Sets job.last_stdout_line repeatedly, then
# job.stderr and job.returncode
with subprocess.Popen(
[
self.program_path,
job.params.server,
job.params.document_set_id,
job.params.api_token,
tempfile_path.as_posix(),
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=0, # so progress reports aren't delayed
close_fds=True,
) as popen:
# Read from both stdout and stderr at the same time. This
# requires non-blocking reads and selectors. (If we don't do
# this, we'd need to read with threads ... or we'd get deadlock
# when a buffer fills.)
#
# Primer: to run a process, we must:
#
# 1. Start it (subprocess.Popen())
# 2. Read from its stdout and stderr as they get filled. (If we don't
# read, the process' writes will stall forever.)
# 3. Read until its stdout and stderr are closed.
# 4. wait() to get its retval. (subprocess.Popen.wait().)
stdout_buf = b""
stderr = []
with selectors.DefaultSelector() as selector:
selector.register(popen.stdout.fileno(), selectors.EVENT_READ)
selector.register(popen.stderr.fileno(), selectors.EVENT_READ)
while selector.get_map():
ready = selector.select()
for key, events in ready:
chunk = os.read(key.fd, 32768)
if not chunk:
# The subprocess closed this fd (stdout/stderr).
#
# This typically happens when the subprocess exits.
selector.unregister(key.fd)
else:
# We just read a chunk.
if key.fd == popen.stdout.fileno():
# stdout: maintain a buffer of half-finished lines;
# write the last-finished line to
# job.last_stdout_line.
stdout_buf += chunk
while b"\n" in stdout_buf:
(
job.last_stdout_line,
stdout_buf,
) = stdout_buf.split(b"\n", 1)
elif key.fd == popen.stderr.fileno():
# stderr: append to our (infinite) buffer.
stderr.append(chunk)
# ... and loop, until we've removed everything from `selector`,
# meaning the subprocess closed its stdout+stderr.
job.returncode = popen.wait()
job.stderr = b"".join(stderr)
if job.returncode == 0:
self._move_job_output_or_set_job_error(
job, tempfile_path, destination_path
)
# Set to completed
with self.state_lock:
del self.state.running[job.params]
job.was_completed.set()
def _move_job_output_or_set_job_error(
self, job: Job, tempfile_path: Path, destination_path: Path
) -> None:
"""Atomically move job output to destination_path.
In case of error, set job.returncode to -999 and job.stderr to a
message.
"""
try:
size = os.stat(tempfile_path).st_size
except OSError as err:
job.returncode = -999 # clearly not a POSIX returncode
job.stderr = b"Failed to stat output file: " + str(err).encode("utf-8")
logger.exception("Failed to stat output file")
return
if size == 0:
# Assume the file is empty because the script neglected to
# write to it -- erroneously.
job.returncode = -999 # clearly not a POSIX returncode
job.stderr = b"invalid program: it should have written to its output file"
logger.warning("invalid program: it should have written to its output file")
return
# Rename tempfile to its final resting place.
#
# Hard-link so tempfile.NamedTemporaryFile() can unlink its
# own handle without error ... and to ensure we're atomic.
try:
os.link(tempfile_path, destination_path)
except OSError as err:
job.returncode = -999 # clearly not a POSIX returncode
job.stderr = b"Failed to link destination file: " + str(err).encode("utf-8")
logger.exception("Failed to link destination file")
return
# Everything is okay.
def destination_path_for_params(self, params: OverviewViewParams) -> Path:
return (
self.storage_dir
/ params.server.split("/")[2]
/ params.document_set_id
/ f"{params.api_token}.out"
)
def ensure_run(self, params: OverviewViewParams) -> Optional[Job]:
"""Ensure a Job has been queued or completed with params `params`.
Return `None` if we know the Job has been fully completed.
Otherwise, return a new or existing `Job`.
"""
with self.state_lock:
# Since we're multi-threaded, we can't tell how far along the
# _actual_ job is. We only have "bounds" on how far along it is.
# For instance: a job in `state.running` has _started_ executing,
# but we don't know whether it's completed.
# running (or completed but still in RAM)?
try:
return | |
import numpy as np
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import cv2
import glob
import argparse
import pickle
from PIL import Image
import os
# Default (min, max) ranges for the binary-thresholding helpers below.
DEFAULT_COLOR_THRESHOLD = (190, 255)  # HLS S-channel intensity range
DEFAULT_ABS_THRESHOLD = (0, 255)  # scaled |Sobel| derivative range
DEFAULT_MAG_THRESHOLD = (0, 255)  # scaled gradient-magnitude range
DEFAULT_DIR_THRESHOLD = (0, np.pi/6)  # gradient-direction range, in radians
# Default Sobel aperture sizes (odd integers) per operation.
DEFAULT_COLOR_SOBEL_KERNEL = 3
DEFAULT_DIR_SOBEL_KERNEL = 3
DEFAULT_MAG_SOBEL_KERNEL = 3
# Notes on shape.
# img_file = 'camera_cal/calibration1.jpg'
# img.shape
# (720, 1280, 3)
# gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# gray.shape
# (720, 1280)
# 720 is the height, 1280 is the width of the image here, 3 - channels.
def convert_to_grayscale(img):
    """Collapse an RGB image of shape (H, W, 3) to single-channel (H, W)."""
    return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
def get_object_image_points(img_file, shape=(9, 6)):
    """
    Locate chessboard corners in one calibration image.

    :param img_file (str) - Name of the image file to calibrate against.
    :param shape tuple(int, int) - Inner-corner counts of the checker board.
    :return (status, obj_points, img_points).
        status is True when findChessboardCorners succeeds.
        obj_points are the 3-D real-world corner coordinates (z == 0 plane).
        img_points are the matching 2-D pixel coordinates (None on failure).
    """
    gray = convert_to_grayscale(mpimg.imread(img_file))
    # Real-world grid: (0,0,0), (1,0,0), ... laid out on the z == 0 plane.
    world_points = np.zeros((shape[0] * shape[1], 3), np.float32)
    world_points[:, :2] = np.mgrid[0:shape[0], 0:shape[1]].T.reshape(-1, 2)
    found, corners = cv2.findChessboardCorners(gray, shape, None)
    return found, world_points, corners
def callibrate_camera(file_names, shape=(9, 6)):
    """
    Callibrates the camera given file_names of distorted chess board images.

    :param file_names (list[str]) - files containing chess board images.
    :param shape ((int, int)) - (corners per row, corners per column)
    :return (ret, mtx, dist, rvecs, tvecs).
        The return value is the output of calibrateCamera
    :raises ValueError: if file_names is empty.
    """
    if not file_names:
        # Without this guard, curr_shape below would be unbound (NameError).
        raise ValueError('file_names must contain at least one image')
    all_obj_points = []
    all_img_points = []
    prev_shape = None
    curr_shape = None
    for f in file_names:
        print('Processing {f}'.format(f=f))
        status, obj_points, img_points = get_object_image_points(f, shape)
        curr_shape = convert_to_grayscale(mpimg.imread(f)).shape
        # All calibration images should share one resolution; warn otherwise.
        if prev_shape and prev_shape != curr_shape:
            print('Shapes mismatch for {f} : {p} vs {c}'.format(
                f=f, p=prev_shape, c=curr_shape))
        prev_shape = curr_shape
        if status:
            all_obj_points.append(obj_points)
            all_img_points.append(img_points)
        else:
            print('Could not find chess board corners for {f}'.format(f=f))
    # cv2.calibrateCamera expects imageSize as (width, height); numpy's
    # .shape is (height, width), so reverse it.
    return cv2.calibrateCamera(
        all_obj_points, all_img_points, curr_shape[::-1], None, None
    )
def undistort_and_save_image(output_dir, file_name, mtx, dist):
    """Undistort `file_name` with (mtx, dist) and save it as <stem>_dist.jpg."""
    print('Undistorting {f} {m} {d}'.format(f=file_name, m=mtx, d=dist))
    # Use a distinct name for the result so the `dist` coefficients
    # parameter is not shadowed.
    undistorted = cv2.undistort(mpimg.imread(file_name), mtx, dist, None, mtx)
    stem = os.path.basename(file_name).split('.')[0]
    full_path = os.path.join(output_dir, '{p}_dist.jpg'.format(p=stem))
    print('Undistorting file {f} to {o}'.format(f=file_name, o=full_path))
    Image.fromarray(undistorted).save(full_path)
def sobel_abs_threshold(img, orient='x', thresh=DEFAULT_ABS_THRESHOLD):
    """
    Binary mask of pixels whose scaled |Sobel| derivative lies inside `thresh`.

    :param img (numpy array) - RGB image.
    :param orient (str) - 'x' for the horizontal derivative, anything else for vertical.
    :param thresh ((int, int)) - inclusive (min, max) range on the 0-255 scaled value.
    :return uint8 mask of the image shape, 1 where the gradient is in range.
    """
    gray = convert_to_grayscale(img)
    # Derivative direction: (1, 0) differentiates along x, (0, 1) along y.
    dx, dy = (1, 0) if orient == 'x' else (0, 1)
    sobel = cv2.Sobel(gray, cv2.CV_64F, dx, dy)
    # Scale the absolute response to 8-bit so thresholds are image-independent.
    abs_sobel = np.absolute(sobel)
    scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
    mask = np.zeros_like(scaled_sobel)
    mask[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1
    return mask
def sobel_mag_threshold(img, sobel_kernel=DEFAULT_MAG_SOBEL_KERNEL, mag_thresh=DEFAULT_MAG_THRESHOLD):
    """
    Binary mask of pixels whose scaled gradient magnitude lies inside `mag_thresh`.

    :param img (numpy array) - RGB image.
    :param sobel_kernel (int) - odd Sobel aperture size.
    :param mag_thresh ((int, int)) - inclusive (min, max) range on the 0-255 scale.
    :return uint8 mask, 1 where the rescaled sqrt(gx^2 + gy^2) is in range.
    """
    gray = convert_to_grayscale(img)
    gx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    gy = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    magnitude = np.sqrt(gx**2 + gy**2)
    # Rescale so the strongest gradient maps to 255 regardless of content.
    scale_factor = np.max(magnitude)/255
    magnitude = (magnitude/scale_factor).astype(np.uint8)
    mask = np.zeros_like(magnitude)
    mask[(magnitude >= mag_thresh[0]) & (magnitude <= mag_thresh[1])] = 1
    return mask
def dir_threshold(img, sobel_kernel=DEFAULT_DIR_SOBEL_KERNEL, thresh=DEFAULT_DIR_THRESHOLD):
    """
    Binary mask of pixels whose gradient direction lies inside `thresh` (radians).

    :param img (numpy array) - RGB image.
    :param sobel_kernel (int) - odd Sobel aperture size.
    :param thresh ((float, float)) - inclusive (min, max) angle range.
    :return uint8-compatible mask, 1 where the direction is in range.
    """
    gray = convert_to_grayscale(img)
    gx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    gy = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # arctan2 of absolute gradients folds all directions into [0, pi/2].
    direction = np.arctan2(np.absolute(gy), np.absolute(gx))
    mask = np.zeros_like(direction)
    mask[(direction >= thresh[0]) & (direction <= thresh[1])] = 1
    return mask
def color_threshold(img, thresh=DEFAULT_COLOR_THRESHOLD):
    """Binary mask, 1 where the HLS saturation channel is in (thresh[0], thresh[1]]."""
    saturation = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)[:, :, 2]
    mask = np.zeros_like(saturation)
    mask[(saturation > thresh[0]) & (saturation <= thresh[1])] = 1
    return mask
def combined_gradient_color_threshold(
    img,
    grad_x_thresh=DEFAULT_ABS_THRESHOLD,
    color_thresh=DEFAULT_COLOR_THRESHOLD
):
    """Union (logical OR) of the saturation mask and the x-gradient mask."""
    color_mask = color_threshold(img, color_thresh)
    gradient_mask = sobel_abs_threshold(img, orient='x', thresh=grad_x_thresh)
    combined = np.zeros_like(gradient_mask)
    combined[(color_mask == 1) | (gradient_mask == 1)] = 1
    return combined
def combined_dir_color_threshold(
    img,
    grad_x_thresh=DEFAULT_ABS_THRESHOLD,
    dir_thresh=DEFAULT_DIR_THRESHOLD,
    color_thresh=DEFAULT_COLOR_THRESHOLD
):
    """
    Union (logical OR) of the saturation mask and the gradient-direction mask.

    Bug fix: the body referenced `color_thresh`, which was never a parameter,
    so every call raised NameError. It is now a trailing keyword parameter
    with the module default, keeping existing call sites working.
    NOTE(review): `grad_x_thresh` is retained for signature compatibility but
    is unused here.
    """
    s_binary = color_threshold(img, color_thresh)
    threshold_binary = dir_threshold(img, thresh=dir_thresh)
    combined_binary = np.zeros_like(threshold_binary)
    combined_binary[(s_binary == 1) | (threshold_binary == 1)] = 1
    return combined_binary
"""
dst_points=np.float32([
[268.552, 10],
[1034.03, 10],
[268.552, 675.317],
[1034.03, 675.317],
])
"""
def get_perspective_transform_matrix(
    mtx,
    dist,
    file_name='test_images/straight_lines1.jpg',
    src_points=np.float32([
        [576.04, 464.487], [707.079, 464.487],
        [268.552, 675.317], [1034.03, 675.317]]
    ),
    dst_points=np.float32([
        [150, 150],
        [1000, 150],
        [150, 700],
        [1000, 700],
    ])
):
    """
    Build the forward and inverse perspective-transform matrices.

    Maps src_points (in the undistorted camera image) to dst_points; warping
    with the result produces an image of the same size as the original.

    :param mtx (numpy array) - Matrix for removing distortion.
    :param dist (numpy array) - Distortion coefficients.
    :param file_name (str) - Reference image; it is loaded and undistorted only
        to fail fast when the image or calibration inputs are unusable.
    :param src_points (list[points]) - List of points in the source image.
        These are assumed to be four points provided in the following order -
        Top left, top right, bottom left, bottom right.
    :param dst_points (list[points]) - Points with 1:1 mapping to src_points.
    :return (M, M_inv): matrices for the src->dst and dst->src transforms.
    """
    # Validate the inputs; the pixels themselves are not needed for the
    # transform (the previously computed gray/width/height were dead code).
    img = mpimg.imread(file_name)
    cv2.undistort(img, mtx, dist, None, mtx)
    return (
        cv2.getPerspectiveTransform(src_points, dst_points),
        cv2.getPerspectiveTransform(dst_points, src_points),
    )
def draw_lines(img, lines, color=(255, 0, 0), thickness=2):
    """
    Draw each (x1, y1, x2, y2) segment from `lines` onto `img` in place.

    :param img (numpy array) - image to draw on (mutated).
    :param lines - iterable of iterables of (x1, y1, x2, y2) segments.
    :param color - BGR/RGB triple; a tuple now, since the previous mutable
        list default was shared across all calls.
    :param thickness (int) - line thickness in pixels.
    """
    for segment in lines:
        for x1, y1, x2, y2 in segment:
            cv2.line(img, (int(x1), int(y1)), (int(x2), int(y2)), color, thickness)
def get_points_for_lanes(src):
    """
    Pair the four trapezoid corners into the two lane-boundary segments.

    :param src - four (x, y) points ordered top-left, top-right,
        bottom-left, bottom-right.
    :return [[left], [right]] where each entry is a single
        (x_top, y_top, x_bottom, y_bottom) tuple, in the nested shape
        that draw_lines() consumes.
    """
    top_left, top_right, bottom_left, bottom_right = src[0], src[1], src[2], src[3]
    left = (top_left[0], top_left[1], bottom_left[0], bottom_left[1])
    right = (top_right[0], top_right[1], bottom_right[0], bottom_right[1])
    return [[left], [right]]
def hist(img):
    """
    Column-wise sum over the bottom half of a binary warped image.

    Lane lines are mostly vertical nearest the car, so the two largest
    peaks of this histogram approximate the lane-line base x positions.
    """
    half_height = img.shape[0] // 2
    bottom_half = img[half_height:, :]
    # Summing down each column (axis=0) scores how "lane-like" that x is.
    return np.sum(bottom_half, axis=0)
def find_lane_pixels(binary_warped):
"""
Returns the left and right lane pixels.
:param binary_warped - Warped image.
Returns (leftx, lefty, rightx, righty, out_img) where
leftx - x coordinates of the points in the left lane.
rightx - y coordinates of the points in the left lane.
rightx - x coordinates of the points in the right lane.
righty - y coordinates of the points in the right lane.
"""
# Take a histogram of the bottom half of the image
histogram = np.sum(binary_warped[binary_warped.shape[0]//2:,:], axis=0)
# Create an output image to draw on and visualize the result
out_img = np.dstack((binary_warped, binary_warped, binary_warped))
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
midpoint = np.int(histogram.shape[0]//2)
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:]) + midpoint
# HYPERPARAMETERS
# Choose the number of sliding windows
nwindows = 9
# Set the width of the windows +/- margin
margin = 100
# Set minimum number of pixels found to recenter window
minpix = 50
# Set height of windows - based on nwindows above and image shape
window_height = np.int(binary_warped.shape[0]//nwindows)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Current positions | |
<reponame>ederst/pulumi-openstack
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = ['ClusterTemplate']
class ClusterTemplate(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
apiserver_port: Optional[pulumi.Input[int]] = None,
cluster_distro: Optional[pulumi.Input[str]] = None,
coe: Optional[pulumi.Input[str]] = None,
dns_nameserver: Optional[pulumi.Input[str]] = None,
docker_storage_driver: Optional[pulumi.Input[str]] = None,
docker_volume_size: Optional[pulumi.Input[int]] = None,
external_network_id: Optional[pulumi.Input[str]] = None,
fixed_network: Optional[pulumi.Input[str]] = None,
fixed_subnet: Optional[pulumi.Input[str]] = None,
flavor: Optional[pulumi.Input[str]] = None,
floating_ip_enabled: Optional[pulumi.Input[bool]] = None,
http_proxy: Optional[pulumi.Input[str]] = None,
https_proxy: Optional[pulumi.Input[str]] = None,
image: Optional[pulumi.Input[str]] = None,
insecure_registry: Optional[pulumi.Input[str]] = None,
keypair_id: Optional[pulumi.Input[str]] = None,
labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,
master_flavor: Optional[pulumi.Input[str]] = None,
master_lb_enabled: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
network_driver: Optional[pulumi.Input[str]] = None,
no_proxy: Optional[pulumi.Input[str]] = None,
public: Optional[pulumi.Input[bool]] = None,
region: Optional[pulumi.Input[str]] = None,
registry_enabled: Optional[pulumi.Input[bool]] = None,
server_type: Optional[pulumi.Input[str]] = None,
tls_disabled: Optional[pulumi.Input[bool]] = None,
volume_driver: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Manages a V1 Magnum cluster template resource within OpenStack.
## Example Usage
### Create a Cluster template
```python
import pulumi
import pulumi_openstack as openstack
clustertemplate1 = openstack.containerinfra.ClusterTemplate("clustertemplate1",
coe="kubernetes",
dns_nameserver="1.1.1.1",
docker_storage_driver="devicemapper",
docker_volume_size=10,
flavor="m1.small",
floating_ip_enabled=False,
image="Fedora-Atomic-27",
labels={
"influx_grafana_dashboard_enabled": "true",
"kube_dashboard_enabled": "true",
"kube_tag": "1.11.1",
"prometheus_monitoring": "true",
},
master_flavor="m1.medium",
master_lb_enabled=True,
network_driver="flannel",
server_type="vm",
volume_driver="cinder")
```
## Argument reference
The following arguments are supported:
* `region` - (Optional) The region in which to obtain the V1 Container Infra
client. A Container Infra client is needed to create a cluster template. If
omitted, the `region` argument of the provider is used. Changing this
creates a new cluster template.
* `name` - (Required) The name of the cluster template. Changing this updates
the name of the existing cluster template.
* `project_id` - (Optional) The project of the cluster template. Required if
admin wants to create a cluster template in another project. Changing this
creates a new cluster template.
* `user_id` - (Optional) The user of the cluster template. Required if admin
wants to create a cluster template for another user. Changing this creates
a new cluster template.
* `apiserver_port` - (Optional) The API server port for the Container
Orchestration Engine for this cluster template. Changing this updates the
API server port of the existing cluster template.
* `coe` - (Required) The Container Orchestration Engine for this cluster
template. Changing this updates the engine of the existing cluster
template.
* `cluster_distro` - (Optional) The distro for the cluster (fedora-atomic,
coreos, etc.). Changing this updates the cluster distro of the existing
cluster template.
* `dns_nameserver` - (Optional) Address of the DNS nameserver that is used in
nodes of the cluster. Changing this updates the DNS nameserver of the
existing cluster template.
* `docker_storage_driver` - (Optional) Docker storage driver. Changing this
updates the Docker storage driver of the existing cluster template.
* `docker_volume_size` - (Optional) The size (in GB) of the Docker volume.
Changing this updates the Docker volume size of the existing cluster
template.
* `external_network_id` - (Optional) The ID of the external network that will
be used for the cluster. Changing this updates the external network ID of
the existing cluster template.
* `fixed_network` - (Optional) The fixed network that will be attached to the
cluster. Changing this updates the fixed network of the existing cluster
template.
* `fixed_subnet` - (Optional) The fixed subnet that will be attached to the
cluster. Changing this updates the fixed subnet of the existing cluster
template.
* `flavor` - (Optional) The flavor for the nodes of the cluster. Can be set via
the `OS_MAGNUM_FLAVOR` environment variable. Changing this updates the
flavor of the existing cluster template.
* `master_flavor` - (Optional) The flavor for the master nodes. Can be set via
the `OS_MAGNUM_MASTER_FLAVOR` environment variable. Changing this updates
the master flavor of the existing cluster template.
* `floating_ip_enabled` - (Optional) Indicates whether created cluster should
create floating IP for every node or not. Changing this updates the
floating IP enabled attribute of the existing cluster template.
* `http_proxy` - (Optional) The address of a proxy for receiving all HTTP
requests and relay them. Changing this updates the HTTP proxy address of
the existing cluster template.
* `https_proxy` - (Optional) The address of a proxy for receiving all HTTPS
requests and relay them. Changing this updates the HTTPS proxy address of
the existing cluster template.
* `image` - (Required) The reference to an image that is used for nodes of the
cluster. Can be set via the `OS_MAGNUM_IMAGE` environment variable.
Changing this updates the image attribute of the existing cluster template.
* `insecure_registry` - (Optional) The insecure registry URL for the cluster
template. Changing this updates the insecure registry attribute of the
existing cluster template.
* `keypair_id` - (Optional) The name of the Compute service SSH keypair.
Changing this updates the keypair of the existing cluster template.
* `labels` - (Optional) The list of key value pairs representing additional
properties of the cluster template. Changing this updates the labels of the
existing cluster template.
* `master_lb_enabled` - (Optional) Indicates whether the created cluster should
  have a load balancer for the master nodes or not. Changing this updates the
  attribute of the existing cluster template.
* `network_driver` - (Optional) The name of the driver for the container
network. Changing this updates the network driver of the existing cluster
template.
* `no_proxy` - (Optional) A comma-separated list of IP addresses that shouldn't
be used in the cluster. Changing this updates the no proxy list of the
existing cluster template.
* `public` - (Optional) Indicates whether cluster template should be public.
Changing this updates the public attribute of the existing cluster
template.
* `registry_enabled` - (Optional) Indicates whether Docker registry is enabled
in the cluster. Changing this updates the registry enabled attribute of the
existing cluster template.
* `server_type` - (Optional) The server type for the cluster template. Changing
this updates the server type of the existing cluster template.
* `tls_disabled` - (Optional) Indicates whether the TLS should be disabled in
the cluster. Changing this updates the attribute of the existing cluster.
* `volume_driver` - (Optional) The name of the driver that is used for the
volumes of the cluster nodes. Changing this updates the volume driver of
the existing cluster template.
## Attributes reference
The following attributes are exported:
* `region` - See Argument Reference above.
* `name` - See Argument Reference above.
* `project_id` - See Argument Reference above.
* `created_at` - The time at which cluster template was created.
* `updated_at` - The time at which cluster template was updated.
* `apiserver_port` - See Argument Reference above.
* `coe` - See Argument Reference above.
* `cluster_distro` - See Argument Reference above.
* `dns_nameserver` - See Argument Reference above.
* `docker_storage_driver` - See Argument Reference above.
* `docker_volume_size` - See Argument Reference above.
* `external_network_id` - See Argument Reference above.
* `fixed_network` - See Argument Reference above.
* `fixed_subnet` - See Argument Reference above.
* `flavor` - See Argument Reference above.
* `master_flavor` - See Argument Reference above.
* `floating_ip_enabled` - See Argument Reference above.
* `http_proxy` - See Argument Reference above.
* `https_proxy` - See Argument Reference above.
* `image` - See Argument Reference above.
* `insecure_registry` - See Argument Reference above.
* `keypair_id` - See Argument Reference above.
* `labels` - See Argument Reference above.
* `links` - A list containing associated cluster template links.
* `master_lb_enabled` - See Argument Reference above.
* `network_driver` - See Argument Reference above.
* `no_proxy` - See Argument Reference above.
* `public` - See Argument Reference above.
* `registry_enabled` - See Argument Reference above.
* `server_type` - See Argument Reference above.
* `tls_disabled` - See Argument Reference above.
* `volume_driver` - See Argument Reference above.
## Import
Cluster | |
#!/usr/bin/env python
import argparse
import os
import sys
import csv
import h5py
import tensorflow.keras as keras
import tensorflow as tf
import numpy as np
from tqdm import tqdm
import cv2
import SimpleITK as sitk
import time
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
tf.compat.v1.enable_v2_behavior()
if __name__ == "__main__" and __package__ is None:
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
import fl_covid.bin # noqa: F401
__package__ = "fl_covid.bin"
# Change these to absolute imports if you copy this script outside the fl_covid package.
from ..utils.anchors import compute_overlap
from .. import models
from ..preprocessing.csv_generator import CSVGenerator
from ..utils.eval import _compute_ap, _get_annotations, _get_annotations_and_img_path
from ..utils.config import read_config_file, parse_anchor_parameters
from ..utils.keras_version import check_keras_version
from ..utils.visualization import draw_detections, draw_annotations
from ..utils.visualization import draw_box, label_color, draw_caption
from ..utils.image import preprocess_image, resize_image
from fl_covid.bin.train_fed import create_models
from fl_covid.bin.evaluate_overall import fp_reduce
def get_session():
    """ Construct a modified tf session.

    Returns a ``tf.compat.v1.Session`` whose GPU options allow memory to
    grow on demand instead of pre-allocating the whole GPU.
    """
    # BUGFIX: `tf.ConfigProto` and `tf.Session` were removed from the
    # top-level namespace in TensorFlow 2.x; this script already targets the
    # v1 compatibility shim (see the `tf.compat.v1` calls at the top of the
    # file), so use `tf.compat.v1` here as well.
    config = tf.compat.v1.ConfigProto()
    config.gpu_options.allow_growth = True
    return tf.compat.v1.Session(config=config)
def draw_label_hit(image, box, caption):
    """ Draw a hit-count caption just inside the bottom-left corner of a box.

    # Arguments
        image   : The image to draw on.
        box     : A list of 4 elements (x1, y1, x2, y2).
        caption : String containing the text to draw.
    """
    x1, _y1, _x2, y2 = np.array(box).astype(int)
    anchor = (x1 + 5, y2 - 5)
    # Thick black pass first, thin white pass on top -> outlined text.
    cv2.putText(image, caption, anchor, cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 0), 2)
    cv2.putText(image, caption, anchor, cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)
def draw_detections(image, boxes, scores, labels, color=None, label_to_name=None, slice_id=None, bbox_writer=None, score_threshold=0.4):  # score_threshold used to be 0.5
    """ Draws detections in an image.

    NOTE(review): this deliberately shadows the `draw_detections` imported
    from ..utils.visualization above; this variant can additionally log each
    kept box to a CSV writer.

    # Arguments
        image           : The image to draw on.
        boxes           : A [N, 4] matrix (x1, y1, x2, y2).
        scores          : A list of N classification scores.
        labels          : A list of N labels.
        color           : The color of the boxes. By default the color from keras_retinanet.utils.colors.label_color will be used.
        label_to_name   : (optional) Functor for mapping a label to a name.
        slice_id        : (optional) Slice index used to build the CSV image path.
        bbox_writer     : (optional) csv.writer; with slice_id, each drawn box is appended to it.
        score_threshold : Threshold used for determining what detections to draw.
    """
    keep = np.where(scores > score_threshold)[0]
    for idx in keep:
        box_color = label_color(labels[idx]) if color is None else color
        if bbox_writer is not None and slice_id is not None:
            x1, y1, x2, y2 = np.array(boxes[idx, :]).astype(int)
            bbox_writer.writerow(['slice_{}.png'.format(slice_id), x1, y1, x2, y2, 'lesion'])
        draw_box(image, boxes[idx, :], color=box_color, thickness=1)
        # draw labels
        if label_to_name:
            name = label_to_name(labels[idx])
        else:
            name = str(labels[idx])
        draw_caption(image, boxes[idx, :], name + ': {0:.2f}'.format(scores[idx]))
def read_h5(img_path):
    """Return the 'arr' dataset stored in the HDF5 file at *img_path*."""
    with h5py.File(img_path, "r") as handle:
        return handle['arr'][:]
def draw_colorful_result(
    args,
    client_name,
    patient_name,
    iou_threshold=0.5,
    score_threshold=0.05,
    max_detections=100,
    save_path=None
):
    """ Redraw detection results colour-coded by box type and save as a NIfTI volume.

    Reads the per-patient annotation and prediction CSVs produced earlier,
    draws ground-truth boxes (green, labelled with their hit count) and
    predicted boxes (colour chosen by ``box_type``) on every slice, then
    stacks the slices and writes
    ``<save_path>/<client>_<patient>_colorful_detection_<sign>result.nii.gz``.

    # Arguments
        args            : Parsed CLI args; only ``args.reduce_fp`` is used here.
        client_name     : Client id used in input/output file names.
        patient_name    : Patient id used in input/output file names.
        iou_threshold   : Unused here; kept for signature compatibility.
        score_threshold : Score threshold embedded in the CSV file names.
        max_detections  : Unused here; kept for signature compatibility.
        save_path       : Directory holding the CSV/NPY inputs and the output.
    """
    def _parse(value, function, fmt):
        """Parse a string into a value, and format a nice ValueError if it fails.

        Returns `function(value)`.
        Any `ValueError` raised is caught and a new `ValueError` is raised
        with message `fmt.format(e)`, where `e` is the caught `ValueError`.
        """
        try:
            return function(value)
        except ValueError as e:
            raise ValueError(fmt.format(e))

    if args.reduce_fp:
        sign = 'fp_reduced_'
    else:
        sign = ''
    bbox_result_path = os.path.join(save_path, '{}_{}_score_thres_{}_bbox.csv'.format(client_name, patient_name, score_threshold))
    anno_result_path = os.path.join(save_path, '{}_{}_score_thres_{}_anno.csv'.format(client_name, patient_name, score_threshold))
    all_annotations_img_path = np.load(os.path.join(save_path, '{}_{}_annotations_img_path.npy'.format(client_name, patient_name)), allow_pickle=True)

    # Parse the annotation CSV: img_file,x1,y1,x2,y2,class_name,hit_cnt
    # BUGFIX: both CSV handles were previously opened and never closed;
    # `with` guarantees they are released even if parsing raises.
    anno_result = {}
    classes = {'lesion': 0}
    with open(anno_result_path, 'r') as annos:
        for line, row in enumerate(annos):
            splits = row.split(',')
            try:
                img_file, x1, y1, x2, y2, class_name, hit_cnt = splits
                hit_cnt = hit_cnt.replace('\n', '')
            except ValueError:
                raise ValueError(
                    'line {}: format should be \'img_file,x1,y1,x2,y2,class_name\' or \'img_file,,,,,\''.format(line))
            if img_file not in anno_result:
                anno_result[img_file] = []
            x1 = _parse(x1, int, 'line {}: malformed x1: {{}}'.format(line))
            y1 = _parse(y1, int, 'line {}: malformed y1: {{}}'.format(line))
            x2 = _parse(x2, int, 'line {}: malformed x2: {{}}'.format(line))
            y2 = _parse(y2, int, 'line {}: malformed y2: {{}}'.format(line))
            hit_cnt = _parse(hit_cnt, int, 'line {}: malformed hit count: {{}}'.format(line))
            if x2 <= x1:
                raise ValueError('line {}: x2 ({}) must be higher than x1 ({})'.format(line, x2, x1))
            if y2 <= y1:
                raise ValueError('line {}: y2 ({}) must be higher than y1 ({})'.format(line, y2, y1))
            # check if the current class name is correctly present
            if str(class_name) not in classes:
                raise ValueError(
                    'line {}: unknown class name: \'{}\' (classes: {})'.format(line, class_name, classes))
            anno_result[img_file].append({'x1': x1, 'x2': x2, 'y1': y1, 'y2': y2, 'class': class_name, 'hit_cnt': hit_cnt})

    # Parse the prediction CSV: img_file,x1,y1,x2,y2,class_name,score,box_type
    bbox_result = {}
    classes = {'lesion': 0}
    with open(bbox_result_path, 'r') as bboxs:
        for line, row in enumerate(bboxs):
            splits = row.split(',')
            try:
                img_file, x1, y1, x2, y2, class_name, score, box_type = splits
                box_type = box_type.replace('\n', '')
            except ValueError:
                raise ValueError(
                    'line {}: format should be \'img_file,x1,y1,x2,y2,class_name\' or \'img_file,,,,,\''.format(line))
            if img_file not in bbox_result:
                bbox_result[img_file] = []
            x1 = _parse(x1, int, 'line {}: malformed x1: {{}}'.format(line))
            y1 = _parse(y1, int, 'line {}: malformed y1: {{}}'.format(line))
            x2 = _parse(x2, int, 'line {}: malformed x2: {{}}'.format(line))
            y2 = _parse(y2, int, 'line {}: malformed y2: {{}}'.format(line))
            if x2 <= x1:
                raise ValueError('line {}: x2 ({}) must be higher than x1 ({})'.format(line, x2, x1))
            if y2 <= y1:
                raise ValueError('line {}: y2 ({}) must be higher than y1 ({})'.format(line, y2, y1))
            # check if the current class name is correctly present
            if str(class_name) not in classes:
                raise ValueError(
                    'line {}: unknown class name: \'{}\' (classes: {})'.format(line, class_name, classes))
            bbox_result[img_file].append({'x1': x1, 'x2': x2, 'y1': y1, 'y2': y2, 'class': class_name, 'score': score, 'box_type': str(box_type)})

    detection_out = np.zeros([len(all_annotations_img_path), 512, 512, 3])
    for i in tqdm(range(len(all_annotations_img_path)), desc='Drawing colorful {} result on {} {}: '.format(sign, client_name, patient_name)):
        img_path = all_annotations_img_path[i]
        raw_img = read_h5(img_path)
        image = raw_img.copy()
        if keras.backend.image_data_format() == 'channels_first':
            image = image.transpose((2, 0, 1))
        if img_path in anno_result:
            for anno_index in range(len(anno_result[img_path])):
                # Ground truth: green box labelled with its hit count.
                hit_cnt = anno_result[img_path][anno_index]['hit_cnt']
                caption = '{}'.format(hit_cnt)
                anno_box = [anno_result[img_path][anno_index]['x1'], anno_result[img_path][anno_index]['y1'], anno_result[img_path][anno_index]['x2'], anno_result[img_path][anno_index]['y2']]
                draw_label_hit(image, anno_box, caption)
                draw_box(image, anno_box, color=[0, 255, 0], thickness=1)
        if img_path in bbox_result:
            for bbox_index in range(len(bbox_result[img_path])):
                pred_box = [bbox_result[img_path][bbox_index]['x1'], bbox_result[img_path][bbox_index]['y1'], bbox_result[img_path][bbox_index]['x2'], bbox_result[img_path][bbox_index]['y2']]
                box_type = str(bbox_result[img_path][bbox_index]['box_type'])
                score = float(bbox_result[img_path][bbox_index]['score'])
                # Colour encodes how the prediction matched the ground truth.
                if box_type == 'max_overlap':
                    box_color = [31, 0, 255]
                elif box_type == 'assigned_pre':
                    box_color = [184, 0, 255]
                elif box_type == 'assigned_gt':
                    box_color = [139, 69, 19]
                elif box_type == 'fp':
                    box_color = [225, 0, 0]
                else:
                    raise ValueError("Unknown box type :{}".format(box_type))
                draw_box(image, pred_box, color=box_color, thickness=1)
                caption = ('{0:.2f}'.format(score))
                draw_caption(image, pred_box, caption)
        detection_out[i, :, :] = image
    print('Writing colorful results on {} {}...'.format(client_name, patient_name))
    detection_out = sitk.GetImageFromArray(detection_out)
    sitk.WriteImage(detection_out, os.path.join(save_path, '{}_{}_colorful_detection_{}result.nii.gz'.format(client_name, patient_name, sign)))
def create_generator(args):
    """ Create generators for evaluation.

    Only the 'csv' dataset type is supported; any other value raises
    ValueError.
    """
    # Guard clause: reject unsupported dataset types up front.
    if args.dataset_type != 'csv':
        raise ValueError('Invalid data type received: {}'.format(args.dataset_type))
    return CSVGenerator(
        args.annotations,
        args.classes,
        image_min_side=args.image_min_side,
        image_max_side=args.image_max_side,
        config=args.config,
        shuffle_groups=False
    )
def _seg_filter(bboxes,scores_sort,seg):
image_boxes = bboxes
inner = np.asarray([],dtype=np.bool)
flag = False
for i in range(image_boxes.shape[0]):
x1 = int(image_boxes[i][0])
y1 = int(image_boxes[i][1])
x2 = int(image_boxes[i][2])
y2 = int(image_boxes[i][3])
x1 = 511 if x1 > 511 else x1
y1 = 511 if y1 > 511 else y1
x2 = 511 if x2 > 511 else x2
y2 = 511 if y2 > 511 else y2
# print(scores_sort)
# print(scores_sort.shape)
if (seg[y1,x1,:] == 0).all() and (seg[y2,x2,:] == 0).all() and (seg[y1,x2,:] == 0).all() and (seg[y2,x1,:] == 0).all():
inner = np.append(inner,False)
flag=True
# scores_sort = np.delete(scores_sort,i,axis=0)
else:
inner = np.append(inner, True)
# print(inner)
# cnt = 1
# if flag:
# if cnt > 0:
# print("FP out of lung filtered")
# cnt -= 1
scores_sort = scores_sort[inner]
# print('scores_sort after filter')
# print(scores_sort.shape)
# print(scores_sort)
return scores_sort
def _print_detections_to_npy(args, generator, model, client_idx, client_name, patient_name, score_threshold=0.05, max_detections=100, save_path=None):
all_detections = [[None for i in range(generator.num_classes()) if generator.has_label(i)] for j in range(generator.size())]
detection_out = np.zeros([generator.size(),512,512,3])
# detection_out = np.zeros([generator.size(),512,512])
attention_out = np.zeros([generator.size(),512,512])
mask_out = np.zeros([generator.size(),512,512])
results = open(os.path.join(save_path, '{}_{}_output_bbox.csv'.format(client_name, patient_name)), 'w', newline='')
result_writer = csv.writer(results, delimiter=',')
for i in tqdm(range(generator.size()), desc='Running network on {}_{}: '.format(client_name, patient_name)):
raw_image = generator.load_image(i)
# image = np.expand_dims(raw_image.copy(), axis=-1)
# image = np.repeat(image, 3, axis=-1)
# image = generator.preprocess_image(image)
image = generator.preprocess_image(raw_image.copy())
image, scale = generator.resize_image(image)
if keras.backend.image_data_format() == 'channels_first':
image = image.transpose((2, 0, 1))
# run network
# boxes, scores, labels = model.predict_on_batch(np.expand_dims(image, axis=0))[:3]
boxes, scores, labels, masks, attention_map = model.predict_on_batch(np.expand_dims(image, axis=0))
# print('boxes:', boxes.shape)
# print('scores:', scores.shape)
# print('labels',labels.shape)
# correct boxes for image scale
boxes /= scale
# select indices which have a score above the threshold
indices = np.where(scores[0, :] > -1)[0]
# print('indices', indices)
# print(type(scores))
if type(scores) is not np.ndarray:
scores = scores.numpy()
boxes = boxes.numpy()
labels = labels.numpy()
masks = masks.numpy()
attention_map = attention_map.numpy()
# select those scores
scores = scores[0][indices]
# find the order with | |
dict(type=T(series)),
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(I(" ", _class="icon icon-remove-sign"),
_class="dl-item-delete",
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
# Dropdown of available documents
dtable = db.doc_document
query = (table.doc_id == dtable.doc_id) & \
(dtable.deleted == False)
documents = db(query).select(dtable.file)
if documents:
doc_list = UL(_class="dropdown-menu",
_role="menu",
)
retrieve = db.doc_document.file.retrieve
for doc in documents:
filename = doc.file
try:
doc_name = retrieve(filename)[0]
except IOError:
doc_name = current.messages["NONE"]
doc_url = URL(c="default", f="download",
args=[filename])
doc_item = LI(A(I(_class="icon-file"),
" ",
doc_name,
_href=doc_url,
),
_role="menuitem",
)
doc_list.append(doc_item)
docs = DIV(A(I(_class="icon-paper-clip"),
SPAN(_class="caret"),
_class="btn dropdown-toggle",
_href="#",
**{"_data-toggle": "dropdown"}
),
doc_list,
_class="btn-group attachments dropdown pull-right",
)
else:
docs = ""
icon = series.lower().replace(" ", "_")
card_label = TAG[""](I(_class="icon icon-%s" % icon),
SPAN(" %s" % T(series),
_class="card-title"))
# Type cards
if series == "Incident":
# Apply additional highlighting for Incidents
item_class = "%s disaster" % item_class
# Render the item
item = DIV(DIV(card_label,
SPAN(A(location,
_href=location_url,
),
_class="location-title",
),
SPAN(date,
_class="date-title",
),
#edit_bar,
_class="card-header",
),
DIV(avatar,
DIV(DIV(body,
DIV(author,
" - ",
A(organisation,
_href=org_url,
_class="card-organisation",
),
_class="card-person",
),
_class="media",
),
_class="media-body",
),
_class="media",
),
docs,
_class=item_class,
_id=item_id,
)
return item
# =============================================================================
def cms_post_age(row):
    """
        The age of the post
        - used for colour-coding markers of Incidents

        Returns 1 for posts under 2 days old, 2 for under a week, 3
        otherwise, or the "NONE" message string when no date is available.
    """

    if hasattr(row, "cms_post"):
        row = row.cms_post

    try:
        date = row.date
    except (AttributeError, KeyError):
        # date not available on this row.
        # BUGFIX: narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit and hid programming errors.
        return current.messages["NONE"]

    now = current.request.utcnow
    age = now - date
    if age < timedelta(days=2):
        return 1
    elif age < timedelta(days=7):
        return 2
    else:
        return 3
# -----------------------------------------------------------------------------
def customize_cms_post(**attr):
    """
        Customize cms_post controller

        Installs custom prep/postp hooks on current.response.s3 and returns
        the (unmodified) attr dict. Relies on framework globals (current, T,
        settings, customize_cms_post_fields, render_posts,
        s3_auth_user_represent_name) defined elsewhere in this template.
    """

    s3db = current.s3db
    s3 = current.response.s3

    #s3db.configure("cms_post",
    #               marker_fn=cms_post_marker_fn,
    #               )

    # Custom PreP
    # Chain our prep after any previously-installed prep hook.
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            # Called first so that we can unhide the Type field
            result = standard_prep(r)
            if not result:
                return False

        if r.interactive:
            from s3.s3forms import S3SQLCustomForm, S3SQLInlineComponent
            table = customize_cms_post_fields()

            get_vars = current.request.get_vars

            field = table.series_id
            field.label = T("Type")

            if r.method == "read":
                # Restore the label for the Location
                table.location_id.label = T("Location")

            refresh = get_vars.get("refresh", None)
            if refresh == "datalist":
                # We must be coming from the News Feed page so can change the type on-the-fly
                field.readable = field.writable = True
            #field.requires = field.requires.other
            #field = table.name
            #field.readable = field.writable = False
            #field = table.title
            #field.readable = field.writable = False
            field = table.avatar
            field.default = True
            #field.readable = field.writable = False
            field = table.replies
            field.default = False
            #field.readable = field.writable = False
            field = table.body
            field.label = T("Description")
            field.widget = None
            #table.comments.readable = table.comments.writable = False

            if current.request.controller == "default":
                # Don't override card layout for News Feed/Homepage
                return True

            # Filter from a Profile page?
            # If so, then default the fields we know
            location_id = get_vars.get("~.(location)", None)
            if location_id:
                table.location_id.default = location_id
            # event_id = get_vars.get("~.(event)", None)
            # if event_id:
            #     crud_form = S3SQLCustomForm(
            #         "date",
            #         "series_id",
            #         "body",
            #         "location_id",
            #         S3SQLInlineComponent(
            #             "document",
            #             name = "file",
            #             label = T("Files"),
            #             fields = ["file",
            #                       ##"comments",
            #                       ],
            #         ),
            #     )
            #     def create_onaccept(form):
            #         table = current.s3db.event_event_post
            #         table.insert(event_id=event_id, post_id=form.vars.id)
            #     s3db.configure("cms_post",
            #                    create_onaccept = create_onaccept,
            #                    )
            # else:
            # Custom CRUD form with inline document attachments.
            crud_form = S3SQLCustomForm(
                "date",
                "series_id",
                "body",
                "location_id",
                #S3SQLInlineComponent(
                #    "event_post",
                #    #label = T("Disaster(s)"),
                #    label = T("Disaster"),
                #    multiple = False,
                #    fields = ["event_id"],
                #    orderby = "event_id$name",
                #),
                S3SQLInlineComponent(
                    "document",
                    name = "file",
                    label = T("Files"),
                    fields = ["file",
                              #"comments",
                              ],
                ),
            )

            # Return to List view after create/update/delete
            # We now do all this in Popups
            #url_next = URL(c="default", f="index", args="newsfeed")
            s3db.configure("cms_post",
                           #create_next = url_next,
                           #delete_next = url_next,
                           #update_next = url_next,
                           crud_form = crud_form,
                           # Don't include a Create form in 'More' popups
                           listadd = False,
                           list_layout = render_posts,
                           )

            s3.cancel = True

        elif r.representation == "xls":
            # Spreadsheet export: friendlier representations & column labels.
            table = r.table
            table.created_by.represent = s3_auth_user_represent_name
            #table.created_on.represent = datetime_represent
            utable = current.auth.settings.table_user
            utable.organisation_id.represent = s3db.org_organisation_represent

            list_fields = [
                (T("Date"), "date"),
                (T("Disaster"), "event_post.event_id"),
                (T("Type"), "series_id"),
                (T("Details"), "body"),
                (T("District"), "location_id$L1"),
                (T("Sub-District"), "location_id$L2"),
                (T("Suco"), "location_id$L3"),
                (T("Author"), "created_by"),
                (T("Organization"), "created_by$organisation_id"),
            ]
            s3db.configure("cms_post",
                           list_fields = list_fields,
                           )

        elif r.representation == "plain" and \
             r.method != "search":
            # Map Popups
            table = r.table
            table.location_id.represent = s3db.gis_LocationRepresent(sep=" | ")
            table.created_by.represent = s3_auth_user_represent_name
            # Used by default popups
            series = T(table.series_id.represent(r.record.series_id))
            s3.crud_strings["cms_post"].title_display = "%(series)s Details" % dict(series=series)
            s3db.configure("cms_post",
                           popup_url="",
                           )
            table.avatar.readable = False
            table.body.label = ""
            table.expired.readable = False
            table.replies.readable = False
            table.created_by.readable = True
            table.created_by.label = T("Author")
            # Used by cms_post_popup
            #table.created_on.represent = datetime_represent

        elif r.representation == "geojson":
            # Lazy virtual field: age bucket used to colour-code map markers.
            r.table.age = Field.Lazy(cms_post_age)

        return True
    s3.prep = custom_prep

    # Custom postp
    standard_postp = s3.postp
    def custom_postp(r, output):
        # Call standard postp
        if callable(standard_postp):
            output = standard_postp(r, output)

        if r.interactive and isinstance(output, dict):
            # Tag forms/items with a CSS class for theme styling.
            if "form" in output:
                output["form"].add_class("cms_post")
            elif "item" in output and hasattr(output["item"], "add_class"):
                output["item"].add_class("cms_post")
        elif r.representation == "plain" and \
             r.method != "search":
            # Map Popups
            #output = cms_post_popup(r)
            pass

        return output
    s3.postp = custom_postp

    return attr

settings.ui.customize_cms_post = customize_cms_post
# -----------------------------------------------------------------------------
def customize_event_event(**attr):
"""
Customize event_event controller
- Profile Page
"""
s3 = current.response.s3
# Custom PreP
standard_prep = s3.prep
def custom_prep(r):
if r.interactive:
ADD_EVENT = T("New Disaster")
s3.crud_strings["event_event"] = Storage(
title_create = ADD_EVENT,
title_display = T("Disaster Details"),
title_list = T("Disasters"),
title_update = T("Edit Disaster"),
title_search = T("Search Disasters"),
subtitle_create = T("Add New Disaster"),
label_list_button = T("List Disasters"),
label_create_button = ADD_EVENT,
label_delete_button = T("Delete Disaster"),
msg_record_created = T("Disaster added"),
msg_record_modified = T("Disaster updated"),
msg_record_deleted = T("Disaster deleted"),
msg_list_empty = T("No Disasters currently registered"))
db = current.db
s3db = current.s3db
# Load normal Model
table = s3db.event_event
table.exercise.label = T("Is this an Exercise?")
table.zero_hour.label = T("Start Time")
if r.method =="datalist":
# Disaster selection page
# 2-column datalist, 6 rows per page
s3.dl_pagelength = 12
s3.dl_rowsize = 2
elif r.method == "profile":
# Customise the cms_post table as that is used for the widgets
customize_cms_post_fields()
# Represent used in rendering
current.auth.settings.table_user.organisation_id.represent = s3db.org_organisation_represent
gtable = db.gis_location
ltable = db.event_event_location
query = (ltable.event_id == r.id) & \
(ltable.location_id == gtable.id)
location = db(query).select(gtable.id,
gtable.lat_max,
gtable.lon_max,
gtable.lat_min,
gtable.lon_min,
limitby=(0, 1)).first()
if location:
bbox = {"lat_max" : location.lat_max,
"lon_max" : location.lon_max,
"lat_min" : location.lat_min,
"lon_min" : location.lon_min
}
default = "~.(location)=%s" % location.id
else:
# Default bounds
bbox = {}
# No default Location
default = None
map_widget = dict(label = "Map",
type = "map",
context = "event",
icon = "icon-map",
height = 383,
width = 568,
bbox = bbox,
)
incidents_widget = dict(label = "Incidents",
title_create = "Add New Incident",
type = "datalist",
tablename = "cms_post",
context = "event",
default = default,
filter = S3FieldSelector("series_id$name") == "Incident",
icon = "icon-incident",
layer = "Incidents",
# provided by Catalogue Layer
#marker = "incident",
list_layout = render_profile_posts,
)
assessments_widget = dict(label = "Assessments",
title_create = "Add New Assessment",
type = "datalist",
tablename = "cms_post",
context = "event",
default = default,
filter = S3FieldSelector("series_id$name") == "Assessment",
icon = "icon-assessment",
layer = "Assessments",
# provided by Catalogue Layer
#marker = "assessment",
list_layout = render_profile_posts,
)
activities_widget = dict(label = "Activities",
title_create = "Add New Activity",
type = "datalist",
tablename = "cms_post",
context = "event",
default = default,
filter = S3FieldSelector("series_id$name") == "Activity",
icon = "icon-activity",
layer = "Activities",
# provided by Catalogue Layer
#marker = "activity",
list_layout = render_profile_posts,
)
reports_widget = dict(label = "Reports",
title_create = "Add New Report",
type = "datalist",
tablename = "cms_post",
context = "event",
default = default,
filter = S3FieldSelector("series_id$name") == "Report",
icon = "icon-report",
layer = "Reports",
# provided by Catalogue Layer
#marker = "report",
list_layout = render_profile_posts,
)
#comments_widget = dict(label = "Comments",
# type = "comments",
# icon = "icon-comments-alt",
# colspan = 2,
# )
record = r.record
ttable = db.event_event_type
event_type = db(ttable.id == record.event_type_id).select(ttable.name,
limitby=(0, 1),
).first().name
s3db.configure("event_event",
profile_title = "%s : %s" % (s3.crud_strings["event_event"].title_list,
record.name),
profile_header = DIV(A(IMG(_class="media-object",
_src=URL(c="static",
f="themes",
args=["DRMP", "img",
"%s.png" % event_type]),
),
_class="pull-left",
#_href=event_url,
),
H2(record.name),
#P(record.comments),
_class="profile_header",
),
profile_widgets | |
return self
    def clearId(self):
        """Reset 'id' to its default (0) and clear its presence bit."""
        self.checkMutable()
        self._id = 0
        self._clearPresent('id')
        return self

    def hasOptions(self):
        """True if 'options' has been explicitly set on this instance."""
        return self._isPresent('options')

    def getOptions(self):
        # May still be the shared default instance if never set.
        return self._options

    def getMutableOptions(self):
        """Return a mutable copy of 'options', installing it on first use."""
        self.checkMutable()
        # Replace the shared default / frozen instance with a private copy
        # before handing it out for mutation.
        if self._options is MethodOptions.defaultInstance() or not self._options.isMutable():
            self._setPresent('options')
            self._options = self._options.shallowCopy()
        return self._options

    def setOptions(self, options):
        self.checkMutable()
        self._options = options
        self._setPresent('options')
        return self

    def clearOptions(self):
        """Reset 'options' to the shared default instance."""
        self.checkMutable()
        self._options = MethodOptions.defaultInstance()
        self._clearPresent('options')
        return self
    # ---- Generated reflection metadata (consumed by the coda runtime) ----
    # Struct types nested inside this type definition.
    __structs__ = (
        Field,
        Param,
        Method,
    )
    # No nested enum types.
    __enums__ = ()
    # Per-field descriptor tuples.
    # NOTE(review): the integer slots look like (field id, type id, aux);
    # their exact meaning is defined by the coda runtime -- confirm there
    # before relying on it.
    __fields__ = (
        ('options', 1, 19, -1),
        ('baseType', 3, 20, 0),
        ('typeId', 4, 1, -1),
        ('fields', 5, 9, -1),
        ('structs', 6, 5, -1),
        ('enums', 7, 3, -1),
        ('extensions', 8, 7, -1),
        ('minExtension', 9, 1, -1),
        ('maxExtension', 10, 1, -1),
    )
    __extensions__ = ()
    # Wire/type id of this struct (also written by _writeFields below).
    TYPE_ID = 30

    def __init__(self):
        """Initialize every field to its default value."""
        super().__init__()
        self._options = StructOptions.defaultInstance()
        self._baseType = None
        self._typeId = 0
        # EMPTY_LIST is a shared immutable sentinel; real lists are created
        # lazily by the getMutable*() accessors.
        self._fields = coda.runtime.Object.EMPTY_LIST
        self._structs = coda.runtime.Object.EMPTY_LIST
        self._enums = coda.runtime.Object.EMPTY_LIST
        self._extensions = coda.runtime.Object.EMPTY_LIST
        self._minExtension = 0
        self._maxExtension = 0
    def _equalsImpl(self, other):
        """Field-by-field equality; superclass fields are compared first."""
        return (super()._equalsImpl(other) and
                self._options == other._options and
                self._baseType == other._baseType and
                self._typeId == other._typeId and
                self._fields == other._fields and
                self._structs == other._structs and
                self._enums == other._enums and
                self._extensions == other._extensions and
                self._minExtension == other._minExtension and
                self._maxExtension == other._maxExtension)

    def _hashImpl(self):
        """Hash over the same fields that _equalsImpl compares."""
        return hash((super()._hashImpl(),
                     self._options,
                     self._baseType,
                     self._typeId,
                     self._fields,
                     self._structs,
                     self._enums,
                     self._extensions,
                     self._minExtension,
                     self._maxExtension))

    def _freezeImpl(self, deep=True):
        """Freeze this struct; with deep=True also freeze nested structs."""
        super()._freezeImpl(deep)
        if deep and self._options and self._options.isMutable():
            self._options.freeze(deep)
        if deep and self._baseType and self._baseType.isMutable():
            self._baseType.freeze(deep)
        # List fields become tuples so the frozen struct is immutable (and
        # hashable by _hashImpl above).
        if type(self._fields) is not tuple:
            self._fields = tuple(self._fields)
        if type(self._structs) is not tuple:
            self._structs = tuple(self._structs)
        if type(self._enums) is not tuple:
            self._enums = tuple(self._enums)
        if type(self._extensions) is not tuple:
            self._extensions = tuple(self._extensions)
    def _writeFields(self, encoder):
        """Serialize every present field of this struct to *encoder*.

        Fields are emitted in ascending field-id order; list fields only
        when non-empty, scalar/struct fields only when their presence bit
        is set. Superclass fields are written last.
        """
        encoder.writeSubtypeHeader('StructType', 30)
        if self.hasOptions():
            encoder.writeFieldHeader('options', 1)
            encoder.writeStruct(self._options, True)
        if self.hasBaseType():
            encoder.writeFieldHeader('baseType', 3)
            encoder.writeStruct(self._baseType, True)
        if self.hasTypeId():
            encoder.writeFieldHeader('typeId', 4)
            encoder.writeInteger(self._typeId)
        if len(self._fields):
            encoder.writeFieldHeader('fields', 5)
            encoder.writeBeginList(30, len(self._fields))
            for val in self._fields:
                encoder.writeStruct(val)
            encoder.writeEndList()
        if len(self._structs):
            encoder.writeFieldHeader('structs', 6)
            encoder.writeBeginList(30, len(self._structs))
            for val in self._structs:
                encoder.writeStruct(val, True)
            encoder.writeEndList()
        if len(self._enums):
            encoder.writeFieldHeader('enums', 7)
            encoder.writeBeginList(30, len(self._enums))
            for val in self._enums:
                encoder.writeStruct(val, True)
            encoder.writeEndList()
        if len(self._extensions):
            encoder.writeFieldHeader('extensions', 8)
            encoder.writeBeginList(30, len(self._extensions))
            for val in self._extensions:
                encoder.writeStruct(val)
            encoder.writeEndList()
        if self.hasMinExtension():
            encoder.writeFieldHeader('minExtension', 9)
            encoder.writeInteger(self._minExtension)
        if self.hasMaxExtension():
            encoder.writeFieldHeader('maxExtension', 10)
            encoder.writeInteger(self._maxExtension)
        super()._writeFields(encoder)
    def merge(self, src):
        """Merge *src* into self and return self.

        Present scalar/struct fields from *src* overwrite this instance's
        values; list-valued fields are appended rather than replaced.
        """
        super().merge(src)
        if src.hasOptions():
            self.setOptions(src.getOptions())
        if src.hasBaseType():
            self.setBaseType(src.getBaseType())
        if src.hasTypeId():
            self.setTypeId(src.getTypeId())
        # List fields are concatenated.
        self.getMutableFields().extend(src.getFields())
        self.getMutableStructs().extend(src.getStructs())
        self.getMutableEnums().extend(src.getEnums())
        self.getMutableExtensions().extend(src.getExtensions())
        if src.hasMinExtension():
            self.setMinExtension(src.getMinExtension())
        if src.hasMaxExtension():
            self.setMaxExtension(src.getMaxExtension())
        return self
    def hasOptions(self):
        """True if 'options' has been explicitly set."""
        return self._isPresent('options')

    def getOptions(self):
        # May still be the shared default instance if never set.
        return self._options

    def getMutableOptions(self):
        """Return a mutable copy of 'options', installing it on first use."""
        self.checkMutable()
        # Replace the shared default / frozen instance with a private copy.
        if self._options is StructOptions.defaultInstance() or not self._options.isMutable():
            self._setPresent('options')
            self._options = self._options.shallowCopy()
        return self._options

    def setOptions(self, options):
        self.checkMutable()
        self._options = options
        self._setPresent('options')
        return self

    def clearOptions(self):
        """Reset 'options' to the shared default instance."""
        self.checkMutable()
        self._options = StructOptions.defaultInstance()
        self._clearPresent('options')
        return self

    def hasBaseType(self):
        return self._isPresent('baseType')

    def getBaseType(self):
        # May be None when no base type was ever assigned.
        return self._baseType

    def getMutableBaseType(self):
        self.checkMutable()
        # NOTE(review): if _baseType is still None this calls
        # None.shallowCopy() and raises AttributeError -- callers apparently
        # must setBaseType() first; confirm against the coda generator.
        if self._baseType is None or not self._baseType.isMutable():
            self._setPresent('baseType')
            self._baseType = self._baseType.shallowCopy()
        return self._baseType

    def setBaseType(self, baseType):
        self.checkMutable()
        self._baseType = baseType
        self._setPresent('baseType')
        return self

    def clearBaseType(self):
        """Reset 'baseType' to None and clear its presence bit."""
        self.checkMutable()
        self._baseType = None
        self._clearPresent('baseType')
        return self

    def hasTypeId(self):
        return self._isPresent('typeId')

    def getTypeId(self):
        return self._typeId

    def setTypeId(self, typeId):
        self.checkMutable()
        self._typeId = typeId
        self._setPresent('typeId')
        return self

    def clearTypeId(self):
        """Reset 'typeId' to its default (0)."""
        self.checkMutable()
        self._typeId = 0
        self._clearPresent('typeId')
        return self
def getFields(self):
    return self._fields
def getMutableFields(self):
    """Return the field list, swapping in a private list on first write."""
    self.checkMutable()
    # EMPTY_LIST is a shared immutable sentinel; replace it lazily so
    # unmodified instances never allocate their own list.
    if self._fields is coda.runtime.Object.EMPTY_LIST:
        self._fields = []
    return self._fields
def setFields(self, fields):
    self.checkMutable()
    self._fields = fields
    return self
def clearFields(self):
    self.checkMutable()
    self._fields = coda.runtime.Object.EMPTY_LIST
    return self
def getStructs(self):
    return self._structs
def getMutableStructs(self):
    """Return the nested-struct list, copy-on-write like getMutableFields."""
    self.checkMutable()
    if self._structs is coda.runtime.Object.EMPTY_LIST:
        self._structs = []
    return self._structs
def setStructs(self, structs):
    self.checkMutable()
    self._structs = structs
    return self
def clearStructs(self):
    self.checkMutable()
    self._structs = coda.runtime.Object.EMPTY_LIST
    return self
def getEnums(self):
    return self._enums
def getMutableEnums(self):
    """Return the nested-enum list, copy-on-write like getMutableFields."""
    self.checkMutable()
    if self._enums is coda.runtime.Object.EMPTY_LIST:
        self._enums = []
    return self._enums
def setEnums(self, enums):
    self.checkMutable()
    self._enums = enums
    return self
def clearEnums(self):
    self.checkMutable()
    self._enums = coda.runtime.Object.EMPTY_LIST
    return self
def getExtensions(self):
    return self._extensions
def getMutableExtensions(self):
    """Return the extension list, copy-on-write like getMutableFields."""
    self.checkMutable()
    if self._extensions is coda.runtime.Object.EMPTY_LIST:
        self._extensions = []
    return self._extensions
def setExtensions(self, extensions):
    self.checkMutable()
    self._extensions = extensions
    return self
def clearExtensions(self):
    self.checkMutable()
    self._extensions = coda.runtime.Object.EMPTY_LIST
    return self
def hasMinExtension(self):
    return self._isPresent('minExtension')
def getMinExtension(self):
    # Lower bound of the reserved extension-id range (0 when unset).
    return self._minExtension
def setMinExtension(self, minExtension):
    self.checkMutable()
    self._minExtension = minExtension
    self._setPresent('minExtension')
    return self
def clearMinExtension(self):
    self.checkMutable()
    self._minExtension = 0
    self._clearPresent('minExtension')
    return self
def hasMaxExtension(self):
    return self._isPresent('maxExtension')
def getMaxExtension(self):
    # Upper bound of the reserved extension-id range (0 when unset).
    return self._maxExtension
def setMaxExtension(self, maxExtension):
    self.checkMutable()
    self._maxExtension = maxExtension
    self._setPresent('maxExtension')
    return self
def clearMaxExtension(self):
    self.checkMutable()
    self._maxExtension = 0
    self._clearPresent('maxExtension')
    return self
# =============================================================================
# EnumType
# =============================================================================
class EnumType(DeclType):
    """Generated descriptor for an enum declaration.

    Carries the enum's options plus its list of named values.  Instances
    follow the runtime's freeze/mutate protocol: once frozen, list fields
    are stored as tuples and all setters raise via checkMutable().
    """
    __slots__ = [
        '_options',
        '_values',
    ]
    class Value(coda.runtime.Object):
        """A single enum constant: a (name, value) pair."""
        __slots__ = [
            '_name',
            '_value',
        ]
        __structs__ = ()
        __enums__ = ()
        # Field descriptors: (name, field id, type code, subtype index).
        # NOTE(review): exact meaning of the 3rd/4th entries is defined by
        # the coda runtime -- confirm before relying on them.
        __fields__ = (
            ('name', 1, 2, -1),
            ('value', 2, 1, -1),
        )
        __extensions__ = ()
        TYPE_ID = None
        def __init__(self):
            super().__init__()
            self._name = coda.runtime.Object.EMPTY_STRING
            self._value = 0
        def _equalsImpl(self, other):
            # Structural equality over name and value.
            return (super()._equalsImpl(other) and
                self._name == other._name and
                self._value == other._value)
        def _hashImpl(self):
            return hash((super()._hashImpl(),
                self._name,
                self._value))
        def _writeFields(self, encoder):
            # Only fields whose presence bit is set get serialized.
            if self.hasName():
                encoder.writeFieldHeader('name', 1)
                encoder.writeString(self._name)
            if self.hasValue():
                encoder.writeFieldHeader('value', 2)
                encoder.writeInteger(self._value)
        def merge(self, src):
            """Copy the fields present in *src*; returns self."""
            if src.hasName():
                self.setName(src.getName())
            if src.hasValue():
                self.setValue(src.getValue())
            return self
        def hasName(self):
            return self._isPresent('name')
        def getName(self):
            return self._name
        def setName(self, name):
            self.checkMutable()
            self._name = name
            self._setPresent('name')
            return self
        def clearName(self):
            self.checkMutable()
            self._name = coda.runtime.Object.EMPTY_STRING
            self._clearPresent('name')
            return self
        def hasValue(self):
            return self._isPresent('value')
        def getValue(self):
            return self._value
        def setValue(self, value):
            self.checkMutable()
            self._value = value
            self._setPresent('value')
            return self
        def clearValue(self):
            self.checkMutable()
            self._value = 0
            self._clearPresent('value')
            return self
    __structs__ = (
        Value,
    )
    __enums__ = ()
    __fields__ = (
        ('options', 1, 15, -1),
        ('values', 2, 6, -1),
    )
    __extensions__ = ()
    TYPE_ID = 31
    def __init__(self):
        super().__init__()
        # Shared immutable default until setOptions/getMutableOptions.
        self._options = EnumOptions.defaultInstance()
        self._values = coda.runtime.Object.EMPTY_LIST
    def _equalsImpl(self, other):
        return (super()._equalsImpl(other) and
            self._options == other._options and
            self._values == other._values)
    def _hashImpl(self):
        return hash((super()._hashImpl(),
            self._options,
            self._values))
    def _freezeImpl(self, deep=True):
        # Deep-freeze nested options; convert the value list to a tuple so
        # a frozen instance is hashable and immutable.
        super()._freezeImpl(deep)
        if deep and self._options and self._options.isMutable():
            self._options.freeze(deep)
        if type(self._values) is not tuple:
            self._values = tuple(self._values)
    def _writeFields(self, encoder):
        # Tag the record with this subtype before writing own fields.
        encoder.writeSubtypeHeader('EnumType', 31)
        if self.hasOptions():
            encoder.writeFieldHeader('options', 1)
            encoder.writeStruct(self._options, True)
        if len(self._values):
            encoder.writeFieldHeader('values', 2)
            encoder.writeBeginList(30, len(self._values))
            for val in self._values:
                encoder.writeStruct(val)
            encoder.writeEndList()
        super()._writeFields(encoder)
    def merge(self, src):
        """Merge *src*: options overwrite, values append.  Returns self."""
        super().merge(src)
        if src.hasOptions():
            self.setOptions(src.getOptions())
        self.getMutableValues().extend(src.getValues())
        return self
    def hasOptions(self):
        return self._isPresent('options')
    def getOptions(self):
        return self._options
    def getMutableOptions(self):
        """Return a privately owned, mutable options object (copy-on-write)."""
        self.checkMutable()
        if self._options is EnumOptions.defaultInstance() or not self._options.isMutable():
            self._setPresent('options')
            self._options = self._options.shallowCopy()
        return self._options
    def setOptions(self, options):
        self.checkMutable()
        self._options = options
        self._setPresent('options')
        return self
    def clearOptions(self):
        self.checkMutable()
        self._options = EnumOptions.defaultInstance()
        self._clearPresent('options')
        return self
    def getValues(self):
        return self._values
    def getMutableValues(self):
        """Return the value list, swapping in a private list on first write."""
        self.checkMutable()
        if self._values is coda.runtime.Object.EMPTY_LIST:
            self._values = []
        return self._values
    def setValues(self, values):
        self.checkMutable()
        self._values = values
        return self
    def clearValues(self):
        self.checkMutable()
        self._values = coda.runtime.Object.EMPTY_LIST
        return self
# =============================================================================
# ExtensionField
# =============================================================================
class ExtensionField(coda.runtime.Object):
    """Generated descriptor for a field declared via an extension.

    Records where the extension was declared (file, enclosing type,
    source line), which struct it extends, and the field's name, id
    and type.
    """
    __slots__ = [
        '_file',
        '_enclosingType',
        '_sourceLine',
        '_extends',
        '_name',
        '_id',
        '_type',
    ]
    __structs__ = ()
    __enums__ = ()
    # Field descriptors: (name, field id, type code, subtype index).
    # NOTE(review): exact meaning of the 3rd/4th entries is defined by
    # the coda runtime -- confirm before relying on them.
    __fields__ = (
        ('file', 1, 17, 0),
        ('enclosingType', 2, 23, -1),
        ('sourceLine', 3, 1, -1),
        ('extends', 4, 20, -1),
        ('name', 5, 2, -1),
        ('id', 6, 1, -1),
        ('type', 7, 21, -1),
    )
    __extensions__ = ()
    TYPE_ID = None
def __init__(self):
    """Initialize every field to its presence-tracked default."""
    super().__init__()
    self._file = None
    self._enclosingType = StructType.defaultInstance()
    self._sourceLine = 0
    self._extends = StructType.defaultInstance()
    self._name = coda.runtime.Object.EMPTY_STRING
    self._id = 0
    self._type = Type.defaultInstance()
def _equalsImpl(self, other):
    # Field-by-field structural equality; the superclass compares any
    # shared state first.
    return (super()._equalsImpl(other) and
        self._file == other._file and
        self._enclosingType == other._enclosingType and
        self._sourceLine == other._sourceLine and
        self._extends == other._extends and
        self._name == other._name and
        self._id == other._id and
        self._type == other._type)
def _hashImpl(self):
    # Must stay consistent with _equalsImpl: same fields, same order.
    return hash((super()._hashImpl(),
        self._file,
        self._enclosingType,
        self._sourceLine,
        self._extends,
        self._name,
        self._id,
        self._type))
def _freezeImpl(self, deep=True):
    # Deep-freeze all nested struct references that are still mutable.
    super()._freezeImpl(deep)
    if deep and self._file and self._file.isMutable():
        self._file.freeze(deep)
    if deep and self._enclosingType and self._enclosingType.isMutable():
        self._enclosingType.freeze(deep)
    if deep and self._extends and self._extends.isMutable():
        self._extends.freeze(deep)
    if deep and self._type and self._type.isMutable():
        self._type.freeze(deep)
def _writeFields(self, encoder):
    # Serialize only the fields whose presence bit is set, in field-id
    # order.  Struct-valued fields are written as references (True flag).
    if self.hasFile():
        encoder.writeFieldHeader('file', 1)
        encoder.writeStruct(self._file, True)
    if self.hasEnclosingType():
        encoder.writeFieldHeader('enclosingType', 2)
        encoder.writeStruct(self._enclosingType, True)
    if self.hasSourceLine():
        encoder.writeFieldHeader('sourceLine', 3)
        encoder.writeInteger(self._sourceLine)
    if self.hasExtends():
        encoder.writeFieldHeader('extends', 4)
        encoder.writeStruct(self._extends, True)
    if self.hasName():
        encoder.writeFieldHeader('name', 5)
        encoder.writeString(self._name)
    if self.hasId():
        encoder.writeFieldHeader('id', 6)
        encoder.writeInteger(self._id)
    if self.hasType():
        encoder.writeFieldHeader('type', 7)
        encoder.writeStruct(self._type, True)
def merge(self, src):
    """Copy every field present in *src* over this one; returns self."""
    if src.hasFile():
        self.setFile(src.getFile())
    if src.hasEnclosingType():
        self.setEnclosingType(src.getEnclosingType())
    if src.hasSourceLine():
        self.setSourceLine(src.getSourceLine())
    if src.hasExtends():
        self.setExtends(src.getExtends())
    if src.hasName():
        self.setName(src.getName())
    if src.hasId():
        self.setId(src.getId())
    if src.hasType():
        self.setType(src.getType())
    return self
def hasFile(self):
    return self._isPresent('file')
def getFile(self):
    return self._file
def getMutableFile(self):
    """Return a privately owned, mutable file reference (copy-on-write)."""
    self.checkMutable()
    # NOTE(review): when _file is still None (its default), this calls
    # None.shallowCopy() and raises AttributeError -- confirm callers
    # always set a file before requesting a mutable one.
    if self._file is None or not self._file.isMutable():
        self._setPresent('file')
        self._file = self._file.shallowCopy()
    return self._file
def setFile(self, file):
    self.checkMutable()
    self._file = file
    self._setPresent('file')
    return self
def clearFile(self):
    self.checkMutable()
    self._file = None
    self._clearPresent('file')
    return self
def hasEnclosingType(self):
    return self._isPresent('enclosingType')
def getEnclosingType(self):
    return self._enclosingType
def getMutableEnclosingType(self):
    """Return a privately owned, mutable enclosing type (copy-on-write)."""
    self.checkMutable()
    if self._enclosingType is StructType.defaultInstance() or not self._enclosingType.isMutable():
        self._setPresent('enclosingType')
        self._enclosingType = self._enclosingType.shallowCopy()
    return self._enclosingType
def setEnclosingType(self, enclosingType):
self.checkMutable()
| |
str_cmdLine, self.jid,
number_of_workers, cpu_limit, memory_limit, gpu_limit,
incoming_dir, outgoing_dir)
self.dp.qprint('Returning from openshift job...')
def json_filePart_get(self, **kwargs):
    """
    If the requested path is *within* a json "file" on the
    DB, then we need to find the file, and map the relevant
    path to components in that file.
    """
    # NOTE(review): no implementation -- the docstring states intent
    # only; DB_get() currently performs this file/sub-path mapping.
def DB_get(self, **kwargs):
    """
    Return part of the internal DB tree based on the path spec in the URL.

    kwargs:
        path    str: the request URL, e.g. '/api/v1/<job>/...'.

    Returns the matched subtree as a dict, or False when an indexed
    job offset cannot be resolved.  If the path reaches *inside* a
    JSON "file" stored in the tree, the file is located by walking
    up the path and the remaining components are resolved as nested
    keys within its contents.
    """
    r = C_stree()
    p = self.within.ptree
    pcwd = p.cwd()
    str_URLpath = "/api/v1/"
    for k, v in kwargs.items():
        if k == 'path':
            str_URLpath = v
    # Strip the leading '/api/v1' REST prefix to get the tree path.
    str_path = '/' + '/'.join(str_URLpath.split('/')[3:])
    self.dp.qprint("path = %s" % str_path)
    if str_path == '/':
        # Root node: only return the list of jobs.
        l_rootdir = p.lstr_lsnode(str_path)
        r.mknode(l_rootdir)
    else:
        # Hidden behaviour: a root component starting with '_' is an
        # integer index into the job list rather than a literal job ID
        # -- shorthand for accessing jobs by position.
        l_path = str_path.split('/')
        jobID = l_path[1]
        if jobID[0] == '_':
            jobOffset = jobID[1:]
            l_rootdir = list(p.lstr_lsnode('/'))
            self.dp.qprint('jobOffset = %s' % jobOffset)
            self.dp.qprint(l_rootdir)
            try:
                actualJob = l_rootdir[int(jobOffset)]
            except (IndexError, ValueError):
                # Offset out of range or not an integer.
                return False
            l_path[1] = actualJob
            str_path = '/'.join(l_path)
        r.mkdir(str_path)
        r.cd(str_path)
        r.cd('../')
        if not p.copy(startPath=str_path, destination=r)['status']:
            # We are probably trying to access a file...
            # First, remove the erroneous path in the return DB.
            r.rm(str_path)
            # Walk up the path until cat() succeeds, remembering the
            # components below the file.
            n = 0
            contents = p.cat(str_path)
            l_path = str_path.split('/')
            totalPathLen = len(l_path)
            l_pathFile = []
            while not contents and -1 * n < totalPathLen:
                n -= 1
                str_pathFile = '/'.join(str_path.split('/')[0:n])
                contents = p.cat(str_pathFile)
                l_pathFile.append(l_path[n])
            if contents and n < 0:
                # Components below the file, outermost first.
                l_pathFile = l_pathFile[::-1]
                self.dp.qprint('sub-file components = %s' % l_pathFile)
                # Fix: traverse the parsed structure directly instead of
                # eval()-ing a subscript expression built from URL
                # components (arbitrary-code-execution surface).
                for str_key in l_pathFile:
                    try:
                        contents = contents[str_key]
                    except (KeyError, IndexError, TypeError):
                        contents = False
                        break
            r.touch(str_path, contents)
    p.cd(pcwd)
    self.dp.qprint(r)
    self.dp.qprint(self.pp.pformat(dict(r.snode_root)).strip())
    return dict(r.snode_root)
def process(self, request, **kwargs):
    """ Process the message from remote client
    In some philosophical respects, this process() method in fact implements
    REST-like API of its own.

    *request* is the raw bytes of an HTTP-like message: the first line
    is "<VERB> <path> ...", the last line the JSON payload.  Returns a
    result dict describing the handled request, or False when the
    request is empty.
    """
    if len(request):
        REST_header = ""
        REST_verb = ""
        str_path = ""
        json_payload = ""
        self.dp.qprint("Listener ID - %s: process() - handling request" % (self.worker_id))
        now = datetime.datetime.today()
        str_timeStamp = now.strftime('%Y-%m-%d %H:%M:%S.%f')
        # Banner trace of the raw incoming stream.
        self.dp.qprint(Colors.YELLOW)
        self.dp.qprint("***********************************************")
        self.dp.qprint("***********************************************")
        self.dp.qprint("%s incoming data stream" % (str_timeStamp) )
        self.dp.qprint("***********************************************")
        self.dp.qprint("len = %d" % len(request))
        self.dp.qprint("***********************************************")
        self.dp.qprint(Colors.CYAN + "%s\n" % (request.decode()) + Colors.YELLOW)
        self.dp.qprint("***********************************************" + Colors.NO_COLOUR)
        # Split the message: header line first, JSON payload last.
        l_raw = request.decode().split('\n')
        FORMtype = l_raw[0].split('/')[0]
        self.dp.qprint('Request = ...')
        self.dp.qprint(l_raw)
        REST_header = l_raw[0]
        REST_verb = REST_header.split()[0]
        str_path = REST_header.split()[1]
        json_payload = l_raw[-1]
        # remove trailing '/' if any on path
        if str_path[-1] == '/': str_path = str_path[0:-1]
        d_ret = {'status': False,
                 'RESTheader': REST_header,
                 'RESTverb': REST_verb,
                 'action': "",
                 'path': str_path,
                 'receivedByServer': l_raw}
        self.dp.qprint("Using token authentication: %s" % self.b_tokenAuth)
        # Authorize (or skip authorization when token auth is disabled).
        if (not self.b_tokenAuth) or self.authModule.authorizeClientRequest(request.decode())[0]:
            self.dp.qprint("Request authorized")
            if REST_verb == 'GET':
                d_ret['GET'] = self.DB_get(path = str_path)
                d_ret['status'] = True
            self.dp.qprint('json_payload = %s' % self.pp.pformat(json_payload).strip())
            d_ret['client_json_payload'] = json_payload
            d_ret['client_json_len'] = len(json_payload)
            if len(json_payload):
                # NOTE(review): json.loads() raises on malformed payloads;
                # there is no guard here -- confirm clients always send
                # well-formed JSON on the last line.
                d_payload = json.loads(json_payload)
                d_request = d_payload['payload']
                payload_verb = d_request['action']
                if 'meta' in d_request.keys():
                    d_meta = d_request['meta']
                d_ret['payloadsize']= len(json_payload)
                if payload_verb == 'quit':
                    self.dp.qprint('Shutting down server...')
                    d_ret['status'] = True
                # 'run' over PUT goes to the PUT dispatcher.
                if payload_verb == 'run' and REST_verb == 'PUT':
                    d_ret['action'] = payload_verb
                    self.processPUT( request = d_request)
                    d_ret['status'] = True
                if REST_verb == 'POST':
                    self.processPOST( request = d_request,
                                      ret = d_ret)
        else:
            self.dp.qprint("Request unauthorized")
        return d_ret
    else:
        return False
def methodName_parse(self, **kwargs):
    """
    Construct the processing method name (string) by parsing the
    request dictionary.

    kwargs:
        request     dict: must contain 'action'; may contain 'meta'.

    The base name is 't_<action>_process'; when meta requests a
    container run, a suffix selects the openshift/swarm variant
    based on self.container_env.
    """
    d_meta = {}
    str_methodSuffix = ""      # a possible 'subclass' specialization
    # Fix: previously d_request was only bound inside a kwargs loop and
    # a missing 'request' kwarg raised NameError below.
    d_request = kwargs.get('request', {})
    payload_verb = d_request['action']
    if 'meta' in d_request.keys():
        d_meta = d_request['meta']
    if 'container' in d_meta.keys():
        if self.container_env == 'openshift':
            # append suffix _openshift to redirect to openshift function
            str_methodSuffix = '_openshift'
        elif self.container_env == 'swarm':
            # append suffix _container to redirect to container function
            str_methodSuffix = '_container'
    str_method = 't_%s_process%s' % (payload_verb, str_methodSuffix)
    return str_method
def processPOST(self, **kwargs):
    """
    Dispatcher for POST requests.

    kwargs:
        request     dict: the decoded client payload ('action' required,
                    'meta' optional).
        ret         dict: result structure to populate and return.

    Resolves the handler 't_<action>_process[...]' and runs it either in
    a new thread (meta['threaded'] truthy) or inline in this thread.
    Raises NotImplementedError when the handler does not exist.
    """
    d_request = kwargs.get('request', {})
    d_ret = kwargs.get('ret', {})
    payload_verb = d_request['action']
    # Fix: default the meta dict so requests without 'meta' no longer
    # raise NameError at d_ret['meta'] below.
    d_meta = d_request.get('meta', {})
    d_ret['action'] = payload_verb
    d_ret['meta'] = d_meta
    b_threaded = d_meta.get('threaded', False)
    if b_threaded:
        self.dp.qprint("Will process request in new thread.")
        str_method = self.methodName_parse(request = d_request)
        try:
            pf_method = getattr(self, str_method)
        except AttributeError:
            # Fix: report the actual class name (was pman.__class__.__name__).
            raise NotImplementedError("Class `{}` does not implement `{}`".format(
                self.__class__.__name__, str_method))
        t_process = threading.Thread(target = pf_method,
                                     args   = (),
                                     kwargs = kwargs)
        t_process.start()
        # Give the worker a moment to start before returning.
        time.sleep(0.1)
        d_ret['status'] = True
    else:
        self.dp.qprint("Will process request in current thread.")
        # Fix: dispatch via getattr instead of eval() on a formatted
        # string -- same lookup, no code-execution surface, and a clear
        # error when the handler is missing.
        str_method = 't_%s_process' % payload_verb
        try:
            pf_method = getattr(self, str_method)
        except AttributeError:
            raise NotImplementedError("Class `{}` does not implement `{}`".format(
                self.__class__.__name__, str_method))
        d_done = pf_method(request = d_request)
        try:
            d_ret['d_ret']  = d_done["d_ret"]
            d_ret['status'] = d_done["status"]
        except (KeyError, TypeError):
            self.dp.qprint("An error occurred in reading ret structure. Should this method have been threaded?")
    return d_ret
def processPUT(self, **kwargs):
    """
    Dispatcher for PUT requests.

    kwargs:
        request     dict: the decoded client payload ('action' and
                    'meta' required).

    A 'run' action with meta context 'db' saves/loads the internal DB
    via self.within.DB_fileIO().  When meta contains 'key', the DB
    tree is first narrowed to the jobs matched by a search.
    """
    d_request   = {}
    str_action  = "run"
    str_cmd     = "save"
    # Fix: default the context -- previously str_context was only bound
    # when 'context' was present in meta, so the final condition raised
    # NameError for requests without a context.
    str_context = ""
    str_DBpath  = self.str_DBpath
    str_fileio  = "json"
    tree_DB     = self.within.ptree
    for k, v in kwargs.items():
        if k == 'request':
            d_request = v
    str_action = d_request['action']
    self.dp.qprint('action = %s' % str_action)
    d_meta = d_request['meta']
    # Optional search criteria: narrow the DB to the matched jobs.
    if 'key' in d_meta:
        d_search = self.t_search_process(request = d_request)['d_ret']
        Tj  = C_stree()
        Tdb = C_stree()
        for j in d_search.keys():
            d_j = d_search[j]
            for job in d_j.keys():
                str_pathJob = '/api/v1/' + job
                d_job = self.DB_get(path = str_pathJob)
                Tj.initFromDict(d_job)
                Tj.copy(startPath = '/', destination = Tdb)
        tree_DB = Tdb
    if 'context'   in d_meta: str_context = d_meta['context']
    if 'operation' in d_meta: str_cmd     = d_meta['operation']
    if 'dbpath'    in d_meta: str_DBpath  = d_meta['dbpath']
    if 'fileio'    in d_meta: str_fileio  = d_meta['fileio']
    if str_action.lower() == 'run' and str_context.lower() == 'db':
        self.within.DB_fileIO(cmd    = str_cmd,
                              fileio = str_fileio,
                              dbpath = str_DBpath,
                              db     = tree_DB)
class Poller(threading.Thread):
    """
    The Poller checks for running processes based on the internal
    DB and system process table. Jobs that are no longer running are
    removed from the internal DB.

    In this implementation the Poller wraps a single Crunner thread
    and relays that thread's start/end job queues up to its own
    queueStart/queueEnd until the Crunner signals completion.
    """
    def __init__(self, **kwargs):
        # Defaults; overridable via kwargs below.  Attributes are set
        # before threading.Thread.__init__ on purpose (no thread state
        # is touched until start()).
        self.pollTime = 10
        self.str_cmd = ""
        self.crunner = None
        # Queues that relay job lifecycle events to the owner.
        self.queueStart = queue.Queue()
        self.queueEnd = queue.Queue()
        self.queueAllDone = queue.Queue()
        self.__name__ = 'Poller'
        # self.dp.qprint('starting...', level=-1)
        # Debug parameters
        self.str_debugFile = '/dev/null'
        self.b_debugToFile = True
        self.verbosity = 1
        for key,val in kwargs.items():
            if key == 'pollTime':     self.pollTime       = val
            if key == 'cmd':          self.str_cmd        = val
            if key == 'debugFile':    self.str_debugFile  = val
            if key == 'debugToFile':  self.b_debugToFile  = val
            if key == 'verbosity':    self.verbosity      = int(val)
        self.dp = pfmisc.debug(
            verbosity   = self.verbosity,
            debugFile   = self.str_debugFile,
            debugToFile = self.b_debugToFile,
            within      = self.__name__)
        threading.Thread.__init__(self)
    def run(self):
        # NOTE(review): timeout/loop appear unused, and the string below
        # is not a real docstring (it follows statements).
        timeout = 1
        loop = 10
        """ Main execution. """
        # Spawn the crunner object container
        self.crunner = Crunner(cmd          = self.str_cmd,
                               debugToFile  = self.b_debugToFile,
                               verbosity    = self.verbosity,
                               debugFile    = self.str_debugFile)
        self.crunner.start()
        b_jobsAllDone = False
        while not b_jobsAllDone:
            try:
                b_jobsAllDone = self.crunner.queueAllDone.get_nowait()
            except queue.Empty:
                # We basically propagate the queue contents "up" the chain.
                # Both get() calls block until the Crunner posts the info.
                self.dp.qprint('Waiting on start job info')
                self.queueStart.put(self.crunner.queueStart.get())
                self.dp.qprint('Waiting on end job info')
                self.queueEnd.put(self.crunner.queueEnd.get())
        self.queueAllDone.put(b_jobsAllDone)
        self.dp.qprint("done with Poller.run")
class Crunner(threading.Thread):
"""
The wrapper thread about the actual process.
"""
def __init__(self, **kwargs):
self.__name = "Crunner"
self.queueStart = queue.Queue()
self.queueEnd = queue.Queue()
self.queueAllDone = queue.Queue()
self.str_cmd = ""
# Debug parameters
self.str_debugFile = '/dev/null'
self.b_debugToFile = True
self.verbosity = 1
for k,v in kwargs.items():
if k == 'cmd': self.str_cmd = v
if k == 'debugFile': self.str_debugFile = v
if k == 'debugToFile': self.b_debugToFile = v
if k == 'verbosity': self.verbosity = int(v)
self.shell = crunner( verbosity = self.verbosity,
debugToFile = self.b_debugToFile,
debugFile = self.str_debugFile)
self.dp = pfmisc.debug(
verbosity = self.verbosity,
debugFile = self.str_debugFile,
debugToFile = | |
###
# Introspective Autoencoder Main training Function
# <NAME>, 2016
import argparse
import imp
import time
import logging
# import sys
# sys.path.insert(0, 'C:\Users\Andy\Generative-and-Discriminative-Voxel-Modeling')
import numpy as np
from path import Path
import theano
import theano.tensor as T
import lasagne
from utils import checkpoints, npytar, metrics_logging
from collections import OrderedDict
import matplotlib
matplotlib.use('Agg') # Turn this off if you want to display plots on your own computer or have X11 forwarding set up.
import matplotlib.pyplot as plt
#####################
# Training Functions#
#####################
#
# This function compiles all theano functions and returns
# two dicts containing the functions and theano variables.
#
def make_training_functions(cfg, model):
    """Compile all theano functions for training/testing the voxel VAE.

    Parameters
    ----------
    cfg : dict
        Experiment configuration: 'batch_size', 'learning_rate', 'reg',
        'momentum' plus the flags 'discriminative', 'introspect',
        'kl_div'.
    model : dict
        Named lasagne layers: 'l_in', 'l_out', 'l_latents', 'l_mu',
        'l_ls', 'l_classifier', 'l_cc'.

    Returns
    -------
    (tfuncs, tvars)
        tfuncs: compiled theano functions ('update_iter', 'test_function');
        tvars: the symbolic/shared variables needed to drive them.
    """
    # Input Array
    X = T.TensorType('float32', [False]*5)('X')
    # Class Vector, for classification or augmenting the latent space vector
    y = T.TensorType('float32', [False]*2)('y')
    # Shared variable for input array
    X_shared = lasagne.utils.shared_empty(5, dtype='float32')
    # Shared variable for class vector
    y_shared = lasagne.utils.shared_empty(2, dtype='float32')
    # Input layer
    l_in = model['l_in']
    # Output layer
    l_out = model['l_out']
    # Latent Layer
    l_latents = model['l_latents']
    # Latent Means
    l_mu = model['l_mu']
    # Log-sigmas
    l_ls = model['l_ls']
    # Classifier
    l_classifier = model['l_classifier']
    # Class-conditional latents
    l_cc = model['l_cc']
    # Decoder Layers, including final output layer
    l_decoder = lasagne.layers.get_all_layers(l_out)[len(lasagne.layers.get_all_layers(l_latents)):]
    # Batch Parameters
    batch_index = T.iscalar('batch_index')
    batch_slice = slice(batch_index*cfg['batch_size'], (batch_index+1)*cfg['batch_size'])
    #####################################
    # Step 1: Compute full forward pass #
    #####################################
    #
    # Note that calling get_output() builds a new graph each time.
    # Get outputs
    outputs = lasagne.layers.get_output([l_out]+[l_mu]+[l_ls]+[l_classifier]+lasagne.layers.get_all_layers(l_classifier),
                {l_in:X, model['l_cc']:y}) # Consider swapping l_classifier in for l_latents
    # Get the reconstruction
    X_hat = outputs[0]
    # Get latent means
    Z_mu = outputs[1]
    # Get latent logsigmas
    Z_ls = outputs[2]
    # Get classification guesses
    y_hat = outputs[3]
    # Get the outputs of the encoder layers, given the training input
    g_X = outputs[5:]
    # Get the outputs of the feature layers of the encoder given the reconstruction
    g_X_hat = lasagne.layers.get_output(lasagne.layers.get_all_layers(l_classifier)[1:], lasagne.nonlinearities.tanh(X_hat))
    # Get testing outputs
    [X_hat_deterministic, latent_values, y_hat_deterministic] = lasagne.layers.get_output([l_out, l_latents, l_classifier],
                {l_in:X, model['l_cc']:y}, deterministic=True)
    #################################
    # Step 2: Define loss functions #
    #################################
    # L2 normalization for all params
    l2_all = lasagne.regularization.regularize_network_params(l_out,
                lasagne.regularization.l2)
    # Weighted binary cross-entropy for use in voxel loss. Allows weighting of false positives relative to false negatives.
    # Nominally set to strongly penalize false negatives
    def weighted_binary_crossentropy(output, target):
        return -(98.0*target * T.log(output) + 2.0*(1.0 - target) * T.log(1.0 - output))/100.0
    # Voxel-Wise Reconstruction Loss
    # Note that the output values are clipped to prevent the BCE from evaluating log(0).
    voxel_loss = T.cast(T.mean(weighted_binary_crossentropy(T.clip(lasagne.nonlinearities.sigmoid( X_hat ), 1e-7, 1.0 - 1e-7), X)), 'float32')
    # KL Divergence from isotropic gaussian prior
    kl_div = -0.5 * T.mean(1 + 2*Z_ls - T.sqr(Z_mu) - T.exp(2 * Z_ls))
    # Compute classification loss if augmenting with a classification objective
    if cfg['discriminative']:
        print('discriminating')
        classifier_loss = T.cast(T.mean(T.nnet.categorical_crossentropy(T.nnet.softmax(y_hat), y)), 'float32')
        classifier_error_rate = T.cast( T.mean( T.neq(T.argmax(y_hat,axis=1), T.argmax(y,axis=1)) ), 'float32' )
        classifier_test_error_rate = T.cast( T.mean( T.neq(T.argmax(y_hat_deterministic,axis=1), T.argmax(y,axis=1))), 'float32' )
        # Sum the reconstruction loss, the regularization term, the KL divergence over the prior, and the classifier loss.
        # Optionally ignore the kl divergence term.
        reg_voxel_loss = voxel_loss + cfg['reg']*l2_all +classifier_loss+kl_div if cfg['kl_div'] else voxel_loss + cfg['reg']*l2_all +classifier_loss
    # If not, ignore classifier
    else:
        classifier_loss = None
        classifier_error_rate = None
        classifier_test_error_rate = None
        # Sum the reconstruction loss, the regularization term, and the KL divergence over the prior.
        # Optionally ignore the kl divergence term.
        reg_voxel_loss = voxel_loss + cfg['reg']*l2_all+kl_div if cfg['kl_div'] else voxel_loss + cfg['reg']*l2_all
    ##########################
    # Step 3: Define Updates #
    ##########################
    # Define learning rate in case of annealing or decay.
    if isinstance(cfg['learning_rate'], dict):
        learning_rate = theano.shared(np.float32(cfg['learning_rate'][0]))
    else:
        learning_rate = theano.shared(np.float32(cfg['learning_rate']))
    # All network params
    params = lasagne.layers.get_all_params(l_out, trainable=True)
    # Decoder params
    decoder_params = lasagne.layers.get_all_params(l_out, trainable=True)[len(lasagne.layers.get_all_params(l_latents, trainable=True)):]
    # Update dict
    updates = OrderedDict()
    # Reconstruction and Regularization SGD terms
    # Note that momentum (or a variant such as Adam) is added further down.
    voxel_grads = lasagne.updates.get_or_compute_grads(reg_voxel_loss, params)
    for param, grad in zip(params, voxel_grads):
        updates[param] = param - learning_rate * grad
    # Feature SGD Terms (AKA Introspective SGD Terms)
    # Note that momentum (or a variant such as Adam) is added further down.
    # Optionally add scale term to weight deeper layers more heavily.
    if cfg['introspect']:
        # To scale weights differently, add /sum(xrange(1,len(g_X_hat)-1))
        # Also (i+1) to scale weights
        # Fix: range instead of xrange so the graph also builds on Python 3.
        feature_loss = T.cast(T.mean([T.mean(lasagne.objectives.squared_error(g_X[i], g_X_hat[i])) for i in range(0, len(g_X_hat)-2)]), 'float32')
        feature_grads = lasagne.updates.get_or_compute_grads(feature_loss, decoder_params)
        for param, grad in zip(decoder_params, feature_grads):
            updates[param] += - learning_rate * grad
    else:
        feature_loss = None
    # Apply nesterov momentum to all updates.
    updates = lasagne.updates.apply_nesterov_momentum(updates, momentum=cfg['momentum'])
    # Reconstruction Accuracy Term
    error_rate = T.cast( T.mean( T.neq(T.ge(X_hat,0), T.ge(X,0))), 'float32' )
    # Test Reconstruction Accuracy
    test_error_rate = T.cast( T.mean( T.neq(T.ge(X_hat_deterministic,0), T.ge(X,0))), 'float32' )
    # Test Reconstruction True Positives
    true_positives = T.cast(T.mean(T.eq(T.ge(X_hat_deterministic,0), T.ge(X,0.5))*T.ge(X,0.5))/T.mean(T.ge(X,0.5)), 'float32')
    # Test Reconstruction True Negatives
    true_negatives = T.cast(T.mean(T.eq(T.ge(X_hat_deterministic,0), T.ge(X,0.5))*T.lt(X,0.5))/T.mean(T.lt(X,0.5)), 'float32')
    # List comprehension to define which outputs are available during training
    update_outs = [x for x in [voxel_loss,
                               feature_loss,
                               classifier_loss,
                               kl_div,
                               classifier_error_rate,
                               error_rate] if x is not None]
    # Training function
    update_iter = theano.function([batch_index], update_outs,
                                  updates=updates, givens={
                                      X: X_shared[batch_slice],
                                      y: y_shared[batch_slice]
                                  }, on_unused_input='warn')
    # List comprehension to define which outputs are available during testing
    test_outs = [x for x in [test_error_rate,
                             classifier_test_error_rate,
                             latent_values, true_positives, true_negatives] if x is not None]
    # Test function
    test_error_fn = theano.function([batch_index],
                                    test_outs, givens={
                                        X: X_shared[batch_slice],
                                        y: y_shared[batch_slice]
                                    }, on_unused_input='warn')
    # Dictionary of theano functions
    tfuncs = {'update_iter': update_iter,
              'test_function': test_error_fn,
             }
    # Dictionary of theano variables
    tvars = {'X': X,
             'y': y,
             'X_shared': X_shared,
             'y_shared': y_shared,
             'batch_slice': batch_slice,
             'batch_index': batch_index,
             'learning_rate': learning_rate,
            }
    return tfuncs, tvars
## Data augmentation function from Voxnet, which randomly translates
## and/or horizontally flips a chunk of data.
def jitter_chunk(src, cfg):
    """Randomly flip and translate a chunk of voxel data (from VoxNet).

    With probability .2 each, mirror the chunk along the i and j spatial
    axes, then shift it by a random offset of at most
    cfg['max_jitter_ij'] voxels in i/j and cfg['max_jitter_k'] in k
    (with wraparound, via np.roll).  *src* has shape (N, C, i, j, k)
    and is not modified; a new array of the same shape is returned.
    """
    dst = src.copy()
    # Reversed-slice views replace the original aliased self-assignment
    # (dst[...] = dst), which depended on NumPy's overlap buffering.
    if np.random.binomial(1, .2):
        dst = dst[:, :, ::-1, :, :]
    if np.random.binomial(1, .2):
        dst = dst[:, :, :, ::-1, :]
    max_ij = cfg['max_jitter_ij']
    max_k = cfg['max_jitter_k']
    # Fix: random_integers (inclusive bounds) was deprecated and removed
    # from NumPy; randint with an exclusive upper bound draws the same
    # values from the same stream: random_integers(lo, hi) == randint(lo, hi+1).
    shift_ijk = [np.random.randint(-max_ij, max_ij + 1),
                 np.random.randint(-max_ij, max_ij + 1),
                 np.random.randint(-max_k, max_k + 1)]
    for axis, shift in enumerate(shift_ijk):
        if shift != 0:
            # beware wraparound
            dst = np.roll(dst, shift, axis + 2)
    return dst
## Data loading function, originally from VoxNet.
def data_loader(cfg, fname):
    """Yield augmented training chunks from a tar of .npy voxel grids.

    Each yielded pair is (X, y): a chunk of examples concatenated with a
    jittered copy of itself (doubling it to batch_size*batches_per_chunk
    examples), shuffled, with voxel values rescaled via 3*x - 1.  Labels
    are one-hot rows built from the leading integer in each entry name.
    """
    dims = cfg['dims']
    # Half-size chunks: appending the jittered copy doubles them back up.
    chunk_size = cfg['batch_size']*cfg['batches_per_chunk']//2
    xc = np.zeros((chunk_size, cfg['n_channels'],)+dims, dtype=np.float32)
    reader = npytar.NpyTarReader(fname)
    yc = np.zeros((chunk_size,cfg['n_classes']),dtype = np.float32)
    counter = []
    for ix, (x, name) in enumerate(reader):
        cix = ix % chunk_size
        xc[cix] = x.astype(np.float32)
        # Entry names look like '<class>.<...>' with 1-based class labels.
        yc[cix,(int(name.split('.')[0])-1)] = 1
        counter.append(int(name.split('.')[0])-1)
        if len(counter) == chunk_size:
            # Shuffle original + jittered copies together.
            indices = np.random.permutation(2*len(xc))
            yield (3.0 * np.append(xc,jitter_chunk(xc, cfg),axis=0)[indices] - 1.0, np.append(yc,yc,axis=0)[indices])
            counter = []
            yc.fill(0)
            xc.fill(0)
    if len(counter) > 0:
        # pad to nearest multiple of batch_size
        # NOTE(review): when len(counter) is already a nonzero multiple of
        # batch_size but smaller than chunk_size, the buffers are not
        # truncated here and the yield below includes untouched zero rows
        # -- confirm whether that case can occur with real archives.
        if len(counter)%cfg['batch_size'] != 0:
            new_size = int(np.ceil(len(counter)/float(cfg['batch_size'])))*cfg['batch_size']
            xc = xc[:new_size]
            # Repeat leading examples to fill the padded tail.
            xc[len(counter):] = xc[:(new_size-len(counter))]
            yc = yc[:new_size]
            yc[len(counter):] = yc[:(new_size-len(counter))]
            counter = counter + counter[:(new_size-len(counter))]
        indices = np.random.permutation(2*len(xc))
        yield (3.0 * np.append(xc,jitter_chunk(xc, cfg),axis=0)[indices] - 1.0, np.append(yc,yc,axis=0)[indices])
# Test data loading function, originally from VoxNet
def test_data_loader(cfg,fname):
    """Yield evaluation chunks from a tar of .npy voxel grids.

    Like data_loader() but with full-size chunks, no jitter augmentation
    and no shuffling; voxel values are rescaled via 3*x - 1.
    """
    dims = cfg['dims']
    chunk_size = cfg['batch_size']*cfg['batches_per_chunk']
    xc = np.zeros((chunk_size, cfg['n_channels'],)+dims, dtype=np.float32)
    reader = npytar.NpyTarReader(fname)
    yc = np.zeros((chunk_size,cfg['n_classes']),dtype = np.float32)
    counter = []
    for ix, (x, name) in enumerate(reader):
        cix = ix % chunk_size
        xc[cix] = x.astype(np.float32)
        # Entry names look like '<class>.<...>' with 1-based class labels.
        yc[cix,(int(name.split('.')[0])-1)] = 1
        counter.append(int(name.split('.')[0])-1)
        if len(counter) == chunk_size:
            yield (3.0*xc-1.0, yc)
            counter = []
            yc.fill(0)
            xc.fill(0)
    if len(counter) > 0:
        # pad to nearest multiple of batch_size
        # NOTE(review): same corner case as data_loader -- a nonzero
        # multiple of batch_size shorter than chunk_size is yielded with
        # untouched zero rows.
        if len(counter)%cfg['batch_size'] != 0:
            new_size = int(np.ceil(len(counter)/float(cfg['batch_size'])))*cfg['batch_size']
            xc = xc[:new_size]
            # Repeat leading examples to fill the padded tail.
            xc[len(counter):] = xc[:(new_size-len(counter))]
            yc = yc[:new_size]
            yc[len(counter):] = yc[:(new_size-len(counter))]
            counter = counter + counter[:(new_size-len(counter))]
        yield (3.0*xc-1.0, yc)
# Main Function
def main(args):
# Load config file
config_module = imp.load_source('config', args.config_path)
cfg = config_module.cfg
# Define weights file name
weights_fname = str(args.config_path)[:-3]+'.npz'
# Define training metrics filename
metrics_fname = weights_fname[:-4]+'METRICS.jsonl'
# Prepare Logs
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s| %(message)s')
logging.info('Metrics will be saved to {}'.format(metrics_fname))
mlog = metrics_logging.MetricsLogger(metrics_fname, reinitialize=True)
# Get model and compile theano functions
model = config_module.get_model()
logging.info('Compiling theano functions...')
tfuncs, tvars = make_training_functions(cfg,model)
logging.info('Training...')
# Iteration Counter. One iteration | |
# Repository: lijiawei20161002/SecureAuction
"""This module collects the secure (secret-shared) types for MPyC.
Secure (secret-shared) number types all use a common base class, which
ensures that operators such as +, *, >= are defined by operator overloading.
"""
import functools
import asyncio
from mpyc import gmpy
from mpyc import gf2x
from mpyc import bfield
from mpyc import pfield
runtime = None
class Share:
    """A secret-shared value.

    An MPC protocol operates on secret-shared values, represented by Share
    objects. The basic Python operators are overloaded for Share objects.
    An expression like a * b will create a new Share object, which will
    eventually contain the product of a and b. The product is computed
    asynchronously, using an instance of a specific cryptographic protocol.
    """

    __slots__ = 'df'  # concrete value, or an asyncio.Future resolved later

    def __init__(self, value=None):
        """Initialize a share.

        With a concrete value the share is resolved immediately; otherwise
        it is backed by a Future that the runtime fills in asynchronously.
        """
        if value is not None:
            self.df = value
        else:
            # Modernized: asyncio.Future(loop=...) is deprecated since
            # Python 3.8 and removed in 3.10; ask the runtime's event loop
            # to create the future instead (equivalent behavior).
            self.df = runtime._loop.create_future()

    def __bool__(self):
        """Use of secret-shared values in Boolean expressions makes no sense."""
        raise TypeError('cannot use secure type in Boolean expressions')

    def _coerce(self, other):
        # Coerce a public int/float into a share of self's type; a Share of
        # a different secure type is rejected with NotImplemented.
        if isinstance(other, Share):
            if not isinstance(other, type(self)):
                return NotImplemented
        elif isinstance(other, int):
            other = type(self)(other)
        elif isinstance(other, float):
            # Floats are only meaningful for fixed-point secure types.
            if isinstance(self, SecureFixedPoint):
                other = type(self)(other)
            else:
                return NotImplemented
        return other

    def _coerce2(self, other):
        # Like _coerce, but keeps public ints (and integral floats for
        # fixed-point types) as plain Python values, for operations that
        # support mixed public/secret operands (e.g. mul, div).
        if isinstance(other, Share):
            if not isinstance(other, type(self)):
                return NotImplemented
        elif isinstance(other, int):
            pass
        elif isinstance(other, float):
            if isinstance(self, SecureFixedPoint):
                if other.is_integer():
                    other = round(other)
            else:
                return NotImplemented
        return other

    def __neg__(self):
        """Negation."""
        return runtime.neg(self)

    def __add__(self, other):
        """Addition."""
        other = self._coerce(other)
        if other is NotImplemented:
            return NotImplemented
        return runtime.add(self, other)

    __radd__ = __add__

    def __sub__(self, other):
        """Subtraction."""
        other = self._coerce(other)
        if other is NotImplemented:
            return NotImplemented
        return runtime.sub(self, other)

    def __rsub__(self, other):
        """Subtraction (with reflected arguments)."""
        other = self._coerce(other)
        if other is NotImplemented:
            return NotImplemented
        return runtime.sub(other, self)

    def __mul__(self, other):
        """Multiplication."""
        other = self._coerce2(other)
        if other is NotImplemented:
            return NotImplemented
        return runtime.mul(self, other)

    __rmul__ = __mul__

    def __truediv__(self, other):
        """Division."""
        other = self._coerce2(other)
        if other is NotImplemented:
            return NotImplemented
        return runtime.div(self, other)

    def __rtruediv__(self, other):
        """Division (with reflected arguments)."""
        other = self._coerce2(other)
        if other is NotImplemented:
            return NotImplemented
        return runtime.div(other, self)

    def __mod__(self, other):
        """Integer remainder with public divisor."""
        other = self._coerce(other)
        if other is NotImplemented:
            return NotImplemented
        # Divisor must be public: its concrete value is read from the share.
        return runtime.mod(self, other.df.value)

    def __rmod__(self, other):
        """Integer remainder (with reflected arguments)."""
        return NotImplemented

    def __floordiv__(self, other):
        """Integer quotient with public divisor."""
        return self.__divmod__(other)[0]

    def __rfloordiv__(self, other):
        """Integer quotient (with reflected arguments)."""
        return NotImplemented

    def __divmod__(self, other):
        """Integer division with public divisor."""
        other = self._coerce(other)
        if other is NotImplemented:
            return NotImplemented
        r = runtime.mod(self, other.df.value)
        q = (self - r) / other.df
        # Undo the fixed-point scaling introduced by the division
        # (frac_length is 0 for integer field elements).
        return q * 2**other.df.frac_length, r

    def __rdivmod__(self, other):
        """Integer division (with reflected arguments)."""
        return NotImplemented

    def __pow__(self, other):
        """Exponentation for public integral exponent."""
        # TODO: extend to secret exponent
        if not isinstance(other, int):
            return NotImplemented
        return runtime.pow(self, other)

    def __lshift__(self, other):
        """Left shift with public integral offset."""
        # TODO: extend to secret offset
        if not isinstance(other, int):
            return NotImplemented
        return runtime.mul(self, 1 << other)

    def __rlshift__(self, other):
        """Left shift (with reflected arguments)."""
        return NotImplemented

    def __rshift__(self, other):
        """Right shift with public integral offset."""
        # TODO: extend to secret offset
        if not isinstance(other, int):
            return NotImplemented
        return self.__floordiv__(1 << other)

    def __rrshift__(self, other):
        """Right shift (with reflected arguments)."""
        return NotImplemented

    def __and__(self, other):
        """Bitwise and, for now 1-bit only."""
        return self * other

    __rand__ = __and__

    def __xor__(self, other):
        """Bitwise exclusive-or, for now 1-bit only."""
        return self + other - 2 * self * other

    __rxor__ = __xor__

    def __invert__(self):
        """Bitwise not (inversion), for now 1-bit only."""
        return 1 - self

    def __or__(self, other):
        """Bitwise or, for now 1-bit only."""
        return self + other - self * other

    __ror__ = __or__

    def __ge__(self, other):
        """Greater-than or equal comparison."""
        # self >= other
        c = self - other
        return runtime.sgn(c, GE=True)

    def __gt__(self, other):
        """Strictly greater-than comparison."""
        # self > other <=> not (self <= other)
        c = other - self
        return 1 - runtime.sgn(c, GE=True)

    def __le__(self, other):
        """Less-than or equal comparison."""
        # self <= other <=> other >= self
        c = other - self
        return runtime.sgn(c, GE=True)

    def __lt__(self, other):
        """Strictly less-than comparison."""
        # self < other <=> not (self >= other)
        c = self - other
        return 1 - runtime.sgn(c, GE=True)

    def __eq__(self, other):
        """Equality testing."""
        # self == other
        c = self - other
        return runtime.is_zero(c)

    def __ne__(self, other):
        """Negated equality testing."""
        # self != other <=> not (self == other)
        c = self - other
        return 1 - runtime.is_zero(c)
class SecureFiniteField(Share):
    """Base class for secret-shared finite field values.

    NB: bit-oriented operations will be supported for prime fields.
    """

    __slots__ = ()

    field = None  # set on the dynamically generated subclass

    def __mod__(self, other):
        """Remainder is not supported for finite-field shares."""
        return NotImplemented

    def __rmod__(self, other):
        """Remainder is not supported for finite-field shares."""
        return NotImplemented

    def __floordiv__(self, other):
        """Integer quotient is not supported for finite-field shares."""
        return NotImplemented

    def __rfloordiv__(self, other):
        """Integer quotient is not supported for finite-field shares."""
        return NotImplemented

    def __divmod__(self, other):
        """Integer division is not supported for finite-field shares."""
        return NotImplemented

    def __rdivmod__(self, other):
        """Integer division is not supported for finite-field shares."""
        return NotImplemented

    def __lshift__(self, other):
        """Shifts are not supported for finite-field shares."""
        return NotImplemented

    def __rlshift__(self, other):
        """Shifts are not supported for finite-field shares."""
        return NotImplemented

    def __rshift__(self, other):
        """Shifts are not supported for finite-field shares."""
        return NotImplemented

    def __rrshift__(self, other):
        """Shifts are not supported for finite-field shares."""
        return NotImplemented

    def __and__(self, other):
        """Bitwise and: full support in binary fields, else 1-bit only."""
        if isinstance(self.field.modulus, int):
            # Prime field (integer modulus): fall back to 1-bit arithmetic.
            return super().__and__(other)
        return runtime.and_(self, other)

    def __xor__(self, other):
        """Bitwise exclusive-or: full support in binary fields, else 1-bit only."""
        if isinstance(self.field.modulus, int):
            return super().__xor__(other)
        return runtime.xor(self, other)

    def __invert__(self):
        """Bitwise not (inversion): full support in binary fields, else 1-bit only."""
        if isinstance(self.field.modulus, int):
            return super().__invert__()
        return runtime.invert(self)

    def __or__(self, other):
        """Bitwise or: full support in binary fields, else 1-bit only."""
        if isinstance(self.field.modulus, int):
            return super().__or__(other)
        return runtime.or_(self, other)

    def __ge__(self, other):
        """Ordering is not defined on finite-field shares."""
        return NotImplemented

    def __gt__(self, other):
        """Ordering is not defined on finite-field shares."""
        return NotImplemented

    def __le__(self, other):
        """Ordering is not defined on finite-field shares."""
        return NotImplemented

    def __lt__(self, other):
        """Ordering is not defined on finite-field shares."""
        return NotImplemented
class SecureInteger(Share):
    """Base class for secret-shared integer values."""

    __slots__ = ()  # no attributes beyond Share.df
class SecureFixedPoint(Share):
    """Base class for secret-shared fixed-point values."""

    __slots__ = ()  # no attributes beyond Share.df

# NOTE(review): not referenced in this chunk — presumably a registry of
# dynamically generated secure types, populated by factory functions
# elsewhere in the module; verify before removing.
_sectypes = {}
def SecFld(order=None, modulus=None, char2=None, l=None):
    """Secure prime or binary field of (l+1)-bit order.

    Field is prime by default, and if order (or modulus) is prime.
    Field is binary if order is a power of 2, if modulus is a
    polynomial, or if char2 is True.
    """
    # A string modulus denotes a GF(2) polynomial, hence a binary field.
    if isinstance(modulus, str):
        modulus = gf2x.Polynomial(modulus)
    if isinstance(modulus, gf2x.Polynomial):
        # char2 defaults to True here; an explicit char2=False is rejected.
        char2 = char2 or (char2 is None)
        assert char2  # binary field
        modulus = int(modulus)
    if order is not None:
        if order == 2:
            # GF(2) is both prime and binary: modulus 2 -> prime field,
            # modulus 3 (polynomial x+1) -> binary field.
            assert modulus is None or modulus == 2 or modulus == 3
            if modulus is None or modulus == 2:
                # default: prime field
                char2 = char2 or False
            else:
                char2 = char2 or (char2 is None)
                assert char2  # binary field
        elif gmpy.is_prime(order):
            # Prime order: modulus (if given) must equal the order.
            modulus = modulus or order
            assert modulus == order
            char2 = char2 or False
            assert not char2  # prime field
        elif order % 2 == 0:
            # Power-of-2 order: polynomial modulus must have matching degree.
            assert modulus is None or modulus.bit_length() == order.bit_length()
            char2 = char2 or (char2 is None)
            assert char2  # binary field
        else:
            raise ValueError('only prime fields and binary fields supported')
        # Bit length l is implied by the order; reject inconsistent l.
        l = l or order.bit_length() - 1
        assert l == order.bit_length() - 1
    if modulus is None:
        l = l or 1  # default: 1-bit field
        if char2:
            modulus = int(bfield.find_irreducible(l))
        else:
            # Smallest suitable (l+1)-bit prime; Blum prime not required.
            modulus = pfield.find_prime_root(l + 1, blum=False)[0]
    # Recompute l from the final modulus (authoritative).
    l = modulus.bit_length() - 1
    if char2:
        field = bfield.GF(modulus)
    else:
        field = pfield.GF(modulus)
    assert runtime.threshold == 0 or field.order > len(runtime.parties), \
        'Field order must exceed number of parties, unless threshold is 0.'
    # TODO: field.order >= number of parties for MDS
    field.is_signed = False
    return _SecFld(l, field)
@functools.lru_cache(maxsize=None)
def _SecFld(l, field):
    """Return the (cached) secure type for the given bit length and field."""
    def init(self, value=None):
        # Lift a plain int into the underlying field before storing the
        # share; None (and field elements) pass through unchanged.
        if isinstance(value, int):
            value = sectype.field(value)
        super(sectype, self).__init__(value)

    type_name = f'SecFld{l}({field})'
    sectype = type(type_name, (SecureFiniteField,),
                   {'__slots__': (), '__init__': init})
    sectype.field = field
    sectype.bit_length = l
    return sectype
def _pfield(l, f, p, n):
k = | |
False,
'ca3_age': 21 * 24 * 60 * 60,
'usermoney_age': 20 * 24 * 60 * 60,
'useramount': 100 * COIN,
'reward_to': 'user',
'rewardamount': COIN * 10 * 20 // 365 + 1,
'fee_total': 'auto',
'fee_user_percent': 0,
'refill_moneybox': 'random',
'keys_count_total': randint(1,15),
'keys_count_required': 'random',
'keys_count_used': 'auto',
'accepted': False,
'error': (64, BAD_REWARD_ROBBERY),
},
{
# (20d < usermoney_age < 30d, ca3_age == 20d, greenflag == False): accepted
'name': 'm15',
'rootcertamount': 1 * COIN,
'greenflag': False,
'ca3certamount': 1000000,
'ben_enabled': False,
'ca3_age': 20 * 24 * 60 * 60,
'usermoney_age': 23 * 24 * 60 * 60,
'useramount': 100 * COIN,
'reward_to': 'user',
'rewardamount': COIN * 10 * 20 // 365,
'fee_total': 'auto',
'fee_user_percent': 0,
'refill_moneybox': 'random',
'keys_count_total': randint(1,15),
'keys_count_required': 'random',
'keys_count_used': 'auto',
'accepted': True,
},
{
# m15, rewardamount +1 satoshi more than allowed: rejected
'name': 'm15e',
'rootcertamount': 1 * COIN,
'greenflag': False,
'ca3certamount': 1000000,
'ben_enabled': False,
'ca3_age': 20 * 24 * 60 * 60,
'usermoney_age': 23 * 24 * 60 * 60,
'useramount': 100 * COIN,
'reward_to': 'user',
'rewardamount': COIN * 10 * 20 // 365 + 1,
'fee_total': 'auto',
'fee_user_percent': 0,
'refill_moneybox': 'random',
'keys_count_total': randint(1,15),
'keys_count_required': 'random',
'keys_count_used': 'auto',
'accepted': False,
'error': (64, BAD_REWARD_ROBBERY),
},
{
# (20d < usermoney_age < 30d, 20d < ca3_age < 30d, usermoney_age > ca3_age, greenflag == False): accepted
'name': 'm17',
'rootcertamount': 1 * COIN,
'greenflag': False,
'ca3certamount': 1000000,
'ben_enabled': False,
'ca3_age': 23 * 24 * 60 * 60,
'usermoney_age': 25 * 24 * 60 * 60,
'useramount': 100 * COIN,
'reward_to': 'user',
'rewardamount': COIN * 10 * 23 // 365,
'fee_total': 'auto',
'fee_user_percent': 0,
'refill_moneybox': 'random',
'keys_count_total': randint(1,15),
'keys_count_required': 'random',
'keys_count_used': 'auto',
'accepted': True,
},
{
# m17, rewardamount +1 satoshi more than allowed: rejected
'name': 'm17e',
'rootcertamount': 1 * COIN,
'greenflag': False,
'ca3certamount': 1000000,
'ben_enabled': False,
'ca3_age': 23 * 24 * 60 * 60,
'usermoney_age': 25 * 24 * 60 * 60,
'useramount': 100 * COIN,
'reward_to': 'user',
'rewardamount': COIN * 10 * 23 // 365 + 1,
'fee_total': 'auto',
'fee_user_percent': 0,
'refill_moneybox': 'random',
'keys_count_total': randint(1,15),
'keys_count_required': 'random',
'keys_count_used': 'auto',
'accepted': False,
'error': (64, BAD_REWARD_ROBBERY),
},
{
# (20d < usermoney_age < 30d, 20d < ca3_age < 30d, ca3_age > usermoney_age, greenflag == False): accepted
'name': 'm19',
'rootcertamount': 1 * COIN,
'greenflag': False,
'ca3certamount': 1000000,
'ben_enabled': False,
'ca3_age': 29 * 24 * 60 * 60,
'usermoney_age': 25 * 24 * 60 * 60,
'useramount': 100 * COIN,
'reward_to': 'user',
'rewardamount': COIN * 10 * 25 // 365,
'fee_total': 'auto',
'fee_user_percent': 0,
'refill_moneybox': 'random',
'keys_count_total': randint(1,15),
'keys_count_required': 'random',
'keys_count_used': 'auto',
'accepted': True,
},
{
# m19, rewardamount +1 satoshi more than allowed: rejected
'name': 'm19e',
'rootcertamount': 1 * COIN,
'greenflag': False,
'ca3certamount': 1000000,
'ben_enabled': False,
'ca3_age': 29 * 24 * 60 * 60,
'usermoney_age': 25 * 24 * 60 * 60,
'useramount': 100 * COIN,
'reward_to': 'user',
'rewardamount': COIN * 10 * 25 // 365 + 1,
'fee_total': 'auto',
'fee_user_percent': 0,
'refill_moneybox': 'random',
'keys_count_total': randint(1,15),
'keys_count_required': 'random',
'keys_count_used': 'auto',
'accepted': False,
'error': (64, BAD_REWARD_ROBBERY),
},
{
# (usermoney_age == 30d, 20d < ca3_age < 30d, greenflag == False): accepted
'name': 'm21',
'rootcertamount': 1 * COIN,
'greenflag': False,
'ca3certamount': 1000000,
'ben_enabled': False,
'ca3_age': 21 * 24 * 60 * 60,
'usermoney_age': 30 * 24 * 60 * 60,
'useramount': 100 * COIN,
'reward_to': 'user',
'rewardamount': COIN * 10 * 21 // 365,
'fee_total': 'auto',
'fee_user_percent': 0,
'refill_moneybox': 'random',
'keys_count_total': randint(1,15),
'keys_count_required': 'random',
'keys_count_used': 'auto',
'accepted': True,
},
{
# m21, rewardamount +1 satoshi more than allowed: rejected
'name': 'm21e',
'rootcertamount': 1 * COIN,
'greenflag': False,
'ca3certamount': 1000000,
'ben_enabled': False,
'ca3_age': 21 * 24 * 60 * 60,
'usermoney_age': 30 * 24 * 60 * 60,
'useramount': 100 * COIN,
'reward_to': 'user',
'rewardamount': COIN * 10 * 21 // 365 + 1,
'fee_total': 'auto',
'fee_user_percent': 0,
'refill_moneybox': 'random',
'keys_count_total': randint(1,15),
'keys_count_required': 'random',
'keys_count_used': 'auto',
'accepted': False,
'error': (64, BAD_REWARD_ROBBERY),
},
{
# (20d < usermoney_age < 30d, ca3_age == 30d, greenflag == False): accepted
'name': 'm23',
'rootcertamount': 1 * COIN,
'greenflag': False,
'ca3certamount': 1000000,
'ben_enabled': False,
'ca3_age': 30 * 24 * 60 * 60,
'usermoney_age': 23 * 24 * 60 * 60,
'useramount': 100 * COIN,
'reward_to': 'user',
'rewardamount': COIN * 10 * 23 // 365,
'fee_total': 'auto',
'fee_user_percent': 0,
'refill_moneybox': 'random',
'keys_count_total': randint(1,15),
'keys_count_required': 'random',
'keys_count_used': 'auto',
'accepted': True,
},
{
# m23, rewardamount +1 satoshi more than allowed: rejected
'name': 'm23e',
'rootcertamount': 1 * COIN,
'greenflag': False,
'ca3certamount': 1000000,
'ben_enabled': False,
'ca3_age': 30 * 24 * 60 * 60,
'usermoney_age': 23 * 24 * 60 * 60,
'useramount': 100 * COIN,
'reward_to': 'user',
'rewardamount': COIN * 10 * 23 // 365 + 1,
'fee_total': 'auto',
'fee_user_percent': 0,
'refill_moneybox': 'random',
'keys_count_total': randint(1,15),
'keys_count_required': 'random',
'keys_count_used': 'auto',
'accepted': False,
'error': (64, BAD_REWARD_ROBBERY),
},
{
# (usermoney_age == 30d, ca3_age == 30d, greenflag == False): accepted
'name': 'm25',
'rootcertamount': 1 * COIN,
'greenflag': False,
'ca3certamount': 1000000,
'ben_enabled': False,
'ca3_age': 30 * 24 * 60 * 60,
'usermoney_age': 30 * 24 * 60 * 60,
'useramount': 100 * COIN,
'reward_to': 'user',
'rewardamount': COIN * 10 * 30 // 365,
'fee_total': 'auto',
'fee_user_percent': 0,
'refill_moneybox': 'random',
'keys_count_total': randint(1,15),
'keys_count_required': 'random',
'keys_count_used': 'auto',
'accepted': True,
},
{
# m25, rewardamount +1 satoshi more than allowed: rejected
'name': 'm25e',
'rootcertamount': 1 * COIN,
'greenflag': False,
'ca3certamount': 1000000,
'ben_enabled': False,
'ca3_age': 30 * 24 * 60 * 60,
'usermoney_age': 30 * 24 * 60 * 60,
'useramount': 100 * COIN,
'reward_to': 'user',
'rewardamount': COIN * 10 * 30 // 365 + 1,
'fee_total': 'auto',
'fee_user_percent': 0,
'refill_moneybox': 'random',
'keys_count_total': randint(1,15),
'keys_count_required': 'random',
'keys_count_used': 'auto',
'accepted': False,
'error': (64, BAD_REWARD_ROBBERY),
},
{
# (usermoney_age == 30d, ca3_age > 30d, greenflag == False): accepted
'name': 'm27',
'rootcertamount': 1 * COIN,
'greenflag': False,
'ca3certamount': 1000000,
'ben_enabled': False,
'ca3_age': 31 * 24 * 60 * 60,
'usermoney_age': 30 * 24 * 60 * 60,
'useramount': 100 * COIN,
'reward_to': 'user',
'rewardamount': COIN * 10 * 30 // 365,
'fee_total': 'auto',
'fee_user_percent': 0,
'refill_moneybox': 'random',
'keys_count_total': randint(1,15),
'keys_count_required': 'random',
'keys_count_used': 'auto',
'accepted': True,
},
{
# m27, rewardamount +1 satoshi more than allowed: rejected
'name': 'm27e',
'rootcertamount': 1 * COIN,
'greenflag': False,
'ca3certamount': 1000000,
'ben_enabled': False,
'ca3_age': 31 * 24 * 60 * 60,
'usermoney_age': 30 * 24 * 60 * 60,
'useramount': 100 * COIN,
'reward_to': 'user',
'rewardamount': COIN * 10 * 30 // 365 + 1,
'fee_total': 'auto',
'fee_user_percent': 0,
'refill_moneybox': 'random',
'keys_count_total': randint(1,15),
'keys_count_required': 'random',
'keys_count_used': 'auto',
'accepted': False,
'error': (64, BAD_REWARD_ROBBERY),
},
{
# (usermoney_age > 30d, ca3_age == 30d, greenflag == False): accepted
'name': 'm29',
'rootcertamount': 1 * COIN,
'greenflag': False,
'ca3certamount': 1000000,
'ben_enabled': False,
'ca3_age': 30 * 24 * 60 * 60,
'usermoney_age': 37 * 24 * 60 * 60,
'useramount': 100 * COIN,
'reward_to': 'user',
'rewardamount': COIN * 10 * 30 // 365,
'fee_total': 'auto',
'fee_user_percent': 0,
'refill_moneybox': 'random',
'keys_count_total': randint(1,15),
'keys_count_required': 'random',
'keys_count_used': 'auto',
'accepted': True,
},
{
# m29, rewardamount +1 satoshi more than allowed: rejected
'name': 'm29e',
'rootcertamount': 1 * COIN,
'greenflag': False,
'ca3certamount': 1000000,
'ben_enabled': False,
'ca3_age': 30 * 24 * 60 * 60,
'usermoney_age': 37 * 24 * 60 * 60,
'useramount': 100 * COIN,
'reward_to': 'user',
'rewardamount': COIN * 10 * 30 // 365 + 1,
'fee_total': 'auto',
'fee_user_percent': 0,
'refill_moneybox': 'random',
'keys_count_total': randint(1,15),
'keys_count_required': 'random',
'keys_count_used': 'auto',
'accepted': False,
'error': (64, BAD_REWARD_ROBBERY),
},
{
# (usermoney_age > 30d, ca3_age > 30d, usermoney_age > ca3_age, greenflag == False): accepted
'name': 'm31',
'rootcertamount': 1 * COIN,
'greenflag': False,
'ca3certamount': 1000000,
'ben_enabled': False,
'ca3_age': 31 * 24 * 60 * 60,
'usermoney_age': 35 * 24 * 60 * 60,
'useramount': 100 * COIN,
'reward_to': 'user',
'rewardamount': COIN * 10 * 30 // 365,
'fee_total': 'auto',
'fee_user_percent': 0,
'refill_moneybox': 'random',
'keys_count_total': randint(1,15),
'keys_count_required': 'random',
'keys_count_used': 'auto',
'accepted': True,
},
{
# m31, rewardamount +1 satoshi more than allowed: | |
link.
SchemaArn (string) -- [REQUIRED]The Amazon Resource Name (ARN) that is associated with the schema. For more information, see arns .
TypedLinkName (string) -- [REQUIRED]The unique name of the typed link facet.
:type Attributes: list
:param Attributes: [REQUIRED]
An ordered set of attributes that are associated with the typed link.
(dict) --Identifies the attribute name and value for a typed link.
AttributeName (string) -- [REQUIRED]The attribute name of the typed link.
Value (dict) -- [REQUIRED]The value for the typed link.
StringValue (string) --A string data value.
BinaryValue (bytes) --A binary data value.
BooleanValue (boolean) --A Boolean data value.
NumberValue (string) --A number data value.
DatetimeValue (datetime) --A date and time value.
:rtype: dict
:return: {
'TypedLinkSpecifier': {
'TypedLinkFacet': {
'SchemaArn': 'string',
'TypedLinkName': 'string'
},
'SourceObjectReference': {
'Selector': 'string'
},
'TargetObjectReference': {
'Selector': 'string'
},
'IdentityAttributeValues': [
{
'AttributeName': 'string',
'Value': {
'StringValue': 'string',
'BinaryValue': b'bytes',
'BooleanValue': True|False,
'NumberValue': 'string',
'DatetimeValue': datetime(2015, 1, 1)
}
},
]
}
}
:returns:
$ObjectIdentifier - An object identifier is an opaque string provided by Amazon Cloud Directory. When creating objects, the system will provide you with the identifier of the created object. An objects identifier is immutable and no two objects will ever share the same object identifier
/some/path - Identifies the object based on path
#SomeBatchReference - Identifies the object in a batch call
"""
pass
def batch_read(DirectoryArn=None, Operations=None, ConsistencyLevel=None):
    """
    Performs all the read operations in a batch.
    See also: AWS API Documentation
    :example: response = client.batch_read(
    DirectoryArn='string',
    Operations=[
    {
    'ListObjectAttributes': {
    'ObjectReference': {
    'Selector': 'string'
    },
    'NextToken': 'string',
    'MaxResults': 123,
    'FacetFilter': {
    'SchemaArn': 'string',
    'FacetName': 'string'
    }
    },
    'ListObjectChildren': {
    'ObjectReference': {
    'Selector': 'string'
    },
    'NextToken': 'string',
    'MaxResults': 123
    }
    },
    ],
    ConsistencyLevel='SERIALIZABLE'|'EVENTUAL'
    )
    :type DirectoryArn: string
    :param DirectoryArn: [REQUIRED]
    The Amazon Resource Name (ARN) that is associated with the Directory . For more information, see arns .
    :type Operations: list
    :param Operations: [REQUIRED]
    A list of operations that are part of the batch.
    (dict) --Represents the output of a BatchRead operation.
    ListObjectAttributes (dict) --Lists all attributes that are associated with an object.
    ObjectReference (dict) -- [REQUIRED]Reference of the object whose attributes need to be listed.
    Selector (string) --A path selector supports easy selection of an object by the parent/child links leading to it from the directory root. Use the link names from each parent/child link to construct the path. Path selectors start with a slash (/) and link names are separated by slashes. For more information about paths, see Accessing Objects . You can identify an object in one of the following ways:
    $ObjectIdentifier - An object identifier is an opaque string provided by Amazon Cloud Directory. When creating objects, the system will provide you with the identifier of the created object. An object's identifier is immutable and no two objects will ever share the same object identifier
    /some/path - Identifies the object based on path
    #SomeBatchReference - Identifies the object in a batch call
    NextToken (string) --The pagination token.
    MaxResults (integer) --The maximum number of items to be retrieved in a single call. This is an approximate number.
    FacetFilter (dict) --Used to filter the list of object attributes that are associated with a certain facet.
    SchemaArn (string) --The ARN of the schema that contains the facet.
    FacetName (string) --The name of the facet.
    ListObjectChildren (dict) --Returns a paginated list of child objects that are associated with a given object.
    ObjectReference (dict) -- [REQUIRED]Reference of the object for which child objects are being listed.
    Selector (string) --A path selector supports easy selection of an object by the parent/child links leading to it from the directory root. Use the link names from each parent/child link to construct the path. Path selectors start with a slash (/) and link names are separated by slashes. For more information about paths, see Accessing Objects . You can identify an object in one of the following ways:
    $ObjectIdentifier - An object identifier is an opaque string provided by Amazon Cloud Directory. When creating objects, the system will provide you with the identifier of the created object. An object's identifier is immutable and no two objects will ever share the same object identifier
    /some/path - Identifies the object based on path
    #SomeBatchReference - Identifies the object in a batch call
    NextToken (string) --The pagination token.
    MaxResults (integer) --Maximum number of items to be retrieved in a single call. This is an approximate number.
    :type ConsistencyLevel: string
    :param ConsistencyLevel: Represents the manner and timing in which the successful write or update of an object is reflected in a subsequent read operation of that same object.
    :rtype: dict
    :return: {
    'Responses': [
    {
    'SuccessfulResponse': {
    'ListObjectAttributes': {
    'Attributes': [
    {
    'Key': {
    'SchemaArn': 'string',
    'FacetName': 'string',
    'Name': 'string'
    },
    'Value': {
    'StringValue': 'string',
    'BinaryValue': b'bytes',
    'BooleanValue': True|False,
    'NumberValue': 'string',
    'DatetimeValue': datetime(2015, 1, 1)
    }
    },
    ],
    'NextToken': 'string'
    },
    'ListObjectChildren': {
    'Children': {
    'string': 'string'
    },
    'NextToken': 'string'
    }
    },
    'ExceptionResponse': {
    'Type': 'ValidationException'|'InvalidArnException'|'ResourceNotFoundException'|'InvalidNextTokenException'|'AccessDeniedException'|'NotNodeException',
    'Message': 'string'
    }
    },
    ]
    }
    :returns:
    (string) --
    (string) --
    """
    pass
def batch_write(DirectoryArn=None, Operations=None):
"""
Performs all the write operations in a batch. Either all the operations succeed or none. Batch writes supports only object-related operations.
See also: AWS API Documentation
:example: response = client.batch_write(
DirectoryArn='string',
Operations=[
{
'CreateObject': {
'SchemaFacet': [
{
'SchemaArn': 'string',
'FacetName': 'string'
},
],
'ObjectAttributeList': [
{
'Key': {
'SchemaArn': 'string',
'FacetName': 'string',
'Name': 'string'
},
'Value': {
'StringValue': 'string',
'BinaryValue': b'bytes',
'BooleanValue': True|False,
'NumberValue': 'string',
'DatetimeValue': datetime(2015, 1, 1)
}
},
],
'ParentReference': {
'Selector': 'string'
},
'LinkName': 'string',
'BatchReferenceName': 'string'
},
'AttachObject': {
'ParentReference': {
'Selector': 'string'
},
'ChildReference': {
'Selector': 'string'
},
'LinkName': 'string'
},
'DetachObject': {
'ParentReference': {
'Selector': 'string'
},
'LinkName': 'string',
'BatchReferenceName': 'string'
},
'UpdateObjectAttributes': {
'ObjectReference': {
'Selector': 'string'
},
'AttributeUpdates': [
{
'ObjectAttributeKey': {
'SchemaArn': 'string',
'FacetName': 'string',
'Name': 'string'
},
'ObjectAttributeAction': {
'ObjectAttributeActionType': 'CREATE_OR_UPDATE'|'DELETE',
'ObjectAttributeUpdateValue': {
'StringValue': 'string',
'BinaryValue': b'bytes',
'BooleanValue': True|False,
'NumberValue': 'string',
'DatetimeValue': datetime(2015, 1, 1)
}
}
},
]
},
'DeleteObject': {
'ObjectReference': {
'Selector': 'string'
}
},
'AddFacetToObject': {
'SchemaFacet': {
'SchemaArn': 'string',
'FacetName': 'string'
},
'ObjectAttributeList': [
{
'Key': {
'SchemaArn': 'string',
'FacetName': 'string',
'Name': 'string'
},
'Value': {
'StringValue': 'string',
'BinaryValue': b'bytes',
'BooleanValue': True|False,
'NumberValue': 'string',
'DatetimeValue': datetime(2015, 1, 1)
}
},
],
'ObjectReference': {
'Selector': 'string'
}
},
'RemoveFacetFromObject': {
'SchemaFacet': {
'SchemaArn': 'string',
'FacetName': 'string'
},
'ObjectReference': {
'Selector': 'string'
}
}
},
]
)
:type DirectoryArn: string
:param DirectoryArn: [REQUIRED]
The Amazon Resource Name (ARN) that is associated with the Directory . For more information, see arns .
:type Operations: list
:param Operations: [REQUIRED]
A list of operations that are part of the batch.
(dict) --Represents the output of a BatchWrite operation.
CreateObject (dict) --Creates an object.
SchemaFacet (list) -- [REQUIRED]A list of FacetArns that will be associated with the object. For more information, see arns .
(dict) --A facet.
SchemaArn (string) --The ARN of the schema that contains the facet.
FacetName (string) --The name of the facet.
ObjectAttributeList (list) -- [REQUIRED]An attribute map, which contains an attribute ARN as the key and attribute value as the map value.
(dict) --The combination of an attribute key and an attribute value.
Key (dict) -- [REQUIRED]The key of the attribute.
SchemaArn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the schema that contains the facet and attribute.
FacetName (string) -- [REQUIRED]The name of the facet that the attribute exists within.
Name (string) -- [REQUIRED]The name of the attribute.
Value (dict) -- [REQUIRED]The value of the attribute.
StringValue (string) --A string data value.
BinaryValue (bytes) --A binary data value.
BooleanValue (boolean) --A Boolean | |
bands on the Sentinel-2A & B sensors.
The index indicates levels of chlorophyll-a (chl-a) concentrations in complex turbid productive waters such as those encountered in many inland water bodies. The index has not been validated in Australian waters, and there are a range of environmental conditions that may have an effect on the accuracy of the derived index values in this test implementation, including:
- Influence on the remote sensing signal from nearby land and/or atmospheric effects
- Optically shallow water
- Cloud cover
<NAME>., <NAME>., 2012. Normalized difference chlorophyll index: A novel model for remote estimation of chlorophyll-a concentration in turbid productive waters. Remote Sensing of Environment, Remote Sensing of Urban Environments 117, 394–406. https://doi.org/10.1016/j.rse.2011.10.016
For service status information, see https://status.dea.ga.gov.au""",
# The WMS name for the layer
"name": "s2b_nrt_granule_nbar_t",
# The Datacube name for the associated data product
"product_name": "s2b_nrt_granule",
# The Datacube name for the associated pixel-quality product (optional)
# The name of the associated Datacube pixel-quality product
# "pq_dataset": "s2b_nrt_granule",
# The name of the measurement band for the pixel-quality product
# (Only required if pq_dataset is set)
# "pq_band": "pixel_quality",
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 15.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
# Time Zone. In hours added to UTC (maybe negative)
# Used for rounding off scene times to a date.
# 9 is good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
# Determines what portions of dataset is potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
# Define layer wide legend graphic if no style is passed
# to GetLegendGraphic
"legend": {
# "url": ""
"styles": ["ndvi", "ndwi", "ndci"]
},
"wcs_default_bands": ["nbart_red", "nbart_green", "nbart_blue"],
# Styles.
#
# See band_mapper.py
#
# The various available spectral bands, and ways to combine them
# into a single rgb image.
# The examples here are ad hoc
#
"styles": [
# Examples of styles which are linear combinations of the available spectral bands.
#
{
"name": "simple_rgb",
"title": "Simple RGB",
"abstract": "Simple true-colour image, using the red, green and blue bands",
"components": {
"red": {
"nbart_red": 1.0
},
"green": {
"nbart_green": 1.0
},
"blue": {
"nbart_blue": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "infrared_green",
"title": "False colour - Green, SWIR, NIR",
"abstract": "False Colour image with SWIR1->Red, NIR->Green, and Green->Blue",
"components": {
"red": {
"nbart_swir_2": 1.0
},
"green": {
"nbart_nir_1": 1.0
},
"blue": {
"nbart_green": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "ndvi",
"title": "NDVI - Red, NIR",
"abstract": "Normalised Difference Vegetation Index - a derived index that correlates well with the existence of vegetation",
"index_function": lambda data: (data["nbart_nir_1"] - data["nbart_red"]) / (data["nbart_nir_1"] + data["nbart_red"]),
"needed_bands": ["nbart_red", "nbart_nir_1"],
"color_ramp": [
{
"value": -0.0,
"color": "#8F3F20",
"alpha": 0.0
},
{
"value": 0.0,
"color": "#8F3F20",
"alpha": 1.0
},
{
"value": 0.1,
"color": "#A35F18"
},
{
"value": 0.2,
"color": "#B88512"
},
{
"value": 0.3,
"color": "#CEAC0E"
},
{
"value": 0.4,
"color": "#E5D609"
},
{
"value": 0.5,
"color": "#FFFF0C"
},
{
"value": 0.6,
"color": "#C3DE09"
},
{
"value": 0.7,
"color": "#88B808"
},
{
"value": 0.8,
"color": "#529400"
},
{
"value": 0.9,
"color": "#237100"
},
{
"value": 1.0,
"color": "#114D04"
}
]
},
{
"name": "ndwi",
"title": "NDWI - Green, NIR",
"abstract": "Normalised Difference Water Index - a derived index that correlates well with the existence of water",
"index_function": lambda data: (data["nbart_green"] - data["nbart_nir_1"]) / (
data["nbart_nir_1"] + data["nbart_green"]),
"needed_bands": ["nbart_green", "nbart_nir_1"],
"color_ramp": [
{
"value": -0.0,
"color": "#8F3F20",
"alpha": 0.0
},
{
"value": 0.0,
"color": "#8F3F20",
"alpha": 1.0
},
{
"value": 1.0,
"color": "#0303FF",
},
]
},
{
"name": "ndci",
"title": "NDCI - Red Edge, Red",
"abstract": "Normalised Difference Chlorophyll Index - a derived index that correlates well with the existence of chlorophyll",
"index_function": lambda data: (data["nbart_red_edge_1"] - data["nbart_red"]) / (data["nbart_red_edge_1"] + data["nbart_red"]).where(((data["nbart_green"] - data["nbart_swir_3"]) / (data["nbart_green"] + data["nbart_swir_3"])) > 0.1),
"needed_bands": ["nbart_red_edge_1", "nbart_red", "nbart_green", "nbart_swir_3"],
"color_ramp": [
{
"value": -0.1,
"color": "#1696FF",
"legend": {
"prefix" : "<"
}
},
{
"value": -0.1,
"color": "#1696FF"
},
{
"value": 0.0,
"color": "#00FFDF",
"legend": { }
},
{
"value": 0.1,
"color": "#FFF50E",
},
{
"value": 0.2,
"color": "#FFB50A",
"legend": { }
},
{
"value": 0.4,
"color": "#FF530D",
},
{
"value": 0.5,
"color": "#FF0000",
"legend": {
"prefix": ">"
}
}
]
},
{
"name": "aerosol",
"title": "Narrow Blue - 440",
"abstract": "Coastal Aerosol or Narrow Blue band, approximately 435nm to 450nm",
"components": {
"red": {
"nbart_coastal_aerosol": 1.0
},
"green": {
"nbart_coastal_aerosol": 1.0
},
"blue": {
"nbart_coastal_aerosol": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "blue",
"title": "Blue - 490",
"abstract": "Blue band, approximately 453nm to 511nm",
"components": {
"red": {
"nbart_blue": 1.0
},
"green": {
"nbart_blue": 1.0
},
"blue": {
"nbart_blue": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "green",
"title": "Green - 560",
"abstract": "Green band, approximately 534nm to 588nm",
"components": {
"red": {
"nbart_green": 1.0
},
"green": {
"nbart_green": 1.0
},
"blue": {
"nbart_green": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "red",
"title": "Red - 670",
"abstract": "Red band, roughly 637nm to 672nm",
"components": {
"red": {
"nbart_red": 1.0
},
"green": {
"nbart_red": 1.0
},
"blue": {
"nbart_red": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "red_edge_1",
"title": "Vegetation Red Edge - 710",
"abstract": "Near infra-red band, centred on 710nm",
"components": {
"red": {
"nbart_red_edge_1": 1.0
},
"green": {
"nbart_red_edge_1": 1.0
},
"blue": {
"nbart_red_edge_1": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "red_edge_2",
"title": "Vegetation Red Edge - 740",
"abstract": "Near infra-red band, centred on 740nm",
"components": {
"red": {
"nbart_red_edge_2": 1.0
},
"green": {
"nbart_red_edge_2": 1.0
},
"blue": {
"nbart_red_edge_2": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "red_edge_3",
"title": "Vegetation Red Edge - 780",
"abstract": "Near infra-red band, centred on 780nm",
"components": {
"red": {
"nbart_red_edge_3": 1.0
},
"green": {
"nbart_red_edge_3": 1.0
},
"blue": {
"nbart_red_edge_3": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "nir",
"title": "Near Infrared (NIR) - 840",
"abstract": "Near infra-red band, roughly 853nm to 876nm",
"components": {
"red": {
"nbart_nir_1": 1.0
},
"green": {
"nbart_nir_1": 1.0
},
"blue": {
"nbart_nir_1": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "narrow_nir",
"title": "Narrow Near Infrared - 870",
"abstract": "Near infra-red band, centred on 865nm",
"components": {
"red": {
"nbart_nir_2": 1.0
},
"green": {
"nbart_nir_2": 1.0
},
"blue": {
"nbart_nir_2": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "swir1",
"title": "Shortwave Infrared (SWIR) - 1610",
"abstract": "Short wave infra-red band 1, roughly 1575nm to 1647nm",
"components": {
"red": {
"nbart_swir_2": 1.0
},
"green": {
"nbart_swir_2": 1.0
},
"blue": {
"nbart_swir_2": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "swir2",
"title": "Shortwave Infrared (SWIR) - 2190",
"abstract": "Short wave infra-red band 2, roughly 2117nm to 2285nm",
"components": {
"red": {
"nbart_swir_3": 1.0
},
"green": {
"nbart_swir_3": 1.0
},
"blue": {
"nbart_swir_3": 1.0
}
},
"scale_range": [0.0, 3000.0]
}
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "simple_rgb",
},
{
# Included as a keyword for the layer
"label": "Sentinel 2A",
# Included as a keyword for the layer
"type": "",
# Included as a keyword for the layer
"variant": "Surface Reflectance",
"abstract": """
This is a 90-day rolling archive of daily Sentinel-2 Near Real Time data. The Near Real-Time capability provides analysis-ready data that is processed on receipt using the best-available ancillary information at the time to provide atmospheric | |
Letter Request/" + gp_letter_request)
msg = frappe.render_template('one_fm/templates/emails/gp_letter_attachment_no_response.html', context={"page_link": page_link, "gp_letter_request": gp_letter_request})
sender = frappe.get_value("Email Account", filters = {"default_outgoing": 1}, fieldname = "email_id") or None
recipient = frappe.db.get_single_value('GP Letter Request Setting', 'grd_email')
sendemail(sender=sender, recipients= recipient,
content=msg, subject="GP Letter Upload No Response", delayed=False)
def send_travel_agent_email():
    """Send the initial GP letter request e-mail to the travel agent.

    Picks every GP Letter Request that is still unanswered (gp_status is
    NULL, empty, or 'Reject') and that has a supplier assigned, sends the
    request e-mail once, and stamps ``sent_date`` so it is not re-sent.
    Follow-up reminders and the 'No Response' escalation are handled by
    ``send_gp_letter_reminder``.
    """
    # NOTE(review): the original predicate used OR between the two supplier
    # checks, which reduces to `supplier IS NOT NULL` and therefore matched
    # rows with an empty-string supplier. AND expresses the intended
    # "supplier is set and non-empty" condition.
    gp_letters_request = frappe.db.sql_list(
        "select name from `tabGP Letter Request` "
        "where (gp_status is NULL or gp_status='' or gp_status='Reject') "
        "and (supplier is not NULL and supplier!='')"
    )
    for gp_letter_request in gp_letters_request:
        gp_letter_doc = frappe.get_doc("GP Letter Request", gp_letter_request)
        # Skip requests already escalated to 'No Response' by the reminder job,
        # and those that were already e-mailed once.
        if gp_letter_doc.gp_status != 'No Response' and not gp_letter_doc.sent_date:
            send_gp_email(gp_letter_doc.pid, gp_letter_doc.gp_letter_candidates, gp_letter_request)
            gp_letter_doc.sent_date = frappe.utils.now()
            gp_letter_doc.save(ignore_permissions=True)
def send_gp_letter_reminder():
    """Escalate unanswered GP Letter Requests on a 3-hour cadence.

    For every request that is still unanswered and has a supplier set:
    - 3 hours after the initial e-mail: re-send as reminder 1;
    - 3 hours after reminder 1: re-send as reminder 2;
    - 3 hours after reminder 2: mark the request 'No Response' and notify
      the GRD address configured in GP Letter Request Setting.
    """
    # NOTE(review): as in send_travel_agent_email, the supplier predicate is
    # corrected from OR (which matched empty-string suppliers) to AND.
    gp_letters_request = frappe.db.sql_list(
        "select name from `tabGP Letter Request` "
        "where (gp_status is NULL or gp_status='' or gp_status='Reject') "
        "and (supplier is not NULL and supplier!='')"
    )
    for gp_letter_request in gp_letters_request:
        gp_letter_doc = frappe.get_doc("GP Letter Request", gp_letter_request)
        if gp_letter_doc.gp_status == 'No Response':
            continue
        if gp_letter_doc.sent_date and not gp_letter_doc.reminder1:
            after_three_hour = add_to_date(gp_letter_doc.sent_date, hours=3)
            if get_datetime(frappe.utils.now()) >= get_datetime(after_three_hour):
                send_gp_email(gp_letter_doc.pid, gp_letter_doc.gp_letter_candidates, gp_letter_request)
                gp_letter_doc.reminder1 = frappe.utils.now()
                gp_letter_doc.save(ignore_permissions=True)
        if gp_letter_doc.reminder1 and not gp_letter_doc.reminder2:
            after_three_hour = add_to_date(gp_letter_doc.reminder1, hours=3)
            if get_datetime(frappe.utils.now()) >= get_datetime(after_three_hour):
                send_gp_email(gp_letter_doc.pid, gp_letter_doc.gp_letter_candidates, gp_letter_request)
                gp_letter_doc.reminder2 = frappe.utils.now()
                gp_letter_doc.save(ignore_permissions=True)
        if gp_letter_doc.reminder2:
            after_three_hour = add_to_date(gp_letter_doc.reminder2, hours=3)
            if get_datetime(frappe.utils.now()) >= get_datetime(after_three_hour):
                send_gp_email(gp_letter_doc.pid, gp_letter_doc.gp_letter_candidates, gp_letter_request)
                # Stop the escalation chain; the outer status check skips
                # this request on subsequent runs.
                gp_letter_doc.gp_status = 'No Response'
                gp_letter_doc.save(ignore_permissions=True)
                page_link = "http://206.189.228.82/desk#Form/GP Letter Request/" + gp_letter_request
                # page_link = get_url("/desk#Form/GP Letter Request/" + gp_letter_request)
                msg = frappe.render_template('one_fm/templates/emails/gp_letter_request_no_response.html', context={"page_link": page_link, "gp_letter_request": gp_letter_request})
                sender = frappe.get_value("Email Account", filters={"default_outgoing": 1}, fieldname="email_id") or None
                recipient = frappe.db.get_single_value('GP Letter Request Setting', 'grd_email')
                sendemail(sender=sender, recipients=recipient,
                    content=msg, subject="GP Letter Request No Response", delayed=False)
def send_gp_email(pid, candidates, gp_letter_request):
    """E-mail the GP letter request, with its Excel sheet attached, to GRD.

    Args:
        pid: Public identifier used to build the guest page link.
        candidates: Child rows of candidates included in the request
            (rendered into the e-mail template).
        gp_letter_request: Name of the GP Letter Request document; also the
            base name of the previously generated ``.xlsx`` attachment.
    """
    # NOTE(review): the original fetched the GP Letter Request document into
    # an unused local (only referenced by commented-out code); the dead
    # fetch has been removed.
    page_link = "http://206.189.228.82/gp_letter_request?pid=" + pid
    # page_link = get_url("/gp_letter_request?pid=" + pid)
    msg = frappe.render_template('one_fm/templates/emails/gp_letter_request.html', context={"page_link": page_link, "candidates": candidates})
    sender = frappe.get_value("Email Account", filters={"default_outgoing": 1}, fieldname="email_id") or None
    recipient = frappe.db.get_single_value('GP Letter Request Setting', 'grd_email')
    # The spreadsheet was written into the site's public files folder when
    # the request was created.
    site_name = cstr(frappe.local.site)
    path = "/home/frappe/frappe-bench/sites/{0}/public/files/{1}.xlsx".format(site_name, gp_letter_request)
    with open(path, "rb") as fileobj:
        filedata = fileobj.read()
    attachments = [{
        'fname': cstr(gp_letter_request) + '.xlsx',
        'fcontent': filedata
    }]
    sendemail(sender=sender, recipients=recipient,
        content=msg, subject="Request for GP Letter | {0}".format(gp_letter_request),
        attachments=attachments, delayed=False)
def create_gp_letter_request():
    """Bundle all unlinked GP Letters into a new GP Letter Request.

    Collects every GP Letter without a request reference, creates one
    GP Letter Request holding them as child candidate rows (with supplier
    category/subcategory resolved from the Item Group tree and the default
    supplier from GP Letter Request Setting), generates the Excel sheet
    that is later e-mailed to the travel agent, and links each GP Letter
    back to the new request.
    """
    gp_letters = frappe.db.sql_list("select name from `tabGP Letter` where gp_letter_request_reference is NULL or gp_letter_request_reference='' ")
    if not gp_letters:
        return
    supplier_category_val = ''
    supplier_subcategory_val = ''
    supplier_name_val = ''
    services_item_group = frappe.db.sql("select name from `tabItem Group` where parent_item_group='All Item Groups' and item_group_name like '%Services%' ")
    if services_item_group:
        supplier_category_val = services_item_group[0][0]
        # Parameterized: do not interpolate the looked-up group name into SQL.
        travel_agent_group = frappe.db.sql(
            "select name from `tabItem Group` where parent_item_group=%s and item_group_name like '%%Travel Agent%%'",
            (supplier_category_val,))
        if travel_agent_group:
            supplier_subcategory_val = travel_agent_group[0][0]
    supplier = frappe.db.get_single_value('GP Letter Request Setting', 'default_supplier')
    if supplier:
        supplier_name_val = supplier
    doc = frappe.new_doc('GP Letter Request')
    for candidate in gp_letters:
        gp_letter_doc = frappe.get_doc("GP Letter", candidate)
        doc.append("gp_letter_candidates", {
            "gp_letter": candidate,
            "candidate": gp_letter_doc.candidate_name,
        })
    doc.supplier_category = supplier_category_val
    doc.supplier_subcategory = supplier_subcategory_val
    doc.supplier = supplier_name_val
    doc.save(ignore_permissions=True)
    _write_gp_letter_request_sheet(doc.name)
    # Link each GP Letter back to the request so it is not picked up again.
    for gp_letter in gp_letters:
        gp_letter_doc = frappe.get_doc("GP Letter", gp_letter)
        gp_letter_doc.gp_letter_request_reference = doc.name
        gp_letter_doc.save(ignore_permissions=True)

def _write_gp_letter_request_sheet(request_name):
    """Generate the candidate spreadsheet for *request_name* in the site's
    public files folder and record its path on the document."""
    import xlsxwriter
    site_name = cstr(frappe.local.site)
    workbook = xlsxwriter.Workbook('/home/frappe/frappe-bench/sites/{0}/public/files/{1}.xlsx'.format(site_name, request_name))
    worksheet = workbook.add_worksheet()
    worksheet.write('A1', 'Sr.No')
    worksheet.write('B1', 'Candidate Name In English')
    worksheet.write('C1', 'Candidate Name In Arabic')
    worksheet.write('D1', 'Passport Number')
    worksheet.write('E1', 'Candidate Nationality in Arabic')
    worksheet.write('F1', 'Company Name in Arabic')
    worksheet.set_column('B:F', 22)
    # Re-fetch so child-row idx values assigned on save are populated.
    request_doc = frappe.get_doc("GP Letter Request", request_name)
    row = 1
    for candidate in request_doc.gp_letter_candidates:
        # Only Sr.No and the English name are filled programmatically; the
        # remaining columns are completed by hand before sending.
        worksheet.write(row, 0, candidate.idx)
        worksheet.write(row, 1, candidate.candidate)
        row += 1
    workbook.close()
    request_doc.excel_sheet_attachment = "/files/{0}.xlsx".format(request_name)
    request_doc.save(ignore_permissions=True)
@frappe.whitelist(allow_guest=True)
def leave_appillication_on_submit(doc, method):
    """Hook: on Leave Application submit, run paid-sick-leave salary
    processing and the hajj status update — approved applications only."""
    if doc.status != "Approved":
        return
    leave_appillication_paid_sick_leave(doc, method)
    update_employee_hajj_status(doc, method)
@frappe.whitelist(allow_guest=True)
def leave_appillication_on_cancel(doc, method):
    # Hook: on Leave Application cancel, roll back the employee's
    # went_to_hajj flag if the cancelled leave was a hajj leave.
    update_employee_hajj_status(doc, method)
@frappe.whitelist(allow_guest=True)
def leave_appillication_paid_sick_leave(doc, method):
    """Create salary deduction entries when the leave type is flagged as a
    One FM paid sick leave."""
    if not doc.leave_type:
        return
    is_paid_sick = frappe.db.get_value("Leave Type", doc.leave_type, 'one_fm_is_paid_sick_leave')
    if is_paid_sick == 1:
        create_additional_salary_for_paid_sick_leave(doc)
def create_additional_salary_for_paid_sick_leave(doc):
    """Split a paid sick leave across the leave type's payment-breakdown
    tiers and create one salary deduction per tier.

    The daily rate is the employee's basic salary over a 30-day month.
    ``curr_year_applied_days`` is the total days already taken this year
    (falling back to this application's days when the ledger shows zero),
    used to work out where this application lands in the tier thresholds.
    """
    salary = get_salary(doc.employee)
    # 30-day month convention for the per-day rate.
    daily_rate = salary/30
    from erpnext.hr.doctype.leave_application.leave_application import get_leave_details
    leave_details = get_leave_details(doc.employee, nowdate())
    curr_year_applied_days = 0
    if doc.leave_type in leave_details['leave_allocation'] and leave_details['leave_allocation'][doc.leave_type]:
        curr_year_applied_days = leave_details['leave_allocation'][doc.leave_type]['leaves_taken']
    if curr_year_applied_days == 0:
        # No prior usage recorded: assume this application is the first.
        curr_year_applied_days = doc.total_leave_days
    leave_payment_breakdown = get_leave_payment_breakdown(doc.leave_type)
    total_payment_days = 0
    if leave_payment_breakdown:
        # threshold_days accumulates the running upper bound of each tier;
        # payment_days is how many of this application's days fall into the
        # current tier. NOTE(review): the arithmetic below is order-dependent;
        # presumably tiers are configured in ascending order — confirm.
        threshold_days = 0
        for payment_breakdown in leave_payment_breakdown:
            payment_days = 0
            threshold_days += payment_breakdown.threshold_days
            if total_payment_days < doc.total_leave_days:
                if curr_year_applied_days >= threshold_days and (curr_year_applied_days - doc.total_leave_days) < threshold_days:
                    # Application straddles this tier's boundary: pay only the
                    # portion that falls inside the tier.
                    payment_days = threshold_days - (curr_year_applied_days-doc.total_leave_days) - total_payment_days
                elif curr_year_applied_days <= threshold_days: # Gives true this also doc.total_leave_days <= threshold_days:
                    payment_days = doc.total_leave_days - total_payment_days
                create_additional_salary(salary, daily_rate, payment_days, doc, payment_breakdown)
                total_payment_days += payment_days
    # Any remainder beyond all tiers is deducted at the default 100% rate.
    if total_payment_days < doc.total_leave_days and doc.total_leave_days-total_payment_days > 0:
        create_additional_salary(salary, daily_rate, doc.total_leave_days-total_payment_days, doc)
def create_additional_salary(salary, daily_rate, payment_days, leave_application, payment_breakdown=False):
    """Insert and submit an Additional Salary deduction for *payment_days*
    days of a paid sick leave.

    With a *payment_breakdown* row, its percentage and salary component are
    used; otherwise a full (100%) deduction is taken against the leave
    type's default deduction component. No-op when *payment_days* is zero
    or negative.
    """
    if not payment_days > 0:
        return
    percent = 1
    component = frappe.db.get_value("Leave Type", leave_application.leave_type, "one_fm_paid_sick_leave_deduction_salary_component")
    if payment_breakdown:
        percent = payment_breakdown.salary_deduction_percentage/100
        component = payment_breakdown.salary_component
    # Human-readable breakdown shown on the Additional Salary record.
    notes = """
    Employee Salary: <b>{0}</b><br>
    Daily Rate: <b>{1}</b><br>
    Deduction Days Number: <b>{2}</b><br>
    Deduction Percent: <b>{3}%</b>
    """.format(salary, daily_rate, payment_days, percent*100)
    record = frappe.get_doc({
        "doctype": "Additional Salary",
        "employee": leave_application.employee,
        "salary_component": component,
        "payroll_date": leave_application.from_date,
        "leave_application": leave_application.name,
        "notes": notes,
        "amount": payment_days*daily_rate*percent
    }).insert(ignore_permissions=True)
    record.submit()
def get_leave_payment_breakdown(leave_type):
    """Return the leave type's payment-breakdown child rows, or False when
    the leave type has none configured."""
    breakdown = frappe.get_doc("Leave Type", leave_type).one_fm_leave_payment_breakdown
    return breakdown or False
def validate_leave_type_for_one_fm_paid_leave(doc, method):
    """Keep the One FM leave-type flags mutually exclusive.

    LWP, paid sick leave, paid annual leave and hajj leave cannot be
    combined; the elif chain gives ``is_lwp`` highest precedence. Also
    enforces the configuration each flag requires (deduction component or
    breakdown for sick leave, allocation matrix for annual leave).
    """
    if doc.is_lwp:
        doc.one_fm_is_paid_sick_leave = False
        doc.one_fm_is_paid_annual_leave = False
    elif doc.one_fm_is_paid_sick_leave:
        doc.is_lwp = False
        doc.one_fm_is_paid_annual_leave = False
        doc.one_fm_is_hajj_leave = False
        # A paid sick leave needs somewhere to book the deduction.
        if not doc.one_fm_paid_sick_leave_deduction_salary_component and not doc.one_fm_leave_payment_breakdown:
            frappe.throw(_('Either Paid Sick Leave Deduction Salary Component or Leave Payment Breakdown is Mandatory'))
    elif doc.one_fm_is_paid_annual_leave:
        doc.is_lwp = False
        doc.one_fm_is_paid_sick_leave = False
        doc.one_fm_is_hajj_leave = False
        if not doc.leave_allocation_matrix:
            frappe.throw(_('Leave Allocation Matrix is Mandatory'))
    elif doc.one_fm_is_hajj_leave:
        doc.one_fm_is_paid_annual_leave = False
@frappe.whitelist(allow_guest=True)
def bereavement_leave_validation(doc, method):
    """Top up the employee's current Bereavement allocation by the applied
    days and record a matching Leave Ledger Entry so the balance covers
    this application.

    No-op when the employee has no submitted Bereavement allocation whose
    period contains today.
    """
    # Parameterized query: the employee id must not be interpolated into SQL.
    allocation = frappe.db.sql(
        "select name from `tabLeave Allocation` "
        "where leave_type='Bereavement - وفاة' and employee=%s and docstatus=1 "
        "and %s between from_date and to_date order by to_date desc limit 1",
        (doc.employee, nowdate()))
    if not allocation:
        return
    allocation_doc = frappe.get_doc('Leave Allocation', allocation[0][0])
    allocation_doc.new_leaves_allocated = allocation_doc.new_leaves_allocated + doc.total_leave_days
    allocation_doc.total_leaves_allocated = allocation_doc.new_leaves_allocated + allocation_doc.unused_leaves
    allocation_doc.save()
    frappe.db.commit()
    # Mirror the allocation bump in the leave ledger so balance reports agree.
    ledger = frappe._dict(
        doctype='Leave Ledger Entry',
        employee=doc.employee,
        leave_type='Bereavement - وفاة',
        transaction_type='Leave Allocation',
        transaction_name=allocation[0][0],
        leaves=doc.total_leave_days,
        from_date=allocation_doc.from_date,
        to_date=allocation_doc.to_date,
        is_carry_forward=0,
        is_expired=0,
        is_lwp=0
    )
    frappe.get_doc(ledger).submit()
@frappe.whitelist(allow_guest=True)
def update_employee_hajj_status(doc, method):
    """Set the employee's went_to_hajj flag when a hajj leave is submitted,
    clear it when one is cancelled; other leave types are ignored."""
    if not doc.leave_type:
        return
    if frappe.db.get_value('Leave Type', doc.leave_type, 'one_fm_is_hajj_leave') != 1:
        return
    if method == "on_submit":
        frappe.db.set_value("Employee", doc.employee, 'went_to_hajj', True)
    elif method == "on_cancel":
        frappe.db.set_value("Employee", doc.employee, 'went_to_hajj', False)
@frappe.whitelist(allow_guest=True)
def validate_hajj_leave(doc, method):
    """Block a hajj leave application for an employee who already went to hajj."""
    is_hajj_leave = doc.leave_type and frappe.db.get_value('Leave Type', doc.leave_type, 'one_fm_is_hajj_leave') == 1
    if is_hajj_leave and frappe.db.get_value('Employee', doc.employee, 'went_to_hajj') == 1:
        frappe.throw(_("You can't apply for hajj leave twice"))
def get_salary(employee):
    """Return the amount of the employee's 'Basic' salary component.

    Reads the latest existing Salary Slip; when the employee has none, a
    temporary slip for the current month is created to compute the amount
    and then deleted. Returns 0 when no 'Basic' earning is found.
    """
    salary_amount = 0
    # Parameterized query: never interpolate the employee id into SQL.
    salary_slip_name = frappe.db.sql(
        "select name from `tabSalary Slip` where employee=%s order by creation desc limit 1",
        (employee,))
    if salary_slip_name:
        doc = frappe.get_doc('Salary Slip', salary_slip_name[0][0])
        for earning in doc.earnings:
            if earning.salary_component == 'Basic':
                salary_amount = earning.amount
    else:
        # No slip on file: build a throw-away slip for the current month so
        # the salary structure resolves the earnings.
        doc = frappe.new_doc("Salary Slip")
        doc.payroll_frequency = "Monthly"
        doc.start_date = get_first_day(getdate(nowdate()))
        doc.end_date = get_last_day(getdate(nowdate()))
        doc.employee = str(employee)
        doc.posting_date = nowdate()
        doc.insert(ignore_permissions=True)
        try:
            if doc.name:
                for earning in doc.earnings:
                    if earning.salary_component == 'Basic':
                        salary_amount = earning.amount
        finally:
            # Always remove the temporary slip, even if reading it fails.
            doc.delete()
    return salary_amount
@frappe.whitelist()
def hooked_leave_allocation_builder():
    '''
    Daily hook: create leave allocations for employees with a valid
    leave policy.

    Fetches only the employee fields the builder needs and hands the list
    to a background job (leave_allocation_builder) so the hook itself
    returns quickly.
    '''
    active_employees = frappe.get_all(
        "Employee",
        filters={"status": "Active"},
        fields=["name", "date_of_joining", "went_to_hajj", "grade", "leave_policy"],
    )
    frappe.enqueue(leave_allocation_builder, timeout=600, employee_list=active_employees)
def leave_allocation_builder(employee_list):
'''
Function used to create leave allocations for a given employee list
- Create Leave Allocation for Employee, who is having a valid leave policy
args:
employee_list: List of Employees with minimum data (name, date_of_joining, went_to_hajj, grade and leave_policy)
'''
from erpnext.hr.doctype.leave_allocation.leave_allocation import get_leave_allocation_for_period
# Get Leave Type details (configurations)
leave_type_details = get_leave_type_details()
# Iterate Employee List for finidng employee leave policy to create leave allocation
for employee in employee_list:
# Get employee Leave Policy Object
leave_policy = get_employee_leave_policy(employee)
# Check leave policy and details exists
if leave_policy and leave_policy.leave_policy_details:
# Iterate Leave policy details to check if there | |
import string
import tkinter
import BingoBoard
import Config
from time import time
from tkinter import ttk
from tkinter import messagebox
from copy import deepcopy
from math import ceil
class TimespinnerBingo(tkinter.Frame):
def __init__(self, master, **kw):
super().__init__(master, **kw)
self.config = Config.Config()
self.master = master
self.cbRows = None
self.cbColumns = None
self.lblAvailableIcons = None
self.lblRequiredIcons = None
self.availableIcons = 0
self.requiredIcons = 0
self.tfSeed = None
self.btnGenerate = None
self.variables = {}
self.candidates = {}
self.checkbox_events = []
#
# COLUMN 0-1 - icon settings
#
# Label at the top of Column 1
icon_label = tkinter.Label(master=self.master, text="Icons")
icon_label.grid(row=5, column=0, sticky="s")
icon_separator = ttk.Separator(master=self.master, orient="horizontal")
icon_separator.grid(row=6, column=0, columnspan=100, sticky="ew")
# To make a scrollable area containing the checkboxes is pretty complicated.
# We need a Canvas, a Scrollbar, a Frame, checkboxes, and Master.
# The checkboxes are inserted into the frame. The frame is inserted into the Canvas.
# The Canvas and Scrollbar are inserted into Master.
icon_canvas = tkinter.Canvas(master=self.master)
icon_canvas_scrollbar = tkinter.Scrollbar(
master=self.master,
orient='vertical',
command=icon_canvas.yview
)
icon_container = tkinter.Frame(master=icon_canvas)
# Method that scrolls the list of icons with the mousewheel
def icon_canvas_scroll(event):
icon_canvas.yview_scroll(-1 * int(event.delta / 120), 'units')
# Bind the canvas and frame to scroll the canvas whenever the mouse is over them and
# the user scrolls the mouse wheel
icon_canvas.bind('<MouseWheel>', icon_canvas_scroll)
icon_container.bind('<MouseWheel>', icon_canvas_scroll)
# Iterates through item-related settings, providing checkboxes
objective_index = 2
for key in self.config.get_tile_data().keys():
var = tkinter.IntVar()
widget = tkinter.Checkbutton(
master=icon_container,
text=string.capwords(key),
variable=var
)
# Have the checkbox start checked if the item is enabled in config
if self.config.get_tile_data()[key]['enabled']:
widget.select()
# Add the checkbox to the variable list so the checkbox state can be identified later
self.variables[widget["text"]] = var
# icon_changed is the changelistener that runs when the checkbox is checked
widget.config(command=lambda arg=widget: self.icon_changed(arg))
# We want to bind the mousewheel to scroll the canvas holding the checkbox
# If this is not done, the canvas will not scroll if the mouse is over a checkbox
widget.bind('<MouseWheel>', icon_canvas_scroll)
self.checkbox_events.append(CheckboxEvents(widget, self.config, "icon", key))
# Anchoring justifies the checkboxes against the left side
widget.pack(anchor="w")
objective_index += 1
# The Canvas create_window command is required for the scrollbar to work properly
icon_canvas.create_window(0, 0, anchor='nw', window=icon_container, width=175)
# The Canvas update_idletasks waits until the checkboxes are added before configuring the scrollbar
# If this is not done, the scrollbar does not work properly because the Canvas is not full yet?
icon_canvas.update_idletasks()
icon_canvas.configure(
scrollregion=icon_canvas.bbox('all'),
yscrollcommand=icon_canvas_scrollbar.set,
width=175
)
# Configures the row containing the scrollable canvases to fill the rest of the window vertically
master.grid_rowconfigure(7, weight=1)
icon_canvas_scrollbar.grid(row=7, column=1, sticky='nse')
icon_canvas.grid(row=7, column=0, padx=(5, 0), sticky='ns')
#
# COLUMN 2 - Separator
#
col_separator1 = ttk.Separator(master=self.master, orient="vertical")
col_separator1.grid(row=5, column=2, padx=(5, 5), rowspan=3, sticky="ns")
#
# COLUMN 3-4 - tag settings
#
tag_label = tkinter.Label(master=self.master, text="Tags")
tag_label.grid(row=5, column=3, sticky="s")
tag_separator = ttk.Separator(
master=self.master,
orient="horizontal"
)
tag_separator.grid(row=6, column=3, columnspan=100, sticky="ew")
# To make a scrollable area containing the checkboxes is pretty complicated.
# We need a Canvas, a Scrollbar, a Frame, checkboxes, and Master.
# The checkboxes are inserted into the frame. The frame is inserted into the Canvas.
# The Canvas and Scrollbar are inserted into Master.
tag_canvas = tkinter.Canvas(master=self.master)
tag_canvas_scrollbar = tkinter.Scrollbar(
master=self.master,
orient='vertical',
command=tag_canvas.yview
)
tag_container = tkinter.Frame(master=tag_canvas)
# Method that scrolls the list of icons with the mousewheel
def tag_canvas_scroll(event):
tag_canvas.yview_scroll(-1 * int(event.delta / 120), 'units')
# Bind the canvas and frame to scroll the canvas whenever the mouse is over them and
# the user scrolls the mouse wheel
tag_canvas.bind('<MouseWheel>', tag_canvas_scroll)
tag_container.bind('<MouseWheel>', tag_canvas_scroll)
# Iterates through item-related settings, providing checkboxes
objective_index = 2
for key in sorted(self.config.get_tag_data().keys()):
var = tkinter.IntVar()
widget = tkinter.Checkbutton(
master=tag_container,
text=string.capwords(key),
variable=var
)
# Have the checkbox start checked if the item is enabled in config
if self.config.get_tag_data()[key]['enabled']:
widget.select()
# Add the checkbox to the variable list so the checkbox state can be identified later
self.variables[widget["text"]] = var
# icon_changed is the changelistener that runs when the checkbox is checked
widget.config(command=lambda arg=widget: self.tag_changed(arg))
# We want to bind the mousewheel to scroll the canvas holding the checkbox
# If this is not done, the canvas will not scroll if the mouse is over a checkbox
widget.bind('<MouseWheel>', tag_canvas_scroll)
self.checkbox_events.append(CheckboxEvents(widget, self.config, "tag", key))
# Anchoring justifies the checkboxes against the left side
widget.pack(anchor="w")
objective_index += 1
# The Canvas create_window command is required for the scrollbar to work properly
tag_canvas.create_window(0, 0, anchor='nw', window=tag_container, width=175)
# The Canvas update_idletasks waits until the checkboxes are added before configuring the scrollbar
# If this is not done, the scrollbar does not work properly because the Canvas is not full yet?
tag_canvas.update_idletasks()
tag_canvas.configure(
scrollregion=tag_canvas.bbox('all'),
yscrollcommand=tag_canvas_scrollbar.set,
width=175
)
tag_canvas_scrollbar.grid(row=7, column=4, sticky='nse')
tag_canvas.grid(row=7, column=3, padx=(5, 0), sticky='ns')
#
# Column 5
#
config_separator = ttk.Separator(
master=self.master,
orient="vertical"
)
config_separator.grid(row=5, column=5, rowspan=3, padx=(5, 5), sticky="ns")
#
# Column 6 & 7 - generation settings
#
layout_label = tkinter.Label(
master=self.master,
text="Generation Settings"
)
layout_label.grid(row=5, column=6, sticky="s")
# Since the entirety of the area below the header is a single row in master, we need to
# wrap everything in a frame to effectively split it into sub-rows
layout_frame = tkinter.Frame(
master=self.master
)
layout_frame.grid(row=7, column=6, pady=(5, 0), sticky='news')
layout_rows_label = tkinter.Label(
master=layout_frame,
text="Bingo Rows: "
)
layout_rows_label.grid(row=0, column=1, padx=(5, 5), pady=(5, 0), sticky="w")
self.cbRows = ttk.Combobox(
master=layout_frame,
text="Rows: " + str(self.availableIcons),
width=5,
values=("1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15"),
state='readonly'
)
self.cbRows.current(int(self.config.rows["value"]) - 1)
self.cbRows.bind("<<ComboboxSelected>>", lambda x: self.rows_changed())
self.cbRows.grid(row=0, column=2, padx=(0, 10), pady=(5, 0), sticky="ew")
layout_columns_label = tkinter.Label(
master=layout_frame,
text="Bingo Columns: "
)
layout_columns_label.grid(row=1, column=1, padx=(5, 0), pady=(5, 0), sticky="w")
self.cbColumns = ttk.Combobox(
master=layout_frame,
text="Columns: " + str(self.availableIcons),
width=5,
values=("1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15"),
state='readonly'
)
self.cbColumns.current(int(self.config.columns["value"]) - 1)
self.cbColumns.bind("<<ComboboxSelected>>", lambda x: self.columns_changed())
self.cbColumns.grid(row=1, column=2, padx=(0, 10), pady=(5, 0), sticky="ew")
var = tkinter.BooleanVar()
compact_mode_checkbox = tkinter.Checkbutton(
master=layout_frame,
text=self.config.use_compact_mode["friendlyName"],
variable=var
)
if self.config.use_compact_mode["value"]:
compact_mode_checkbox.select()
compact_mode_checkbox.config(command=lambda arg=compact_mode_checkbox: self.checkbox_changed(arg))
self.variables[compact_mode_checkbox["text"]] = var
compact_mode_checkbox.grid(row=2, column=1, padx=(5, 0), pady=(5, 0), columnspan=2, sticky="w")
var = tkinter.BooleanVar()
allow_duplicates_checkbox = tkinter.Checkbutton(
master=layout_frame,
text=self.config.allow_duplicates["friendlyName"],
variable=var
)
if self.config.allow_duplicates["value"]:
allow_duplicates_checkbox.select()
allow_duplicates_checkbox.config(command=lambda arg=allow_duplicates_checkbox: self.checkbox_changed(arg))
self.variables[allow_duplicates_checkbox["text"]] = var
allow_duplicates_checkbox.grid(row=3, column=1, padx=(5, 0), pady=(0, 0), columnspan=2, sticky="w")
available_icons_label = tkinter.Label(
master=layout_frame,
text="Available Items :"
)
available_icons_label.grid(row=4, column=1, padx=(5, 0), pady=(5, 0), sticky="w")
self.lblAvailableIcons = tkinter.Label(
master=layout_frame,
text=str(self.availableIcons)
)
self.lblAvailableIcons.grid(row=4, column=2, pady=(5, 0), sticky="w")
required_items_label = tkinter.Label(
master=layout_frame,
text="Required Items :"
)
required_items_label.grid(row=5, column=1, padx=(5, 0), pady=(5, 0), sticky="w")
self.lblRequiredIcons = tkinter.Label(
master=layout_frame,
text=str(self.requiredIcons)
)
self.lblRequiredIcons.grid(row=5, column=2, padx=(0, 5), pady=(5, 0), sticky="w")
seed_label = tkinter.Label(
master=layout_frame,
text="Seed:"
)
seed_label.grid(row=7, column=1, padx=(5, 0), pady=(50, 0), sticky="w")
self.tfSeed = tkinter.Text(
master=layout_frame,
height=1,
width=15
)
self.tfSeed.grid(row=7, column=1, columnspan=2, padx=(40, 10), pady=(50, 0), sticky='ew')
self.btnGenerate = tkinter.Button(
master=layout_frame,
compound=tkinter.BOTTOM,
text="Generate!",
command=self.generate_bingo_board
)
self.btnGenerate.grid(row=8, column=1, columnspan=2, padx=(10, 10), pady=(5, 20), sticky="ew")
self.calculate_available_icons()
if self.availableIcons > 0 and self.config.allow_duplicates["value"]:
self.lblAvailableIcons["text"] = "Infinite"
self.calculate_required_icons()
self.validate_required_icons()
def calculate_available_icons(self):
    """Recount the tiles eligible for board generation and refresh the label.

    A tile is a candidate only when it is enabled itself AND every tag
    attached to it is enabled.  Updates ``self.availableIcons``,
    ``self.candidates`` and the ``lblAvailableIcons`` label text.
    """
    self.availableIcons = 0
    self.candidates = {}
    tile_data = self.config.get_tile_data()
    tag_data = self.config.get_tag_data()
    for key, tile in tile_data.items():
        # Disabled tiles are never candidates - no need to inspect tags.
        if not tile['enabled']:
            continue
        # all() short-circuits on the first disabled tag (the original
        # scanned every tag even after finding a disabled one).
        if all(tag_data[tag_key]['enabled'] for tag_key in tile['tags']):
            self.availableIcons += 1
            self.candidates[key] = tile
    self.lblAvailableIcons["text"] = str(self.availableIcons)
def calculate_required_icons(self):
    """Recompute how many icons a full board needs (rows x columns) and show it."""
    rows = int(self.cbRows.get())
    columns = int(self.cbColumns.get())
    self.requiredIcons = rows * columns
    self.lblRequiredIcons["text"] = str(self.requiredIcons)
def icon_changed(self, arg):
    """Checkbox callback for a tile/icon entry.

    Syncs the tile's ``enabled`` flag in the config with the checkbox state,
    then refreshes the availability counters and persists the settings.

    Parameters
    ----------
    arg
        The Checkbutton widget that changed; its text (lower-cased) is the
        tile's key in the config.
    """
    icon_name = str.lower(arg["text"])
    state = self.variables[arg["text"]].get()
    tile_data = self.config.get_tile_data()
    # Direct dict lookup instead of scanning every key; unknown names are
    # silently ignored, matching the old linear-search behaviour.
    if icon_name in tile_data:
        tile_data[icon_name]['enabled'] = bool(state)
    self.calculate_available_icons()
    self.validate_required_icons()
    self.config.save_settings()
def tag_changed(self, arg):
    """Checkbox callback for a tag entry.

    Syncs the tag's ``enabled`` flag in the config with the checkbox state,
    then refreshes the availability counters and persists the settings.

    Parameters
    ----------
    arg
        The Checkbutton widget that changed; its text (lower-cased) is the
        tag's key in the config.
    """
    tag_name = str.lower(arg['text'])
    state = self.variables[arg['text']].get()
    tag_data = self.config.get_tag_data()
    # Direct dict lookup instead of scanning every key; unknown names are
    # silently ignored, matching the old linear-search behaviour.
    if tag_name in tag_data:
        tag_data[tag_name]['enabled'] = bool(state)
    self.calculate_available_icons()
    self.validate_required_icons()
    self.config.save_settings()
def checkbox_changed(self, arg):
variable = str.lower(arg['text'])
state = self.variables[arg["text"]].get()
print(variable)
if variable == "use compact mode":
if state == 0:
self.config.set_use_compact_mode(False)
else:
self.config.set_use_compact_mode(True)
elif variable == "allow duplicates":
if state == 0:
self.config.set_allow_duplicates(False)
else:
self.config.set_allow_duplicates(True)
self.config.save_settings()
self.calculate_available_icons()
if self.availableIcons > 0 and self.config.allow_duplicates["value"]:
self.lblAvailableIcons["text"] = | |
got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a vizier_service.ListStudiesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, vizier_service.ListStudiesRequest):
request = vizier_service.ListStudiesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_studies]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListStudiesPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def delete_study(
    self,
    request: Union[vizier_service.DeleteStudyRequest, dict] = None,
    *,
    name: str = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> None:
    r"""Deletes a Study.

    .. code-block:: python

        from google.cloud import aiplatform_v1beta1

        def sample_delete_study():
            # Create a client
            client = aiplatform_v1beta1.VizierServiceClient()

            # Initialize request argument(s)
            request = aiplatform_v1beta1.DeleteStudyRequest(
                name="name_value",
            )

            # Make the request
            client.delete_study(request=request)

    Args:
        request (Union[google.cloud.aiplatform_v1beta1.types.DeleteStudyRequest, dict]):
            The request object. Request message for
            [VizierService.DeleteStudy][google.cloud.aiplatform.v1beta1.VizierService.DeleteStudy].
        name (str):
            Required. The name of the Study resource to be deleted.
            Format:
            ``projects/{project}/locations/{location}/studies/{study}``

            This corresponds to the ``name`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.
    """
    # A full request object and flattened field arguments are mutually
    # exclusive ways of describing the call.
    if request is not None and any([name]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )

    # Coerce dicts (and None) into a proto request; an already-typed
    # request is reused as-is, avoiding an unnecessary copy.
    if not isinstance(request, vizier_service.DeleteStudyRequest):
        request = vizier_service.DeleteStudyRequest(request)
    if name is not None:
        request.name = name

    # The transport wrapper layers retry/timeout handling and friendly
    # error translation on top of the raw RPC.
    wrapped_rpc = self._transport._wrapped_methods[self._transport.delete_study]

    # Route the call server-side via the resource-name metadata header.
    routing_header = gapic_v1.routing_header.to_grpc_metadata(
        (("name", request.name),)
    )

    wrapped_rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=tuple(metadata) + (routing_header,),
    )
def lookup_study(
    self,
    request: Union[vizier_service.LookupStudyRequest, dict] = None,
    *,
    parent: str = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> study.Study:
    r"""Looks a study up using the user-defined display_name field
    instead of the fully qualified resource name.

    .. code-block:: python

        from google.cloud import aiplatform_v1beta1

        def sample_lookup_study():
            # Create a client
            client = aiplatform_v1beta1.VizierServiceClient()

            # Initialize request argument(s)
            request = aiplatform_v1beta1.LookupStudyRequest(
                parent="parent_value",
                display_name="display_name_value",
            )

            # Make the request
            response = client.lookup_study(request=request)

            # Handle the response
            print(response)

    Args:
        request (Union[google.cloud.aiplatform_v1beta1.types.LookupStudyRequest, dict]):
            The request object. Request message for
            [VizierService.LookupStudy][google.cloud.aiplatform.v1beta1.VizierService.LookupStudy].
        parent (str):
            Required. The resource name of the Location to get the
            Study from. Format:
            ``projects/{project}/locations/{location}``

            This corresponds to the ``parent`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.cloud.aiplatform_v1beta1.types.Study:
            A message representing a Study.
    """
    # A full request object and the flattened ``parent`` argument are
    # mutually exclusive ways of describing the call.
    if request is not None and any([parent]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )

    # Coerce dicts (and None) into a proto request; an already-typed
    # request is reused as-is, avoiding an unnecessary copy.
    if not isinstance(request, vizier_service.LookupStudyRequest):
        request = vizier_service.LookupStudyRequest(request)
    if parent is not None:
        request.parent = parent

    # The transport wrapper layers retry/timeout handling and friendly
    # error translation on top of the raw RPC.
    wrapped_rpc = self._transport._wrapped_methods[self._transport.lookup_study]

    # Route the call server-side via the resource-name metadata header.
    routing_header = gapic_v1.routing_header.to_grpc_metadata(
        (("parent", request.parent),)
    )

    return wrapped_rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=tuple(metadata) + (routing_header,),
    )
def suggest_trials(
    self,
    request: Union[vizier_service.SuggestTrialsRequest, dict] = None,
    *,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
    r"""Adds one or more Trials to a Study, with parameter values
    suggested by Vertex AI Vizier. Returns a long-running operation
    associated with the generation of Trial suggestions. When this
    long-running operation succeeds, it will contain a
    [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse].

    .. code-block:: python

        from google.cloud import aiplatform_v1beta1

        def sample_suggest_trials():
            # Create a client
            client = aiplatform_v1beta1.VizierServiceClient()

            # Initialize request argument(s)
            request = aiplatform_v1beta1.SuggestTrialsRequest(
                parent="parent_value",
                suggestion_count=1744,
                client_id="client_id_value",
            )

            # Make the request
            operation = client.suggest_trials(request=request)

            print("Waiting for operation to complete...")

            response = operation.result()

            # Handle the response
            print(response)

    Args:
        request (Union[google.cloud.aiplatform_v1beta1.types.SuggestTrialsRequest, dict]):
            The request object. Request message for
            [VizierService.SuggestTrials][google.cloud.aiplatform.v1beta1.VizierService.SuggestTrials].
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.api_core.operation.Operation:
            An object representing a long-running operation.

            The result type for the operation will be
            :class:`google.cloud.aiplatform_v1beta1.types.SuggestTrialsResponse`
            Response message for
            [VizierService.SuggestTrials][google.cloud.aiplatform.v1beta1.VizierService.SuggestTrials].
    """
    # Coerce dicts (and None) into a proto request; an already-typed
    # request is reused as-is, avoiding an unnecessary copy.  This method
    # exposes no flattened field arguments, so no exclusivity check is
    # needed here.
    if not isinstance(request, vizier_service.SuggestTrialsRequest):
        request = vizier_service.SuggestTrialsRequest(request)

    # The transport wrapper layers retry/timeout handling and friendly
    # error translation on top of the raw RPC.
    wrapped_rpc = self._transport._wrapped_methods[self._transport.suggest_trials]

    # Route the call server-side via the resource-name metadata header.
    routing_header = gapic_v1.routing_header.to_grpc_metadata(
        (("parent", request.parent),)
    )

    raw_response = wrapped_rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=tuple(metadata) + (routing_header,),
    )

    # Expose the long-running operation as a future that resolves to a
    # SuggestTrialsResponse.
    return operation.from_gapic(
        raw_response,
        self._transport.operations_client,
        vizier_service.SuggestTrialsResponse,
        metadata_type=vizier_service.SuggestTrialsMetadata,
    )
def create_trial(
self,
request: Union[vizier_service.CreateTrialRequest, dict] = None,
*,
parent: str = None,
trial: study.Trial = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> study.Trial:
r"""Adds a user provided Trial to a Study.
.. code-block:: python
from google.cloud import aiplatform_v1beta1
def sample_create_trial():
# Create a client
client = aiplatform_v1beta1.VizierServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.CreateTrialRequest(
parent="parent_value",
)
# Make the request
response = client.create_trial(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateTrialRequest, dict]):
The request object. Request message for
[VizierService.CreateTrial][google.cloud.aiplatform.v1beta1.VizierService.CreateTrial].
parent (str):
Required. The resource name of the Study to create the
Trial in. Format:
``projects/{project}/locations/{location}/studies/{study}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
| |
<filename>src/biome/text/model.py
import inspect
import json
import logging
import os
import pickle
import warnings
from functools import lru_cache
from logging.handlers import RotatingFileHandler
from pathlib import Path
from typing import Any
from typing import Dict
from typing import Iterable
from typing import List
from typing import Optional
from typing import Union
import allennlp
import pytorch_lightning as pl
import torch
from allennlp.common import Params
from allennlp.common.util import sanitize
from allennlp.data import Instance
from allennlp.data import Vocabulary
from allennlp.models.archival import CONFIG_NAME
from biome.text import vocabulary
from biome.text.backbone import ModelBackbone
from biome.text.configuration import PipelineConfiguration
from biome.text.configuration import PredictionConfiguration
from biome.text.featurizer import FeaturizeError
from biome.text.helpers import split_signature_params_by_predicate
from biome.text.modules.heads import TaskHead
from biome.text.modules.heads import TaskPrediction
class _HashDict(dict):
"""
Hashable dict implementation.
BE AWARE! Since dicts are mutable, the hash can change!
"""
def __hash__(self):
return pickle.dumps(self).__hash__()
class _HashList(list):
"""
Hashable list implementation.
BE AWARE! Since lists are mutable, the hash can change!
"""
def __hash__(self):
return pickle.dumps(self).__hash__()
class PipelineModel(allennlp.models.Model, pl.LightningModule):
    """
    This class represents the pipeline model implementation connecting biome.text
    concepts with allennlp implementation details.

    This class manages the head + backbone encoder, keeping the allennlp Model
    lifecycle. This class should be hidden to api users.

    Parameters
    ----------
    config
        Configuration of the pipeline
    vocab
        The vocabulary of the pipeline. If None, an empty vocabulary will be created (default).

    Attributes
    ----------
    name: str
        Name of the pipeline model
    head: TaskHead
        TaskHead of the pipeline model
    vocab: Vocabulary
        The vocabulary of the model, comes from allennlp.models.Model
    file_path: Optional[str]
        File path to a serialized version of this pipeline model
    inputs: List[str]
        The model inputs
    output: List[str]
        The model outputs (not prediction): Corresponding to the `TaskHead.featurize` optional arguments.
    """

    # File name used by `init_prediction_logger` for the rotating predictions log.
    PREDICTION_FILE_NAME = "predictions.json"
    # Prefixes for stage-specific metric names.
    TRAINING_METRICS_PREFIX = "training"
    VALIDATION_METRICS_PREFIX = "validation"
    TEST_METRICS_PREFIX = "test"

    _LOGGER = logging.getLogger(__name__)
def __init__(self, config: Dict, vocab: Optional[Vocabulary] = None):
    """Build the full model (backbone + head) from a configuration dict.

    Parameters
    ----------
    config
        Pipeline configuration as a plain dict (parsed via ``PipelineConfiguration``).
    vocab
        Vocabulary for the model; when None an empty vocabulary is created.
    """
    super().__init__(vocab=vocab or vocabulary.create_empty_vocabulary())
    # saves the config in the pl checkpoints
    self.save_hyperparameters("config")

    config = PipelineConfiguration.from_dict(config)
    tokenizer = config.build_tokenizer()
    featurizer = config.features.compile_featurizer(tokenizer)
    embedder = config.build_embedder(self.vocab)
    # The head wraps the backbone (featurizer + embedder + encoder); both
    # receive this model's vocabulary instance.
    head = config.head.compile(
        backbone=ModelBackbone(
            self.vocab,
            featurizer=featurizer,
            embedder=embedder,
            encoder=config.encoder,
        )
    )

    self.name = config.name
    # set_head also recomputes the input/output attribute lists from the head.
    self._head = None
    self.set_head(head)

    self.file_path: Optional[str] = None

    # Optimizer is initialized to None here and assigned externally later.
    self.optimizer: Optional[torch.optim.Optimizer] = None
    # The lr_scheduler dict follows the Lightning format:
    # https://pytorch-lightning.readthedocs.io/en/stable/common/optimizers.html#learning-rate-scheduling
    self.lr_scheduler: Optional[Dict] = None
    self.best_metrics: Optional[Dict[str, torch.Tensor]] = None
    # This is set by our trainer to figure out the best_metrics
    # what metric to monitor?
    self.monitor: Optional[str] = None
    # shall the metric increase ("max") or decrease ("min")?
    self.monitor_mode: Optional[str] = None
def _update_head_related_attributes(self):
    """Recompute the inputs/outputs name lists from the current head's
    ``featurize`` signature (and its explicit ``inputs()`` declaration)."""

    def has_no_default(param):
        return param.default == inspect.Parameter.empty

    required, optional = split_signature_params_by_predicate(
        self._head.featurize, has_no_default
    )
    explicit_inputs = self._head.inputs()
    self._inputs = explicit_inputs or [param.name for param in required]
    remaining = [param.name for param in optional if param.name not in self._inputs]
    self._output = remaining or [None]
@classmethod
def from_params(
    cls: "PipelineModel",
    params: Params,
    vocab: Optional[Vocabulary] = None,
    **extras,
) -> "PipelineModel":
    """
    Load the model implementation from params. We build manually each component from config sections.

    The param keys match exactly the keys in the yaml configuration files.

    Parameters
    ----------
    params
        The config key in these params is used to build the model components
    vocab
        The vocabulary for the model
    **extras
        Necessary for AllenNLP from_params machinery

    Returns
    -------
    pipeline_model
    """
    raw_config = params.pop("config", keep_as_dict=True)
    return cls(config=raw_config, vocab=vocab)
@property
def head(self) -> TaskHead:
    """Get the model head (read-only; use ``set_head`` to replace it)."""
    return self._head
def set_head(self, head: TaskHead) -> None:
    """Set a head and update related model attributes.

    Parameters
    ----------
    head
        The new task head; the input/output name lists are recomputed
        from its ``featurize`` signature.
    """
    self._head = head
    self._update_head_related_attributes()
def forward(self, *args, **kwargs) -> Dict[str, torch.Tensor]:
    """The main forward method just wraps the head forward method.

    All positional/keyword arguments are passed straight through to the
    head, and the head's output dict is returned unchanged.
    """
    return self._head(*args, **kwargs)
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
    """Fetch metrics defined in the head layer.

    Parameters
    ----------
    reset
        Passed through to the head's ``get_metrics``.
    """
    return self._head.get_metrics(reset)
def text_to_instance(self, **inputs) -> Optional[Instance]:
    """Create an instance by applying the head's ``featurize`` method.

    Parameters
    ----------
    **inputs
        Keyword arguments matching the head's ``featurize`` signature
        (see ``self.inputs``).

    Returns
    -------
    instance
        The featurized instance, or ``None`` when the input could not be
        featurized (empty strings, etc.).

    Raises
    ------
    TypeError
        If the keyword arguments do not match the head's ``featurize`` signature.
    """
    try:
        return self._head.featurize(**inputs)
    except FeaturizeError as error:
        # We cannot featurize the input (empty strings, etc.); log it and
        # return None explicitly instead of silently falling off the end.
        self._LOGGER.warning(error)
        return None
    except TypeError as error:
        # probably wrong input arguments for the head
        raise TypeError(
            f"Please check your input arguments, expected: {self.inputs}, actual: {inputs.keys()}"
        ) from error
def extend_vocabulary(self, vocab: Vocabulary):
    """Extend the model's vocabulary with `vocab`

    Parameters
    ----------
    vocab
        The model's vocabulary will be extended with this one.
    """
    # self.vocab and self._head.backbone.vocab point to the same vocab!
    self.vocab.extend_from_vocab(vocab)

    # updates the embedding matrices so they cover the extended vocabulary
    self.extend_embedder_vocab()
    # updates head specific things (delegated to the head)
    self._head.on_vocab_update()
def extend_embedder_vocab(
    self, embedding_sources_mapping: Dict[str, str] = None
) -> None:
    """
    Iterates through all embedding modules in the model and assures it can embed
    with the extended vocab. This is required in fine-tuning or transfer learning
    scenarios where model was trained with original vocabulary but during
    fine-tuning/transfer-learning, it will have it work with extended vocabulary
    (original + new-data vocabulary).

    # Parameters

    embedding_sources_mapping : `Dict[str, str]`, optional (default = `None`)
        Mapping from model_path to pretrained-file path of the embedding
        modules. If pretrained-file used at time of embedding initialization
        isn't available now, user should pass this mapping. Model path is
        path traversing the model attributes upto this embedding module.
        Eg. "_text_field_embedder.token_embedder_tokens".
    """
    # self.named_modules() gives all sub-modules (including nested children)
    # The path nesting is already separated by ".": eg. parent_module_name.child_module_name
    embedding_sources_mapping = embedding_sources_mapping or {}
    for model_path, module in self.named_modules():
        if not hasattr(module, "extend_vocab"):
            continue
        pretrained_file = embedding_sources_mapping.get(model_path)

        # Show useful information when reading from a pretrained file by
        # temporarily raising the allennlp logger to INFO (kind of an ugly
        # hack).  try/finally guarantees the original level is restored
        # even if extend_vocab raises (the original code leaked the INFO
        # level on exception).
        allennlp_logger = logging.getLogger("allennlp")
        original_logging_level = allennlp_logger.level
        if module._pretrained_file is not None:
            allennlp_logger.setLevel("INFO")
        try:
            module.extend_vocab(
                self.vocab,
                extension_pretrained_file=pretrained_file,
                model_path=model_path,
            )
        finally:
            if module._pretrained_file is not None:
                allennlp_logger.setLevel(original_logging_level)
@property
def inputs(self) -> List[str]:
    """The model inputs. Corresponding to head.featurize required argument names
    (computed in ``_update_head_related_attributes``)."""
    return self._inputs
@property
def output(self) -> List[str]:
    """The model outputs (not prediction): Corresponding to head.featurize optional
    argument names (computed in ``_update_head_related_attributes``)."""
    return self._output
def init_prediction_logger(
    self, output_dir: str, max_bytes: int = 20000000, backup_count: int = 20
):
    """Initialize the prediction logger.

    If initialized we will log all predictions to a file called
    *predictions.json* in the `output_dir`.

    Parameters
    ----------
    output_dir
        Path to the folder in which we create the *predictions.json* file.
    max_bytes
        Passed on to logging.handlers.RotatingFileHandler
    backup_count
        Passed on to logging.handlers.RotatingFileHandler
    """
    Path(output_dir).mkdir(parents=True, exist_ok=True)

    prediction_logger = logging.getLogger(output_dir)
    prediction_logger.setLevel(logging.DEBUG)
    # Keep prediction records out of the parent loggers' handlers.
    prediction_logger.propagate = False

    handler = RotatingFileHandler(
        os.path.join(output_dir, self.PREDICTION_FILE_NAME),
        maxBytes=max_bytes,
        backupCount=backup_count,
    )
    handler.setLevel(logging.INFO)
    prediction_logger.addHandler(handler)

    self._prediction_logger = prediction_logger
def init_prediction_cache(self, max_size: int) -> None:
    """Initialize a prediction cache using the functools.lru_cache decorator

    Subsequent calls to ``self.predict`` with equal arguments are served from
    the cache.  Dict/list/tuple arguments are wrapped in hashable variants so
    ``lru_cache`` can key on them (see ``_HashDict``/``_HashList`` caveats).

    Parameters
    ----------
    max_size
        Save up to max_size most recent items.
    """
    if hasattr(self, "_predict_with_cache"):
        warnings.warn(
            "Prediction cache already initiated!", category=RuntimeWarning
        )
        return

    predict_with_cache = lru_cache(maxsize=max_size)(self.predict)

    def predict_wrapper(*args, **kwargs):
        # Convert mutable containers into hashable counterparts before they
        # reach the lru_cache; other values pass through unchanged.
        def hashable_value(value) -> Any:
            if isinstance(value, dict):
                return _HashDict(value)
            if isinstance(value, (list, tuple)):
                return _HashList(value)
            return value

        return predict_with_cache(
            *[hashable_value(arg_value) for arg_value in args],
            **{
                key: hashable_value(input_value)
                for key, input_value in kwargs.items()
            },
        )

    # Instance attributes shadow the bound methods: `predict` becomes the
    # caching wrapper while the cached original stays reachable via
    # `_predict_with_cache`.
    self.__setattr__("predict", predict_wrapper)
    self.__setattr__("_predict_with_cache", predict_with_cache)
def _log_predictions(
    self,
    batch: Iterable[Dict[str, Any]],
    predictions: Iterable[TaskPrediction],
) -> None:
    """Log predictions to a file for a model analysis and feedback sessions.

    Parameters
    ----------
    batch
        Input data to the model
    predictions
        Returned predictions from the model
    """
    for model_input, model_prediction in zip(batch, predictions):
        record = dict(
            inputs=model_input,
            prediction=sanitize(model_prediction.as_dict()),
        )
        self._prediction_logger.info(json.dumps(record))
def predict(
self,
batch: List[Dict[str, Union[str, List[str], Dict[str, str]]]],
prediction_config: PredictionConfiguration,
) -> List[TaskPrediction]:
"""Returns predictions given some input data based on the current state of the model
The keys of the input dicts in the batch must coincide with the `self.inputs` attribute.
TODO: Comply with LightningModule API + Trainer API (means move instance creation logic to Pipeline)
Parameters
----------
batch
A list of dictionaries that represents a batch of inputs.
The dictionary keys must comply with the `self.inputs` attribute.
prediction_config
Contains configurations for the prediction
Returns
-------
List of task predictions
"""
if self.training:
self.eval()
instances = [self.text_to_instance(**input_dict) for input_dict in batch]
# Filter out None instances, that is when the head could not create an instance out of the input
none_indices, not_none_instances = [], []
for i, instance in enumerate(instances):
if instance is None:
none_indices.append(i)
else:
not_none_instances.append(instance)
if not not_none_instances:
return [self.head.empty_prediction] * len(batch)
try:
forward_outputs = self.forward_on_instances(not_none_instances)
except Exception as error:
input_examples = [
example for i, example in enumerate(batch) if i not in none_indices
]
self._LOGGER.exception(error)
self._LOGGER.warning(
f"Failed to make a forward pass for '{input_examples}'"
)
return [self.head.empty_prediction] * len(batch)
predictions = []
for forward_output, | |
import pygame
import time
from pygame.constants import MOUSEBUTTONDOWN
# board = [ [ 3, 1, 6, 5, 7, 8, 4, 9, 2 ],
# [ 5, 2, 9, 1, 3, 4, 7, 6, 8 ],
# [ 4, 8, 7, 6, 2, 9, 5, 3, 1 ],
# [ 2, 6, 3, 0, 1, 5, 9, 8, 7 ],
# [ 9, 7, 4, 8, 6, 0, 1, 2, 5 ],
# [ 8, 5, 1, 7, 9, 2, 6, 4, 3 ],
# [ 1, 3, 8, 0, 4, 7, 2, 0, 6 ],
# [ 6, 9, 2, 3, 5, 1, 8, 7, 4 ],
# [ 7, 4, 5, 0, 8, 6, 3, 1, 0 ] ]
# board = [ [ 3, 1, 6, 5, 7, 8, 4, 9, 2 ],
# [ 5, 2, 9, 1, 3, 4, 7, 6, 8 ],
# [ 4, 8, 7, 6, 2, 9, 5, 3, 1 ],
# [ 2, 6, 3, 0, 1, 5, 9, 8, 7 ],
# [ 9, 7, 4, 8, 6, 0, 1, 2, 5 ],
# [ 8, 5, 1, 7, 9, 2, 6, 4, 3 ],
# [ 1, 3, 8, 0, 4, 7, 2, 0, 6 ],
# [ 6, 9, 2, 3, 5, 1, 8, 7, 4 ],
# [ 7, 4, 5, 0, 8, 6, 3, 1, 0 ] ]
# All colors used (RGB tuples)
white = (255, 255, 255)
yellow = (255, 255, 102)
black = (0, 0, 0)
red = (213, 50, 80)
green = (0, 255, 0)
green2 = (92,182,107)
dark_green = (4,75,20)
blue = (50, 153, 213)

pygame.init()

# Window size: 450x450 for the 9x9 grid plus an extra 50px strip at the
# bottom (presumably for status display - confirm against the draw code).
dis_width = 450
dis_height = 500

# NOTE(review): the original comment said "Snake's head image" (copy-paste
# from a snake game); this is the strike image - confirm its actual usage.
strike_img = pygame.image.load(r'./strike.png')

dis = pygame.display.set_mode((dis_width,dis_height))
pygame.display.set_caption('Sudoku by Nassos')
myFont = pygame.font.SysFont("Arial", 25, True)
clock = pygame.time.Clock()

# A fully solved board (no zeros); usage is outside this excerpt -
# presumably a reference solution for testing/validation.
solved_board = [ [ 3, 1, 6, 5, 7, 8, 4, 9, 2],
                 [ 5, 2, 9, 1, 3, 4, 7, 6, 8 ],
                 [ 4, 8, 7, 6, 2, 9, 5, 3, 1 ],
                 [ 2, 6, 3, 4, 1, 5, 9, 8, 7 ],
                 [ 9, 7, 4, 8, 6, 3, 1, 2, 5 ],
                 [ 8, 5, 1, 7, 9, 2, 6, 4, 3 ],
                 [ 1, 3, 8, 9, 4, 7, 2, 5, 6 ],
                 [ 6, 9, 2, 3, 5, 1, 8, 7, 4 ],
                 [ 7, 4, 5, 2, 8, 6, 3, 1, 9 ]]
# board = [ [ 3, 1, 6, 5, 7, 8, 4, 9, 2 ],
# [ 5, 2, 9, 1, 3, 4, 7, 6, 8 ],
# [ 4, 8, 7, 6, 2, 9, 5, 3, 1 ],
# [ 2, 6, 3, 0, 1, 5, 9, 8, 7 ],
# [ 9, 7, 4, 8, 6, 0, 1, 2, 5 ],
# [ 8, 5, 1, 7, 9, 2, 6, 4, 3 ],
# [ 1, 3, 8, 0, 4, 7, 2, 0, 6 ],
# [ 6, 9, 2, 3, 5, 1, 8, 7, 4 ],
# [ 7, 4, 5, 0, 8, 6, 3, 1, 0 ] ]
# board2 = [ [ 3, 1, 6, 5, 7, 8, 4, 9, 2 ],
# [ 5, 2, 9, 1, 3, 4, 7, 6, 8 ],
# [ 4, 8, 7, 6, 2, 9, 5, 3, 1 ],
# [ 2, 6, 3, 0, 1, 5, 9, 8, 7 ],
# [ 9, 7, 4, 8, 6, 0, 1, 2, 5 ],
# [ 8, 5, 1, 7, 9, 2, 6, 4, 3 ],
# [ 1, 3, 8, 0, 4, 7, 2, 0, 6 ],
# [ 6, 9, 2, 3, 5, 1, 8, 7, 4 ],
# [ 7, 4, 5, 0, 8, 6, 3, 1, 0 ] ]
def backtrack():
    """Solve the global Sudoku ``board`` in place by recursive backtracking.

    Finds the first empty cell (0), tries each candidate 1-9 that passes
    isSafe(), recurses, and — crucially — resets the cell to 0 when a
    candidate leads to a dead end.  The original version never undid its
    assignments and never returned False, so it could not actually
    backtrack and recursed unboundedly on any dead end.

    Returns:
        True if the board was completed, False if the current partial
        assignment admits no solution (caller then tries the next value).
    """
    for i in range(9):
        for j in range(9):
            if board[i][j] != 0:
                continue
            # First empty cell found: try every candidate digit.
            for candidate in range(1, 10):
                if isSafe(i, j, candidate):
                    board[i][j] = candidate
                    if backtrack():
                        return True
                    board[i][j] = 0  # undo — this candidate led to a dead end
            return False  # no digit fits this cell: backtrack to the caller
    return True  # no empty cell remains: solved
# def backtrack2():
# for i in range(9):
# for j in range(9):
# if ((board[i][j] == 0) or (board[i][j] == 0 and (isSafe(i,j, board[i][j]) == False))): # It must be solved
# print("gia row " , i , " col " ,j , " num: " , board[i][j])
# for z in range(9):
# if isSafe(i,j,z+1) == True:
# if i == 8 and j == 8:
# if isSolved == True:
# print("EEEEEEEEE")
# return True
# else:
# flag = True
# # backtrack()
# backtrack()
def isSolved(boole = False):
    """Return True when the global ``board`` is completely and consistently
    filled: no zero cells and no row/column/box conflicts.

    ``boole`` is accepted for caller compatibility but is unused.
    Prints the first offending cell before returning False.
    """
    for r in range(9):
        for c in range(9):
            if isSafe(r, c, board[r][c]) == False or board[r][c] == 0:
                print("row " , r , " col " ,c , " num: " , board[r][c])
                return False
    return True
# Checks whether it will be legal to assign num to the given row, col
# Checks whether it will be legal to assign num to the given row, col
def isSafe(row, col, num):
    """Return True if placing ``num`` at (row, col) conflicts with no other
    cell of the global ``board`` in the same row, column or 3x3 box.

    The target cell itself is excluded from each scan, so the check also
    works on a board where (row, col) is already filled.
    """
    # Same number elsewhere in this row?
    if any(board[row][x] == num and x != col for x in range(9)):
        return False
    # Same number elsewhere in this column?
    if any(board[x][col] == num and x != row for x in range(9)):
        return False
    # Same number elsewhere in the enclosing 3x3 box?  Cells sharing the
    # target's row or column are skipped here (already covered above).
    boxRow = row - row % 3
    boxCol = col - col % 3
    for r in range(boxRow, boxRow + 3):
        for c in range(boxCol, boxCol + 3):
            if board[r][c] == num and r != row and c != col:
                return False
    return True
def drawGrid():
    """Draw the Sudoku grid onto ``dis``: three thick horizontal and two
    thick vertical 3x3-box separators, then a thin border for every cell.

    The bottom 50px of the window is the status strip and is left alone.
    """
    cell = 50  # pixel size of one grid cell
    # Thick horizontal box separators.
    for y in (150, 300, 450):
        pygame.draw.line(dis, black, (0, y), (dis_width, y), 4)
    # Thick vertical box separators.
    for x in (150, 300):
        pygame.draw.line(dis, black, (x, 0), (x, dis_height - 50), 4)
    # Thin 1px outline around every individual cell.
    for gx in range(0, dis_width, cell):
        for gy in range(0, dis_height - 50, cell):
            pygame.draw.rect(dis, black, pygame.Rect(gx, gy, cell, cell), 1)
def displayBoard(tempBoard, strikes):
    """Redraw the whole frame: background, grid, the numbers of
    ``tempBoard``, and one strike icon per wrong guess, then flip."""
    dis.fill(white)
    drawGrid()
    displayNumbers(tempBoard)
    # Strike icons accumulate left-to-right along the bottom status strip.
    offset = 5
    for _ in range(strikes):
        dis.blit(pygame.transform.scale(strike_img, (30, 30)),
                 (offset, dis_height - 35))
        offset += 35
    pygame.display.update()
def gameLoop():
    """Main interactive loop: shows the timer, handles cell selection by
    mouse, digit entry via the number keys, and SPACE to reveal the
    solved board.

    NOTE(review): this block appears truncated at the chunk boundary —
    the code that uses ``num`` (checking the entry against
    ``solved_board`` and incrementing ``strikes``) is not visible here.
    """
    strikes = 0
    game_over = False
    game_close = False
    startTime = time.time()
    # Clear the bottom status strip before the first frame.
    white_rect = pygame.Rect(0, 455, dis_width, 455)
    pygame.draw.rect(dis, white, white_rect)
    displayBoard(board, strikes)
    while not game_over:
        # Repaint only the timer area each frame, then render elapsed time.
        small_white_rect = pygame.Rect(dis_width - 100, 455, dis_width, 455)
        pygame.draw.rect(dis, white, small_white_rect)
        randNumLabel = myFont.render("Time: " + str(time.time() - startTime), 1, black)
        dis.blit(randNumLabel, (340, 465))
        pygame.display.update()
        # Game-over prompt loop: Q quits, C restarts via recursion.
        while game_close == True:
            # message("You Lost! Press C-Play Again or Q-Quit", red)
            # Your_score(Length_of_snake - 1)
            # pygame.display.update()
            for event in pygame.event.get():
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_q:
                        game_over = True
                        game_close = False
                    if event.key == pygame.K_c:
                        gameLoop()
        for event in pygame.event.get():
            pos = pygame.mouse.get_pos()
            if event.type == pygame.QUIT:
                game_over = True
            if event.type == pygame.MOUSEBUTTONDOWN:
                posx = pos[0]
                posy = pos[1]
                # Snap the click to the 50px grid cell it falls in.
                x_block = (pos[0] // 50) * 50
                y_block = (pos[1] // 50) * 50
                if y_block > 400:
                    # Click landed on the status strip, not the grid.
                    continue
                displayBoard(board, strikes)
                pygame.draw.rect(dis, red, (x_block, y_block, 50, 50), 4) # Draw the small red rectangle
                # pygame.display.update()
                print("to x einai: ",x_block , " kai to y einai: " , y_block)
            if event.type == pygame.KEYDOWN: # Get the number pressed
                num = -10  # sentinel: no digit key pressed
                if event.key == pygame.K_1:
                    num = 1
                if event.key == pygame.K_2:
                    num = 2
                if event.key == pygame.K_3:
                    num = 3
                if event.key == pygame.K_4:
                    num = 4
                if event.key == pygame.K_5:
                    num = 5
                if event.key == pygame.K_6:
                    num = 6
                if event.key == pygame.K_7:
                    num = 7
                if event.key == pygame.K_8:
                    num = 8
                if event.key == pygame.K_9:
                    num = 9
                if event.key == pygame.K_SPACE: # Give the answer
                    print("PATHSES SPACE \n")
                    displayBoard(solved_board, strikes)
| |
@mock.patch.object(pxe_utils, 'get_image_info', autospec=True)
def _test_clean_up_ramdisk(self, get_image_info_mock,
clean_up_pxe_env_mock, mode='deploy'):
with task_manager.acquire(self.context, self.node.uuid) as task:
kernel_label = '%s_kernel' % mode
ramdisk_label = '%s_ramdisk' % mode
image_info = {kernel_label: ['', '/path/to/' + kernel_label],
ramdisk_label: ['', '/path/to/' + ramdisk_label]}
get_image_info_mock.return_value = image_info
task.driver.boot.clean_up_ramdisk(task)
clean_up_pxe_env_mock.assert_called_once_with(task, image_info)
get_image_info_mock.assert_called_once_with(task.node, mode=mode)
def test_clean_up_ramdisk(self):
    """Deploy ramdisk cleanup in the DEPLOYING state (default mode)."""
    self.node.provision_state = states.DEPLOYING
    self.node.save()
    self._test_clean_up_ramdisk()
def test_clean_up_ramdisk_rescue(self):
    """Rescue ramdisk cleanup in the RESCUING state (mode='rescue')."""
    self.node.provision_state = states.RESCUING
    self.node.save()
    self._test_clean_up_ramdisk(mode='rescue')
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
@mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
@mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
@mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
def test_prepare_instance_netboot(
        self, get_image_info_mock, cache_mock,
        dhcp_factory_mock, switch_pxe_config_mock,
        set_boot_device_mock):
    """Netboot prepare_instance: caches kernel/ramdisk, updates DHCP,
    switches the PXE config to the root partition UUID and sets the
    boot device to PXE persistently."""
    provider_mock = mock.MagicMock()
    dhcp_factory_mock.return_value = provider_mock
    image_info = {'kernel': ('', '/path/to/kernel'),
                  'ramdisk': ('', '/path/to/ramdisk')}
    get_image_info_mock.return_value = image_info
    with task_manager.acquire(self.context, self.node.uuid) as task:
        dhcp_opts = pxe_utils.dhcp_options_for_instance(
            task, ipxe_enabled=CONF.pxe.ipxe_enabled)
        pxe_config_path = pxe_utils.get_pxe_config_file_path(
            task.node.uuid)
        task.node.properties['capabilities'] = 'boot_mode:bios'
        # Partition image: a root UUID must be present for netboot.
        task.node.driver_internal_info['root_uuid_or_disk_id'] = (
            "30212642-09d3-467f-8e09-21685826ab50")
        task.node.driver_internal_info['is_whole_disk_image'] = False
        task.driver.boot.prepare_instance(task)
        get_image_info_mock.assert_called_once_with(
            task)
        cache_mock.assert_called_once_with(
            task, image_info, ipxe_enabled=CONF.pxe.ipxe_enabled)
        provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
        switch_pxe_config_mock.assert_called_once_with(
            pxe_config_path, "30212642-09d3-467f-8e09-21685826ab50",
            'bios', False, False, False, False, ipxe_enabled=False)
        set_boot_device_mock.assert_called_once_with(task,
                                                     boot_devices.PXE,
                                                     persistent=True)
@mock.patch('os.path.isfile', return_value=False)
@mock.patch.object(pxe_utils, 'create_pxe_config', autospec=True)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
@mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
@mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
@mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
def test_prepare_instance_netboot_active(
        self, get_image_info_mock, cache_mock,
        dhcp_factory_mock, switch_pxe_config_mock,
        set_boot_device_mock, create_pxe_config_mock, isfile_mock):
    """prepare_instance on an already-ACTIVE node: with no PXE config on
    disk (isfile -> False) the config is recreated, but the boot device
    is NOT re-set."""
    provider_mock = mock.MagicMock()
    dhcp_factory_mock.return_value = provider_mock
    image_info = {'kernel': ('', '/path/to/kernel'),
                  'ramdisk': ('', '/path/to/ramdisk')}
    get_image_info_mock.return_value = image_info
    self.node.provision_state = states.ACTIVE
    self.node.save()
    with task_manager.acquire(self.context, self.node.uuid) as task:
        dhcp_opts = pxe_utils.dhcp_options_for_instance(
            task, ipxe_enabled=CONF.pxe.ipxe_enabled)
        pxe_config_path = pxe_utils.get_pxe_config_file_path(
            task.node.uuid)
        task.node.properties['capabilities'] = 'boot_mode:bios'
        task.node.driver_internal_info['root_uuid_or_disk_id'] = (
            "30212642-09d3-467f-8e09-21685826ab50")
        task.node.driver_internal_info['is_whole_disk_image'] = False
        task.driver.boot.prepare_instance(task)
        get_image_info_mock.assert_called_once_with(
            task)
        cache_mock.assert_called_once_with(
            task, image_info, ipxe_enabled=CONF.pxe.ipxe_enabled)
        provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
        create_pxe_config_mock.assert_called_once_with(
            task, mock.ANY, CONF.pxe.pxe_config_template,
            ipxe_enabled=False)
        switch_pxe_config_mock.assert_called_once_with(
            pxe_config_path, "30212642-09d3-467f-8e09-21685826ab50",
            'bios', False, False, False, False, ipxe_enabled=False)
        # Node is already active: the boot device must not be touched.
        self.assertFalse(set_boot_device_mock.called)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
# autospec=True added for consistency with every sibling DHCPFactory patch.
@mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
@mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
@mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
def test_prepare_instance_netboot_missing_root_uuid(
        self, get_image_info_mock, cache_mock,
        dhcp_factory_mock, switch_pxe_config_mock,
        set_boot_device_mock):
    """Partition-image netboot without a root UUID: images are cached and
    DHCP is updated, but the PXE config switch and boot-device change
    must be skipped."""
    provider_mock = mock.MagicMock()
    dhcp_factory_mock.return_value = provider_mock
    image_info = {'kernel': ('', '/path/to/kernel'),
                  'ramdisk': ('', '/path/to/ramdisk')}
    get_image_info_mock.return_value = image_info
    with task_manager.acquire(self.context, self.node.uuid) as task:
        dhcp_opts = pxe_utils.dhcp_options_for_instance(
            task, ipxe_enabled=CONF.pxe.ipxe_enabled)
        task.node.properties['capabilities'] = 'boot_mode:bios'
        # Note: 'root_uuid_or_disk_id' is deliberately NOT set.
        task.node.driver_internal_info['is_whole_disk_image'] = False
        task.driver.boot.prepare_instance(task)
        get_image_info_mock.assert_called_once_with(task)
        cache_mock.assert_called_once_with(
            task, image_info, ipxe_enabled=CONF.pxe.ipxe_enabled)
        provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
        self.assertFalse(switch_pxe_config_mock.called)
        self.assertFalse(set_boot_device_mock.called)
@mock.patch.object(pxe.LOG, 'warning', autospec=True)
@mock.patch.object(pxe_utils, 'clean_up_pxe_config', autospec=True)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
# autospec=True added for consistency with every sibling DHCPFactory patch.
@mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
@mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
@mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
def test_prepare_instance_whole_disk_image_missing_root_uuid(
        self, get_image_info_mock, cache_mock,
        dhcp_factory_mock, set_boot_device_mock,
        clean_up_pxe_mock, log_mock):
    """Whole-disk image with no root UUID: a warning is logged, the PXE
    environment is cleaned up, and the node falls back to booting from
    DISK persistently."""
    provider_mock = mock.MagicMock()
    dhcp_factory_mock.return_value = provider_mock
    get_image_info_mock.return_value = {}
    with task_manager.acquire(self.context, self.node.uuid) as task:
        # ipxe_enabled passed by keyword for consistency with the other
        # dhcp_options_for_instance() calls in this file.
        dhcp_opts = pxe_utils.dhcp_options_for_instance(
            task, ipxe_enabled=CONF.pxe.ipxe_enabled)
        task.node.properties['capabilities'] = 'boot_mode:bios'
        task.node.driver_internal_info['is_whole_disk_image'] = True
        task.driver.boot.prepare_instance(task)
        get_image_info_mock.assert_called_once_with(task)
        cache_mock.assert_called_once_with(
            task, {}, ipxe_enabled=CONF.pxe.ipxe_enabled)
        provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
        self.assertTrue(log_mock.called)
        clean_up_pxe_mock.assert_called_once_with(
            task, ipxe_enabled=CONF.pxe.ipxe_enabled)
        set_boot_device_mock.assert_called_once_with(
            task, boot_devices.DISK, persistent=True)
@mock.patch('os.path.isfile', lambda filename: False)
@mock.patch.object(pxe_utils, 'create_pxe_config', autospec=True)
@mock.patch.object(deploy_utils, 'is_iscsi_boot', lambda task: True)
@mock.patch.object(noop_storage.NoopStorage, 'should_write_image',
                   lambda task: False)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
@mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
@mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
@mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
def test_prepare_instance_netboot_iscsi(
        self, get_image_info_mock, cache_mock,
        dhcp_factory_mock, switch_pxe_config_mock,
        set_boot_device_mock, create_pxe_config_mock):
    """Boot-from-volume (iSCSI) with iPXE: no image caching happens; a
    PXE config is created and switched with iscsi_boot=True, and the
    boot device is set to PXE persistently."""
    http_url = 'http://192.1.2.3:1234'
    self.config(ipxe_enabled=True, group='pxe')
    self.config(http_url=http_url, group='deploy')
    provider_mock = mock.MagicMock()
    dhcp_factory_mock.return_value = provider_mock
    vol_id = uuidutils.generate_uuid()
    # NOTE(review): '<PASSWORD>' looks like a sanitization placeholder
    # for a fake credential — verify against the original fixture.
    obj_utils.create_test_volume_target(
        self.context, node_id=self.node.id, volume_type='iscsi',
        boot_index=0, volume_id='1234', uuid=vol_id,
        properties={'target_lun': 0,
                    'target_portal': 'fake_host:3260',
                    'target_iqn': 'fake_iqn',
                    'auth_username': 'fake_username',
                    'auth_password': '<PASSWORD>'})
    with task_manager.acquire(self.context, self.node.uuid) as task:
        task.node.driver_internal_info = {
            'boot_from_volume': vol_id}
        dhcp_opts = pxe_utils.dhcp_options_for_instance(task,
                                                        ipxe_enabled=True)
        pxe_config_path = pxe_utils.get_pxe_config_file_path(
            task.node.uuid)
        task.node.properties['capabilities'] = 'boot_mode:bios'
        task.driver.boot.prepare_instance(task)
        # Booting from volume: no kernel/ramdisk to fetch or cache.
        self.assertFalse(get_image_info_mock.called)
        self.assertFalse(cache_mock.called)
        provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
        create_pxe_config_mock.assert_called_once_with(
            task, mock.ANY, CONF.pxe.pxe_config_template,
            ipxe_enabled=True)
        switch_pxe_config_mock.assert_called_once_with(
            pxe_config_path, None, boot_modes.LEGACY_BIOS, False,
            ipxe_enabled=True, ramdisk_boot=False, iscsi_boot=True)
        set_boot_device_mock.assert_called_once_with(task,
                                                     boot_devices.PXE,
                                                     persistent=True)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(pxe_utils, 'clean_up_pxe_config', autospec=True)
def test_prepare_instance_localboot(self, clean_up_pxe_config_mock,
                                    set_boot_device_mock):
    """boot_option=local: the PXE config is removed and the node is set
    to boot from DISK persistently."""
    with task_manager.acquire(self.context, self.node.uuid) as task:
        instance_info = task.node.instance_info
        instance_info['capabilities'] = {'boot_option': 'local'}
        task.node.instance_info = instance_info
        task.node.save()
        task.driver.boot.prepare_instance(task)
        clean_up_pxe_config_mock.assert_called_once_with(
            task, ipxe_enabled=CONF.pxe.ipxe_enabled)
        set_boot_device_mock.assert_called_once_with(task,
                                                     boot_devices.DISK,
                                                     persistent=True)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(pxe_utils, 'clean_up_pxe_config', autospec=True)
def test_prepare_instance_localboot_active(self, clean_up_pxe_config_mock,
                                           set_boot_device_mock):
    """boot_option=local on an ACTIVE node: PXE config is cleaned up but
    the boot device is left untouched."""
    self.node.provision_state = states.ACTIVE
    self.node.save()
    with task_manager.acquire(self.context, self.node.uuid) as task:
        instance_info = task.node.instance_info
        instance_info['capabilities'] = {'boot_option': 'local'}
        task.node.instance_info = instance_info
        task.node.save()
        task.driver.boot.prepare_instance(task)
        clean_up_pxe_config_mock.assert_called_once_with(
            task, ipxe_enabled=CONF.pxe.ipxe_enabled)
        self.assertFalse(set_boot_device_mock.called)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
@mock.patch.object(pxe_utils, 'create_pxe_config', autospec=True)
@mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
@mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
@mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
def _test_prepare_instance_ramdisk(
        self, get_image_info_mock, cache_mock,
        dhcp_factory_mock, create_pxe_config_mock,
        switch_pxe_config_mock,
        set_boot_device_mock, config_file_exits=False):
    """Shared helper for the boot_option=ramdisk prepare_instance tests.

    :param config_file_exits: (sic — typo of 'exists', kept because the
        existing test methods pass it by keyword) when True the PXE
        config file already exists on disk, so creation must be skipped.
    """
    image_info = {'kernel': ['', '/path/to/kernel'],
                  'ramdisk': ['', '/path/to/ramdisk']}
    # (duplicate return_value assignment removed)
    get_image_info_mock.return_value = image_info
    provider_mock = mock.MagicMock()
    dhcp_factory_mock.return_value = provider_mock
    self.node.provision_state = states.DEPLOYING
    with task_manager.acquire(self.context, self.node.uuid) as task:
        instance_info = task.node.instance_info
        instance_info['capabilities'] = {'boot_option': 'ramdisk'}
        task.node.instance_info = instance_info
        task.node.save()
        dhcp_opts = pxe_utils.dhcp_options_for_instance(
            task, ipxe_enabled=CONF.pxe.ipxe_enabled)
        pxe_config_path = pxe_utils.get_pxe_config_file_path(
            task.node.uuid)
        task.driver.boot.prepare_instance(task)
        get_image_info_mock.assert_called_once_with(task)
        cache_mock.assert_called_once_with(
            task, image_info, CONF.pxe.ipxe_enabled)
        provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
        if config_file_exits:
            self.assertFalse(create_pxe_config_mock.called)
        else:
            create_pxe_config_mock.assert_called_once_with(
                task, mock.ANY, CONF.pxe.pxe_config_template,
                ipxe_enabled=False)
        switch_pxe_config_mock.assert_called_once_with(
            pxe_config_path, None,
            'bios', False, ipxe_enabled=False, iscsi_boot=False,
            ramdisk_boot=True)
        set_boot_device_mock.assert_called_once_with(task,
                                                     boot_devices.PXE,
                                                     persistent=True)
# Fixed scenario/name mismatch: 'missing' must patch isfile -> False
# (no PXE config on disk) and expect creation (config_file_exits=False).
@mock.patch.object(os.path, 'isfile', lambda path: False)
def test_prepare_instance_ramdisk_pxe_conf_missing(self):
    """PXE config file absent: prepare_instance must create it."""
    self._test_prepare_instance_ramdisk(config_file_exits=False)
# Fixed scenario/name mismatch: 'exists' must patch isfile -> True
# (PXE config already on disk) and expect creation to be skipped.
@mock.patch.object(os.path, 'isfile', lambda path: True)
def test_prepare_instance_ramdisk_pxe_conf_exists(self):
    """PXE config file already present: creation must be skipped."""
    self._test_prepare_instance_ramdisk(config_file_exits=True)
@mock.patch.object(pxe_utils, 'clean_up_pxe_env', autospec=True)
@mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
def test_clean_up_instance(self, get_image_info_mock,
                           clean_up_pxe_env_mock):
    """clean_up_instance tears down the PXE env for the instance images."""
    with task_manager.acquire(self.context, self.node.uuid) as task:
        image_info = {'kernel': ['', '/path/to/kernel'],
                      'ramdisk': ['', '/path/to/ramdisk']}
        get_image_info_mock.return_value = image_info
        task.driver.boot.clean_up_instance(task)
        clean_up_pxe_env_mock.assert_called_once_with(task, image_info)
        get_image_info_mock.assert_called_once_with(task)
class PXERamdiskDeployTestCase(db_base.DbTestCase):
    """Tests for the 'ramdisk' deploy interface paired with PXE boot."""

    def setUp(self):
        super(PXERamdiskDeployTestCase, self).setUp()
        # Separate temp dirs: one for the TFTP root, one for cached images.
        self.temp_dir = tempfile.mkdtemp()
        self.config(tftp_root=self.temp_dir, group='pxe')
        self.temp_dir = tempfile.mkdtemp()
        self.config(images_path=self.temp_dir, group='pxe')
        self.config(enabled_deploy_interfaces=['ramdisk'])
        self.config(enabled_boot_interfaces=['pxe'])
        # Default every interface to 'fake' except the ones under test.
        for iface in drivers_base.ALL_INTERFACES:
            impl = 'fake'
            if iface == 'network':
                impl = 'noop'
            if iface == 'deploy':
                impl = 'ramdisk'
            if iface == 'boot':
                impl = 'pxe'
            config_kwarg = {'enabled_%s_interfaces' % iface: [impl],
                            'default_%s_interface' % iface: impl}
            self.config(**config_kwarg)
        self.config(enabled_hardware_types=['fake-hardware'])
        instance_info = INST_INFO_DICT
        self.node = obj_utils.create_test_node(
            self.context,
            driver='fake-hardware',
            instance_info=instance_info,
            driver_info=DRV_INFO_DICT,
            driver_internal_info=DRV_INTERNAL_INFO_DICT)
        self.port = obj_utils.create_test_port(self.context,
                                               node_id=self.node.id)

    @mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
    @mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
    @mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
    @mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
    @mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
    def test_prepare_instance_ramdisk(
            self, get_image_info_mock, cache_mock,
            dhcp_factory_mock, switch_pxe_config_mock,
            set_boot_device_mock):
        """prepare + deploy caches images, updates DHCP, switches PXE
        config with ramdisk_boot=True and sets PXE boot persistently."""
        provider_mock = mock.MagicMock()
        dhcp_factory_mock.return_value = provider_mock
        self.node.provision_state = states.DEPLOYING
        image_info = {'kernel': ('', '/path/to/kernel'),
                      'ramdisk': ('', '/path/to/ramdisk')}
        get_image_info_mock.return_value = image_info
        with task_manager.acquire(self.context, self.node.uuid) as task:
            dhcp_opts = pxe_utils.dhcp_options_for_instance(
                task, ipxe_enabled=CONF.pxe.ipxe_enabled)
            pxe_config_path = pxe_utils.get_pxe_config_file_path(
                task.node.uuid)
            task.node.properties['capabilities'] = 'boot_option:netboot'
            task.node.driver_internal_info['is_whole_disk_image'] = False
            task.driver.deploy.prepare(task)
            task.driver.deploy.deploy(task)
            get_image_info_mock.assert_called_once_with(task)
            cache_mock.assert_called_once_with(
                task, image_info, ipxe_enabled=CONF.pxe.ipxe_enabled)
            provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
            switch_pxe_config_mock.assert_called_once_with(
                pxe_config_path, None,
                'bios', False, ipxe_enabled=False, iscsi_boot=False,
                ramdisk_boot=True)
            set_boot_device_mock.assert_called_once_with(task,
                                                         boot_devices.PXE,
                                                         persistent=True)

    @mock.patch.object(pxe.LOG, 'warning', autospec=True)
    @mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
    @mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
    @mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
    @mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
    def test_deploy(self, mock_image_info, mock_cache,
                    mock_dhcp_factory, mock_switch_config, mock_warning):
        """deploy() returns None; a configdrive in instance_info only
        triggers a warning (ramdisk deploy cannot use it)."""
        image_info = {'kernel': ('', '/path/to/kernel'),
                      'ramdisk': ('', '/path/to/ramdisk')}
        mock_image_info.return_value = image_info
        i_info = self.node.instance_info
        i_info.update({'capabilities': {'boot_option': 'ramdisk'}})
        self.node.instance_info = i_info
        self.node.save()
        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.assertIsNone(task.driver.deploy.deploy(task))
            mock_image_info.assert_called_once_with(task)
            mock_cache.assert_called_once_with(
                task, image_info, ipxe_enabled=CONF.pxe.ipxe_enabled)
            self.assertFalse(mock_warning.called)
        # Second pass: an (unusable) configdrive must produce a warning.
        i_info['configdrive'] = 'meow'
        self.node.instance_info = i_info
        self.node.save()
        mock_warning.reset_mock()
        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.assertIsNone(task.driver.deploy.deploy(task))
            self.assertTrue(mock_warning.called)

    @mock.patch.object(pxe.PXEBoot, 'prepare_instance', autospec=True)
    def test_prepare(self, mock_prepare_instance):
        """prepare() during DEPLOYING injects boot_option=ramdisk and does
        not call prepare_instance yet."""
        node = self.node
        node.provision_state = states.DEPLOYING
        node.instance_info = {}
        node.save()
        with task_manager.acquire(self.context, node.uuid) as task:
            task.driver.deploy.prepare(task)
            self.assertFalse(mock_prepare_instance.called)
        self.assertEqual({'boot_option': 'ramdisk'},
                         task.node.instance_info['capabilities'])

    @mock.patch.object(pxe.PXEBoot, 'prepare_instance', autospec=True)
    def test_prepare_active(self, mock_prepare_instance):
        """prepare() on an ACTIVE node re-runs prepare_instance."""
        node = self.node
        node.provision_state = states.ACTIVE
        node.save()
        with task_manager.acquire(self.context, node.uuid) as task:
            task.driver.deploy.prepare(task)
            mock_prepare_instance.assert_called_once_with(mock.ANY, task)

    @mock.patch.object(pxe.PXEBoot, 'prepare_instance', autospec=True)
    def test_prepare_unrescuing(self, mock_prepare_instance):
        """prepare() while UNRESCUING also re-runs prepare_instance."""
        node = self.node
        node.provision_state = states.UNRESCUING
        node.save()
        with task_manager.acquire(self.context, node.uuid) as task:
            task.driver.deploy.prepare(task)
            mock_prepare_instance.assert_called_once_with(mock.ANY, task)

    @mock.patch.object(pxe.LOG, 'warning', autospec=True)
    @mock.patch.object(pxe.PXEBoot, 'prepare_instance', autospec=True)
    def test_prepare_fixes_and_logs_boot_option_warning(
            self, mock_prepare_instance, mock_warning):
        """A boot_option set in properties (wrong place) is corrected into
        instance_info and a warning is logged."""
        node = self.node
        node.properties['capabilities'] = 'boot_option:ramdisk'
        node.provision_state = states.DEPLOYING
        node.instance_info = {}
        node.save()
        with task_manager.acquire(self.context, node.uuid) as task:
            task.driver.deploy.prepare(task)
            self.assertFalse(mock_prepare_instance.called)
        self.assertEqual({'boot_option': 'ramdisk'},
                         task.node.instance_info['capabilities'])
        self.assertTrue(mock_warning.called)

    @mock.patch.object(deploy_utils, 'validate_image_properties',
                       autospec=True)
    def test_validate(self, mock_validate_img):
        """validate() succeeds and checks the image properties."""
        node = self.node
        node.properties['capabilities'] = 'boot_option:netboot'
        node.save()
        with task_manager.acquire(self.context, node.uuid) as task:
            task.driver.deploy.validate(task)
        self.assertTrue(mock_validate_img.called)

    @mock.patch.object(fake.FakeBoot, 'validate', autospec=True)
    @mock.patch.object(deploy_utils, 'validate_image_properties',
                       autospec=True)
    def test_validate_interface_mismatch(self, mock_validate_image,
                                         mock_boot_validate):
        """A boot interface without the ramdisk_boot capability is
        rejected before any further validation runs."""
        node = self.node
        node.boot_interface = 'fake'
        node.save()
        self.config(enabled_boot_interfaces=['fake'],
                    default_boot_interface='fake')
        with task_manager.acquire(self.context, node.uuid) as task:
            error = self.assertRaises(exception.InvalidParameterValue,
                                      task.driver.deploy.validate, task)
            error_message = ('Invalid configuration: The boot interface must '
                             'have the `ramdisk_boot` capability. You are '
                             'using an incompatible boot interface.')
            self.assertEqual(error_message, str(error))
            self.assertFalse(mock_boot_validate.called)
            self.assertFalse(mock_validate_image.called)

    @mock.patch.object(pxe.PXEBoot, 'validate', autospec=True)
    def test_validate_calls_boot_validate(self, mock_validate):
        """validate() delegates to the boot interface's validate()."""
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.driver.deploy.validate(task)
            mock_validate.assert_called_once_with(mock.ANY, task)

    @mock.patch.object(manager_utils, 'restore_power_state_if_needed',
                       autospec=True)
    @mock.patch.object(manager_utils, 'power_on_node_if_needed',
                       autospec=True)
    @mock.patch.object(pxe.LOG, 'warning', autospec=True)
    @mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
    @mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
    @mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
    @mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
    def test_deploy_with_smartnic_port(
            self, mock_image_info, mock_cache,
            mock_dhcp_factory, mock_switch_config, mock_warning,
            power_on_node_if_needed_mock, restore_power_state_mock):
        """Smart-NIC flow: the node is powered on for deploy if needed and
        its previous power state restored afterwards."""
        image_info = {'kernel': ('', '/path/to/kernel'),
                      'ramdisk': ('', '/path/to/ramdisk')}
        mock_image_info.return_value = image_info
        i_info = self.node.instance_info
        i_info.update({'capabilities': {'boot_option': 'ramdisk'}})
        self.node.instance_info = i_info
        self.node.save()
        with task_manager.acquire(self.context, self.node.uuid) as task:
            power_on_node_if_needed_mock.return_value = states.POWER_OFF
            self.assertIsNone(task.driver.deploy.deploy(task))
            mock_image_info.assert_called_once_with(task)
            mock_cache.assert_called_once_with(
                task, image_info, ipxe_enabled=CONF.pxe.ipxe_enabled)
            self.assertFalse(mock_warning.called)
            power_on_node_if_needed_mock.assert_called_once_with(task)
            restore_power_state_mock.assert_called_once_with(
                task, states.POWER_OFF)
        # Second pass: an (unusable) configdrive must produce a warning.
        i_info['configdrive'] = 'meow'
        self.node.instance_info = i_info
        self.node.save()
        mock_warning.reset_mock()
        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.assertIsNone(task.driver.deploy.deploy(task))
            self.assertTrue(mock_warning.called)
class PXEValidateRescueTestCase(db_base.DbTestCase):
def setUp(self):
super(PXEValidateRescueTestCase, self).setUp()
for iface in drivers_base.ALL_INTERFACES:
impl = 'fake'
if iface == 'network':
impl = 'flat'
if iface == 'rescue':
impl = 'agent'
if iface == 'boot':
impl = 'pxe'
config_kwarg = {'enabled_%s_interfaces' % iface: [impl],
'default_%s_interface' % iface: impl}
self.config(**config_kwarg)
self.config(enabled_hardware_types=['fake-hardware'])
driver_info = DRV_INFO_DICT
driver_info.update({'rescue_ramdisk': 'my_ramdisk',
'rescue_kernel': 'my_kernel'})
instance_info = INST_INFO_DICT
instance_info.update({'rescue_password': 'password'})
n = {
'driver': 'fake-hardware',
'instance_info': instance_info,
'driver_info': driver_info,
'driver_internal_info': DRV_INTERNAL_INFO_DICT,
}
self.node = | |
from celery import Celery,chain
from app import app,celery,db
import redisearch
import time,sys
import pandas as pd
import billiard as mp #multiprocessing substitute to enable daemon
import scipy.stats
import numpy as np
import os
sys.path.insert(0, 'app')
import controller.utils as utils
from timeit import default_timer as timer
# input: table
# input: table
@celery.task(bind=True)
def inittbl(self,filename,cpath):
    """Preprocess an uploaded mutation table into prediction-ready rows.

    Accepts .txt (sequence + substituted base per line), .vcf, .tsv or
    .csv input.  For coordinate-based input (.vcf/.tsv/.csv) the flanking
    sequence is read from the gzipped reference chromosomes under
    ``cpath``.  The uploaded file is deleted after parsing.

    Returns an error string when a ``mutated_from`` base disagrees with
    the reference genome, otherwise a list of rows sorted by input order:
    [rowidx, 12mer_seq, escore_seq, seq_as_int, 0, 0, "None"].
    """
    self.update_state(state='PROGRESS',
                      meta={'current': 0, 'total': 1, 'status': 'Preprocessing input...'})
    kmer = 6
    start = time.time()
    file_extension = os.path.splitext(filename)[1]
    result = []
    error = ""
    # TODO: if fast enough, we can also put error checking in here
    if file_extension == ".txt":
        with open(filename) as f:
            idx = 0
            for line in f:
                if "\t" in line:
                    line = line.split("\t")
                else:
                    line = line.split()
                idx += 1
                # line[1] is the base mid nucleotide mutated to
                escore_seq = line[0] + line[1]
                # 11 bases around the middle plus the substituted base = 12mer
                mid_seq = escore_seq[len(escore_seq)//2-6:len(escore_seq)//2+5] + line[1]
                result.append([idx,mid_seq,escore_seq,utils.seqtoi(mid_seq),0,0,"None"])
    else:
        if file_extension == ".vcf":
            # Header-less VCF: drop the ID column (index 2).  ``columns=``
            # keyword used because positional axis was removed in pandas 2.0.
            df = pd.read_csv(filename,sep="\t",header=None).drop(columns=2)
            df = df.rename(columns={0:"chromosome",1:"pos",3:"mutated_from",4:"mutated_to"})
            df['chromosome'] = df['chromosome'].map(lambda x:x.replace("chr",""))
        else:
            # must be .tsv or .csv since the extension was checked upstream
            separator = "\t" if file_extension == ".tsv" else ","
            df = pd.read_csv(filename, sep=separator)
            # if icgc then only take a subset of the columns
            if set(['chromosome','chromosome_start','mutation_type','mutated_from_allele','mutated_to_allele']).issubset(df.columns):
                df = df[['chromosome','chromosome_start','mutation_type','mutated_from_allele','mutated_to_allele']]
                # only take single base substitutions
                df = df[df['mutation_type'].apply(lambda x: "single base substitution" == x)].drop(columns='mutation_type').drop_duplicates()
                df = df.rename(columns={"chromosome_start":"pos","mutated_from_allele":"mutated_from","mutated_to_allele":"mutated_to"})
            else: # ['chromosome', 'chromosome_pos', 'mutated_from', 'mutated_to']
                df = df.rename(columns={"chromosome_pos":"pos","mutated_from_allele":"mutated_from","mutated_to_allele":"mutated_to"})
        grouped = df.groupby('chromosome',sort=True)
        dataset = {str(key):item for key,item in grouped}
        for cidx in [str(a) for a in range(1,23)] + ['X','Y']:
            self.update_state(state='PROGRESS',
                              meta={'current': 0, 'total': 1, 'status': 'Preprocessing input for chromosome {}...'.format(cidx)})
            if cidx not in dataset:
                continue
            print("Iterating dataset for chromosome {}...".format(cidx))
            chromosome = utils.get_chrom(cpath + "/chr." + str(cidx) + '.fa.gz')
            for idx,row in dataset[cidx].iterrows():
                pos = row['pos'] - 1  # input positions are 1-based
                if row['mutated_from'] != chromosome[pos]:
                    cver = cpath.split("/")[-1]
                    error = "For the input mutation %s>%s at position %s in chromosome %s, the mutated_from nucleotide (%s) does not match the nucleotide in the %s reference genome (%s). Please check the input data and verify that the correct version of the reference human genome was selected in the Data Submission Form." % (row['mutated_from'], row['mutated_to'], row['pos'], row['chromosome'], row['mutated_from'], cver, chromosome[pos])
                    break
                seq = chromosome[pos-kmer+1:pos+kmer] + row['mutated_to'] #-5,+6
                # for escore, just use 8?
                escore_seq = chromosome[pos-9+1:pos+9] + row['mutated_to']
                result.append([idx,seq,escore_seq,utils.seqtoi(seq),0,0,"None"]) #rowidx,seq,escore_seq,val,diff,t,pbmname
            if error:
                break
    # finish parsing the file, delete it
    if filename.startswith(app.config['UPLOAD_FOLDER']):
        utils.delete_file(filename)
    if error:
        return error
    # restore input order (the lambda no longer shadows ``result``)
    result = sorted(result, key=lambda r: r[0])
    # example row in result: [73, 'CCAACCAACCCA', 'ATTCCAACCAACCCCCTA', 5263444, 0, 0, 'None']
    print("Time to preprocess: {:.2f}secs".format(time.time()-start))
    return result
#==================================== Prediction Part ====================================
def predict(predlist, dataset, ready_count,
            filteropt=1, filterval=1, spec_ecutoff=0.4, nonspec_ecutoff=0.35):
    '''
    Worker: score every input sequence against each TF prediction file in predlist.

    predlist    -- list of per-TF prediction file paths assigned to this worker
    dataset     -- preprocessed rows; only row[:4] = (rowidx, 12mer, 18mer, seqidx) is used here
    ready_count -- shared multiprocessing counter, incremented once per finished TF
                   (read by the parent process for progress reporting)

    for the container list, key is a tuple of: (rowidx,sequence,seqidx)
    and each element in value is a list if: [diff,z-score,pbmname]
    return:
     filteropt=1: diff,z_score,tfname
     filteropt=2: diff,p_val,escore,tfname
    '''
    buggedtf = 0  # count of prediction files skipped as truncated/corrupt
    #[96, 'TCATGGTGGGTT', GCTTCATGGTGGGTGGAT, 13872815, 0, 0, '-'] -- 37, 'GCCCAGAAAGGA', 9773096
    if filteropt == 1: #t-value
        # Seed each row with one placeholder entry so the "replace the weakest"
        # logic below always has something to compare against.
        container = {tuple(row[:4]):[[0,0,1,"None","None"]] for row in dataset} # rowidx,12mer,18mer,seqidx : [diff,z,p,bind,pbmname]
    else: #p-value
        # leave this empty as for p-value, we don't have to compare and the size is dynamic
        container = {tuple(row[:4]):[] for row in dataset}
    test_total_time = 0
    # iterate for each transcription factor
    for i in range(0,len(predlist)):
        start = time.time()
        # File names look like <dir>/<prefix>.<pbm.name.parts>.<ext>; keep the middle.
        pbmname = '.'.join(map(str,predlist[i].split(".")[1:-1]))
        print("Processing " + pbmname)
        with open(predlist[i], 'r') as f:
            tflist = pd.read_csv(f, delimiter=' ').round(5).values.tolist()
        if len(tflist) < 4**12:
            # A complete prediction file has one row per possible 12-mer (4**12);
            # anything shorter is a known-bad file, so skip it.
            print("Skip %s since it has less rows than 4**12" % pbmname)
            buggedtf += 1
            continue
        for row_key in container:
            seqidx = row_key[3]
            diff = tflist[seqidx][0]
            zscore = tflist[seqidx][1]
            if np.isnan(zscore):
                zscore = 0
            if np.isnan(diff):
                diff = 0
            # Two-sided p-value from the z-score.
            pval = scipy.stats.norm.sf(abs(zscore))*2
            add = True
            if filteropt == 1:
                # if z-score is chosen then filterval is the maximum of item shown
                if len(container[row_key]) >= filterval:
                    # Find the entry with the smallest |z|; replace it only if
                    # this TF's |z| is larger, otherwise drop the candidate.
                    least_idx = min(enumerate(container[row_key]),key=lambda x:abs(x[1][1]))[0]
                    if abs(tflist[seqidx][1]) > abs(container[row_key][least_idx][1]):
                        del container[row_key][least_idx]
                    else:
                        add = False
            # filteropt = 2: filterval is the p-value cutoff
            elif pval > filterval:
                add = False
            # E-score calculation is here
            if add:
                if spec_ecutoff == -1 or nonspec_ecutoff == -1:
                    # E-score disabled by caller; record binding status as unavailable.
                    container[row_key].append([diff,zscore,pval,"N/A",pbmname])
                else:
                    test_start = timer()
                    # E-score calculation: 0.05 seconds each
                    # For 10k rows, total: 141.34secs, from e-score 128.56331secs
                    # For 50k rows, total: 771.42 secs, from e-score: 752.123secs
                    # another example: 2547.41secs, from e-score: 2523.96897secs
                    isbound = utils.isbound_escore_18mer(row_key[2],pbmname,app.config['ESCORE_DIR'],spec_ecutoff,nonspec_ecutoff)
                    container[row_key].append([diff,zscore,pval,isbound,pbmname])
                    test_end = timer()
                    test_total_time += (test_end-test_start)
        print("Total e-score time %.5f" % test_total_time)
        # Tell the parent process one more TF is done (progress bar).
        ready_count.value += 1
        print("Total running time for {}: {:.2f}secs".format(pbmname,time.time()-start))
    # remove seqidx and 18mer as it is not needed anymore
    # (keys shrink from (rowidx, 12mer, 18mer, seqidx) to (rowidx, 12mer))
    newcontainer = {}
    for row_key in container:
        newcontainer[row_key[:-2]] = container[row_key]
    return newcontainer
def read_gapfile(gapfile):
    """Read a CSV with `upbm_filenames` and `gapmodel` columns and return a
    {upbm_filename: gapmodel} mapping."""
    table = pd.read_csv(gapfile)
    return {fname: gap for fname, gap in zip(table.upbm_filenames, table.gapmodel)}
def format2tbl(tbl, gene_names, filteropt=1):
    '''
    Flatten the aggregated prediction table into column names + row dicts.

    Input:
     tbl is a dictionary of (rowidx,seq):[diff,zscore,tfname] or [diff,p-val,escore,tfname]
    Returns (colnames, datavalues) ready for CSV/redis output.
    '''
    # Build pbm-file -> HUGO gene symbols map; mapping file lines look like
    # "pbmname:gene1,gene2,...".
    pbmtohugo = {}
    with open(app.config['PBM_HUGO_MAPPING']) as handle:
        for raw in handle:
            fields = raw.strip().split(":")
            pbmtohugo[fields[0]] = fields[1].split(",")
    #gapdata = read_gapfile(app.config['GAP_FILE'])
    datavalues = []
    for row_key in sorted(tbl.keys()):
        entries = tbl[row_key]
        if not entries:  # probably empty row
            continue
        rownum = row_key[0]
        seq = row_key[1]
        # Positions 0-10 of seq hold the wild-type 11-mer; position 11 holds
        # the mutant base that replaces position 5.
        wild = seq[0:5] + seq[5] + seq[6:11]
        mut = seq[0:5] + seq[11] + seq[6:11]
        # Strongest |z| first.
        for row_val in sorted(entries, reverse=True, key=lambda v: abs(v[1])):
            # row_val layout: [diff, zscore, pval, isbound, pbmname]
            rowdict = {'row': rownum, 'wild': wild, 'mutant': mut, 'diff': row_val[0]}
            pbmname = row_val[4]
            rowdict['z_score'] = row_val[1]
            rowdict['p_value'] = row_val[2]
            rowdict['binding_status'] = row_val[3]
            if pbmname == 'None':
                rowdict['TF_gene'] = ""
                rowdict['pbmname'] = "None"
                #rowdict['gapmodel'] = "None" # vmartin: comment for now
            else:
                rowdict['TF_gene'] = ",".join([gene for gene in pbmtohugo[pbmname] if gene in gene_names])
                rowdict['pbmname'] = pbmname
                #rowdict['gapmodel'] = gapdata[pbmname] # vmartin: comment for now
            datavalues.append(rowdict)
    #colnames = ["row","wild","mutant","diff","z_score","p_value","TF_gene","binding_status","gapmodel","pbmname"]
    colnames = ["row","wild","mutant","diff","z_score","p_value","TF_gene","binding_status","pbmname"]
    return colnames, datavalues
def postprocess(datalist, gene_names, filteropt=1, filterval=1):
    '''
    Aggregate the result from the different worker processes and format it.

    datalist  -- list of per-worker dicts keyed by (rowidx, seq); each value is
                 a list of [diff, zscore, pval, isbound, pbmname] entries.
    filteropt -- 1 keeps the `filterval` entries with the largest |z| per row;
                 2 keeps every entry below the p-value cutoff.
    Returns (colnames, datavalues) from format2tbl.
    '''
    maintbl = {}
    for ddict in datalist:
        if not maintbl:
            # First worker's dict becomes the accumulator.
            maintbl = ddict
        elif filteropt == 1:  # z-score: cap each row at filterval entries
            for row_key in ddict:
                for row_val in ddict[row_key]:
                    # Locate the current weakest entry (smallest |z|).
                    least_idx = min(enumerate(maintbl[row_key]), key=lambda x: abs(x[1][1]))[0]
                    # row_val[1] is the z-value.  Only admit the incoming entry
                    # when it beats the weakest one, mirroring the filtering in
                    # predict().  (Previously the append was unconditional, so
                    # merged rows could grow beyond filterval entries.)
                    if abs(row_val[1]) > abs(maintbl[row_key][least_idx][1]):
                        del maintbl[row_key][least_idx]
                        maintbl[row_key].append(row_val)
        else:  # filteropt == 2 -- p-value: every surviving entry is kept
            for row_key in ddict:
                maintbl[row_key].extend(ddict[row_key])
    return format2tbl(maintbl, gene_names, filteropt)
#==========================================================
@celery.task()
def drop_index(task_id):
    '''
    Celery task: delete the RediSearch index and cached result rows for a
    finished task (scheduled after the result's retention window).
    '''
    print("Remove key/index for %s from redis" % task_id)
    redisearch.Client(task_id).drop_index()
    # Drop both the result rows and the cached column list.
    for key in (task_id, "%s:cols" % task_id):
        db.delete(key)
def savetoredis(req_id, colnames, datavalues, expired_time):
    """Store the result rows in RediSearch under req_id and schedule cleanup
    after expired_time seconds."""
    db.hmset("%s:cols" % req_id, {'cols': colnames})
    client = redisearch.Client(req_id)
    # Columns whose names contain any of these markers are indexed numerically;
    # everything else is text.  All fields are sortable.
    numeric_markers = ("score", "diff", "row", "z_score", "p_value")
    schema = []
    for col in colnames:
        if any(marker in col for marker in numeric_markers):
            schema.append(redisearch.NumericField(col, sortable=True))
        else:
            schema.append(redisearch.TextField(col, sortable=True))
    client.create_index(schema)
    # One document per result row, keyed "<req_id>_<rownum>".
    for i, rowdict in enumerate(datavalues):
        fields = {col: rowdict[col] for col in colnames}
        client.add_document("%s_%d" % (req_id, i), **fields)
    # ---- set expiry for columns and documents ----
    #db.expire("%s:cols"%req_id,expired_time) let's comment for now and see how it goes
    drop_index.apply_async((req_id,), countdown=expired_time)
    #https://github.com/MehmetKaplan/Redis_Table
@celery.task(bind=True)
def do_prediction(self, intbl, selections, gene_names,
filteropt=1, filterval=1, spec_ecutoff=0.4, nonspec_ecutoff=0.35):
'''
intbl: preprocessed table
filteropt: 1 for highest t-val, 2 for p-val cutoff
filterval: # TFs for opt 1 and p-val cutoff for opt 2
'''
if type(intbl) is str: # got an error in the pipeline from inittbl
return {'current': 1, 'total': 1, 'error': intbl}
# intbl: #rowidx,seq,val,diff,t,pbmname,escore_seq
start_time = time.time()
#while not inittask.ready():
# time.sleep(1)
#intbl = inittask.get()
# move the comment here for testing
pool = mp.Pool(processes=app.config['PCOUNT'])
predfiles = [app.config['PREDDIR'] + "/" + s for s in selections] # os.listdir(preddir)
preds = utils.chunkify(predfiles,app.config['PCOUNT']) # chunks the predfiles for each process
# need to use manager here
shared_ready_sum = mp.Manager().Value('i', 0)
async_pools = [pool.apply_async(predict, (preds[i], intbl, shared_ready_sum, filteropt, filterval, spec_ecutoff, nonspec_ecutoff)) for i in range(0,len(preds))]
# run the job, update progress bar
total = len(predfiles)
while not all([p.ready() for p in async_pools]):
time.sleep(2) # super important to avoid checking every loop
self.update_state(state='PROGRESS',
meta={'current': shared_ready_sum.value, 'total': total, 'status': 'Processing input data...'})
res = [p.get() for p in async_pools]
self.update_state(state='PROGRESS',
meta={'current': shared_ready_sum.value, 'total': total, 'status': 'post-processing'})
print("Terminate all children process..")
pool.terminate() # terminate to kill all child processes !!! Like.. super important,
# to avoid memory leak, seriously...
| |
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
# Copyright (c) 2017, Battelle Memorial Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation
# are those of the authors and should not be interpreted as representing
# official policies, either expressed or implied, of the FreeBSD
# Project.
#
# This material was prepared as an account of work sponsored by an
# agency of the United States Government. Neither the United States
# Government nor the United States Department of Energy, nor Battelle,
# nor any of their employees, nor any jurisdiction or organization that
# has cooperated in the development of these materials, makes any
# warranty, express or implied, or assumes any legal liability or
# responsibility for the accuracy, completeness, or usefulness or any
# information, apparatus, product, software, or process disclosed, or
# represents that its use would not infringe privately owned rights.
#
# Reference herein to any specific commercial product, process, or
# service by trade name, trademark, manufacturer, or otherwise does not
# necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors
# expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY
# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
import logging
from volttron.platform.agent import utils
from helpers import *
from measurement_type import MeasurementType
from interval_value import IntervalValue
from neighbor_model import NeighborModel
from const import *
from vertex import Vertex
from timer import Timer
utils.setup_logging()
_log = logging.getLogger(__name__)
class BulkSupplier_dc(NeighborModel):
# BulkSupplier NeighborModel subclass - Represents non-transactive
# neighbor, including demand charges
#
# Created to represent large, non-transactive electricity supplier BPA in
# its relationship to a municipality.
# - Introduces new properties to keep track of peak demand.
# - Calls on a new function to determine hour type (HLH or LLH).
# - Mines tables to determine monthly electricity and demand rates in HLH
# and LLH hour types.
def __init__(self):
    """Initialize the BulkSupplier_dc neighbor model."""
    super(BulkSupplier_dc, self).__init__()
    # This neighbor does not participate in transactive negotiation; it is a
    # conventional (bulk) electricity supplier.
    self.transactive = False
def update_dc_threshold(self, mkt):
    """Keep track of the month's demand-charge threshold.

    mkt - Market object

    This method should be called prior to using the demand threshold; in
    reality the threshold changes only during peak periods.  The preferred
    source is a MeterPoint that tracks average demand; if none is
    configured, the threshold is inferred from the scheduled power in the
    nearest active time interval.  At each month rollover the threshold is
    "reset" by keeping 80% of the prior month's final value.
    """
    # Find the MeterPoint object that is configured to measure average demand
    # for this NeighborModel. The determination is based on the meter's MeasurementType.
    mtr = [x for x in self.meterPoints if x.measurementType == MeasurementType.AverageDemandkW]
    if len(mtr) == 0:
        # No appropriate MeterPoint object was found. The demand threshold
        # must be inferred.
        # Gather the active time intervals and find the current (soonest) one.
        ti = mkt.timeIntervals
        ti.sort(key=lambda x: x.startTime)
        # Find current demand d that corresponds to the nearest time interval.
        d = find_obj_by_ti(self.scheduledPowers, ti[0])  # [avg.kW]
        # Update the inferred demand; the threshold never decreases mid-month.
        self.demandThreshold = max([0, self.demandThreshold, d.value])  # [avg.kW]
    else:
        # An appropriate MeterPoint object was found. The demand threshold
        # may be updated from the MeterPoint object's current measurement.
        self.demandThreshold = max([0, self.demandThreshold, mtr[0].currentMeasurement])  # [avg.kW]
        if len(mtr) > 1:
            # More than one appropriate MeterPoint object was found. This is a
            # problem. Warn, but continue.  (Typo fixed: "damand" -> "demand".)
            _log.warning('The BulkSupplier_dc object is associated with too many average-demand meters')
    # The demand threshold should be reset in a new month. First find the
    # current month number mon.
    mon = Timer.get_cur_time().month
    if mon != self.demandMonth:
        # This must be the start of a new month. The demand threshold must be
        # reset. For now, "resetting" means using a fraction (e.g., 80%) of
        # the final demand threshold in the prior month.
        self.demandThreshold = 0.8 * self.demandThreshold
        self.demandMonth = mon
def update_vertices(self, mkt):
# Creates active vertices for a non-transactive neighbor, including demand
# charges.
#
# INPUTS:
# mkt - Market object
#
# OUTPUTS:
# - Updates self.activeVertices for active time intervals.
# Gather active time intervals
time_intervals = mkt.timeIntervals # TimeInterval objects
# Get the maximum power maxp for this neighbor.
maximum_power = self.object.maximumPower # [avg.kW]
# The maximum power property is meaningful for both imported (p>0) and
# exported (p<0) electricity, but this formulation is intended for
# importation (power>0) from an electricity supplier. Warn the user and
# return if the maximum power is negative.
if maximum_power < 0:
_log.warning('Maximum power must be positive in BulkSupplier_dc.m')
_log.warning('Returning without creating active vertices for ' + self.name)
return
# Get the minimum power for this neighbor.
minimum_power = self.object.minimumPower # [avg.kW]
# Only importation is supported from this non-transactive neighbor.
if minimum_power < 0:
_log.warning('Minimum power must be positive in "BulkSupplier_dc.m')
_log.warning('Returning without creating active vertices for ' + self.name)
return
# Cost coefficient a0. This is unavailable from a supply curve, so it
# must be determined directly from the first, constant cost parameter.
# It does NOT affect marginal pricing.
a0 = self.costParameters[0] # [$/h]
# Full-power loss at is defined by the loss factor property and the
# maximum power.
full_power_loss = maximum_power * self.object.lossFactor # [avg.kW]
# Minimum-power loss at Vertex 1 is a fraction of the full-power loss.
# (Power losses are modeled proportional to the square of power
# transfer.)
minimum_power_loss = (minimum_power / maximum_power) ** 2 * full_power_loss # [avg.kW]
# Index through active time intervals
for i in range(len(time_intervals)):
# Find and delete active vertices in the indexed time interval.
# These vertices shall be recreated.
self.activeVertices = [x for x in self.activeVertices if x != time_intervals[i]]
# Find the month number for the indexed time interval start time.
# The month is needed for rate lookup tables.
month_number = time_intervals[i].startTime.month
if is_heavyloadhour(time_intervals[i].startTime):
# The indexed time interval is an HLH hour. The electricity rate
# is a little higher during HLH hours, and demand-charges may
# apply.
# Look up the BPA energy rate for month_number. The second
# parameter is HLH = 1 (i.e., column 1 of the table).
energy_rate = bpa_energy_rate[month_number-1][0] # HLH energy rate [$/kWh]
# Four active vertices are initialized:
# #1 at | |
#!/usr/bin/env python3
import click
import hashlib
import os
import pid
import platform
import random
import shutil
import socket
import sys
import time
import toml
import ujson
from collections import Counter, OrderedDict, defaultdict
from dataclasses import dataclass
from datetime import date, datetime, timedelta
from datetime import time as dttime
from typing import Union, Generator, NewType, Optional
from . import utils
from .data import FigData, File, History, Log, Location, Fragment, ZstdJson
from .figtypes import parse_expressions, SearchExpression, FsFile
from .remote import Host
from .utils import dprint, HashThread, CallableCache, GlobFilter
def resolve_figdir(figdir_cli_arg: Optional[str] = None) -> str:
    """Return the absolute fig data directory.

    Precedence: the explicit CLI argument, then $XDG_CONFIG_HOME/fig, then
    ~/.config/fig.

    Bugfix: the signature previously read `figdir_cli_arg=Union[None, str]`,
    which made the typing construct itself the *default value* (truthy!), so a
    no-argument call would try to use the Union object as a path.
    """
    if figdir_cli_arg:
        path = figdir_cli_arg
    elif 'XDG_CONFIG_HOME' in os.environ:
        path = os.path.join(os.environ['XDG_CONFIG_HOME'], 'fig')
    else:
        path = os.path.join(os.environ['HOME'], '.config', 'fig')
    return os.path.abspath(path)
@click.group()
@click.option('--figdir', default=None, envvar='FIG_DIR')
@click.option('--debug', is_flag=True, default=False, envvar='FIG_DEBUG')
@click.option('--config', default="config.toml", envvar='FIG_CONFIG')
@click.pass_context
def cli(ctx, figdir, debug, config):
    """Root command group: open the fig database and stash a Work context."""
    figdir = resolve_figdir(figdir)
    data = FigData(figdir, debug=debug)
    # Every subcommand except `init` requires an existing database.
    if click.get_current_context().invoked_subcommand != "init" and data.db is None:
        print(f"unable to open database at {figdir}; do you need to run `fig init`?")
        raise SystemExit(1)
    ctx.obj = Work(data, config=load_config_file(figdir, config), debug=debug)
def cli_wrapper():
    """Remove single '-' arguments into a separate array, then run the CLI.

    Bugfix: the original test `x[0] == '-' and x[1] != '-'` raised IndexError
    for a bare "-" argument (the conventional stdin placeholder); a length
    guard now leaves "-" untouched in sys.argv.
    """
    global SINGLE_DASH_ARGS
    keep = "-0 -1 -print0 -print1".split()
    SINGLE_DASH_ARGS = tuple(
        x for x in sys.argv
        if len(x) > 1 and x[0] == '-' and x[1] != '-' and x not in keep
    )
    sys.argv = [x for x in sys.argv if x not in SINGLE_DASH_ARGS]
    cli()
class OrderedCounter(Counter, OrderedDict):
    """A Counter that remembers the order in which elements were first seen."""

    def __repr__(self):
        return "{}({!r})".format(type(self).__name__, OrderedDict(self))

    def __reduce__(self):
        # Pickle via an OrderedDict snapshot so ordering survives round-trips.
        return self.__class__, (OrderedDict(self),)
class AppLog:
    """Records one command invocation: environment metadata plus every output
    line captured during the run."""

    def __init__(self, command):
        self.command = command
        self.argv = sys.argv
        self.cwd = os.getcwd()
        self.hostname = socket.gethostname()
        self.started = datetime.now()
        self.stdout_count = 0
        self.stderr_count = 0
        # Each entry is (epoch timestamp, went-to-stderr flag, message).
        self.output = []

    def __call__(self, stderr, mesg):
        """Capture one output line and bump the matching stream counter."""
        self.output.append((time.time(), stderr, mesg))
        counter = 'stderr_count' if stderr else 'stdout_count'
        setattr(self, counter, getattr(self, counter) + 1)
class StatusKeeper:
    """A Counter that displays ephemeral status messages for all counter
    operations as well as permanent messages for certain counter types. In
    printzero mode, only paths are printed and ephemeral messages are not
    shown."""

    def __init__(self, preseed=None, total_files_expected=None, log=None, print0=False):
        # log: optional callable(stderr_flag, mesg) (e.g. an AppLog) that
        # records everything printed.
        self.log = log
        # print0: emit NUL-terminated paths only (for piping into `xargs -0`).
        self.print0 = print0
        self.c = OrderedCounter()
        if preseed:
            self.preseed(preseed)
        self.total_files_expected = total_files_expected
        self.progress_count = 0
        # Routine reasons shown only transiently, never as permanent lines.
        self.ephemeral_reasons = "ignored already-known unchanged ok stat-updated".split()
        self.is_tty = sys.stdout.isatty()
        self.tty_cols, _ = shutil.get_terminal_size()
        self.disabled = False
        self.time_start = time.time()

    def disable(self):
        # Suppress all further output.
        self.disabled = True

    def preseed(self, preseed: list):
        # Register counter labels up front (count 0) so they keep a stable
        # position in the stats line ordering.
        self.c.update({x: 0 for x in preseed})

    def __call__(self, reason, path, highlight=None, sha256=None, flags="", extra: Optional[str] = None, coverage=None):
        """Count *reason* and display a status line for *path*."""
        self.c[reason] += 1
        try:
            path.encode()
        except UnicodeEncodeError:
            # Undecodable filename (lone surrogates); fall back to raw bytes.
            path = path.encode('utf8', 'surrogateescape')
        if self.print0:
            self.print(path, end='\0')
            return
        # Render embedded CR/LF with shell $'...' notation instead of letting
        # them break the status line.
        if '\r' in path:
            path = path.replace('\r', r"$'\r'")
        if '\n' in path:
            path = path.replace('\n', r"$'\n'")
        if highlight and self.is_tty:
            path = utils.highlight(path, highlight)
        sha256 = sha256 or "none"
        extra = f" {extra}" if extra else ""
        cov = f"[{coverage}] " if coverage is not None else ""
        mesg = f"{reason:<16.16} {sha256:<8.8} {cov}{path}{extra}"
        if flags:
            mesg += " " + flags
        if reason in self.ephemeral_reasons:
            self.ephemeral(mesg, log=True)
        else:
            # Permanent message: clear any ephemeral line first, then print.
            self.ephemeral()
            self.print(mesg)

    def print(self, mesg, ephemeral=False, log=True, **nargs):
        """Low-level print that honors disable() and forwards to the log."""
        if self.disabled:
            return
        if ephemeral:
            # Overwrite the current stderr line; \033[K erases to end of line.
            print('\r' + mesg + '\033[K\r', end='', flush=True, file=sys.stderr)
            stderr = True
        else:
            print(mesg, **nargs)
            stderr = nargs.get('file', None) == sys.stderr
        if log and self.log:
            self.log(stderr, mesg)

    def ephemeral(self, mesg="", log=False):
        # Truncate so the message never wraps (wrapping would defeat the \r
        # overwrite); an empty mesg just clears the line.
        if self.is_tty:
            mesg = mesg[:self.tty_cols - 1]
        self.print(mesg, ephemeral=True, log=log)

    def progress(self, path, step=1):
        # "[n/total] path" (total omitted when unknown).
        self.progress_count += step
        last = f"/{self.total_files_expected}" if self.total_files_expected else ""
        self.ephemeral(f"[{self.progress_count}{last}] {path}")

    def progress_percent(self, step=1, mesg=None):
        # NOTE(review): divides by total_files_expected — callers must have set
        # it to a nonzero value before using this variant.
        self.progress_count += step
        pct = 100 * self.progress_count / self.total_files_expected
        mesg = " " + mesg if mesg else ""
        self.ephemeral(f"[{self.progress_count}/{self.total_files_expected}] {pct:0.1f}%{mesg}", log=False)

    def progress_rate(self, mesg=None):
        # Files-per-second variant, measured since construction.
        self.progress_count += 1
        rate = self.progress_count / (time.time() - self.time_start)
        self.ephemeral(f"[{self.progress_count}][{int(rate)} files/sec] {mesg}", log=False)

    def print_stats(self):
        """Print a one-line summary of all counters (also called from __del__)."""
        if self.total_files_expected:
            remaining = self.total_files_expected - self.progress_count
            if remaining > 0:
                self.c['unprocessed'] = remaining
        # Fold the benign sub-reasons into the generic 'ok' bucket.
        for ok_reason in "stat-updated fault-cleared found".split():
            if self.c[ok_reason]:
                self.c['ok'] += self.c[ok_reason]
        if num_faults := self.num_faults():
            self.c['faulted'] = num_faults
        s = []
        for label, count in self.c.items():
            s.append(f"{count:,} {label}")
        if not sys.stdout.isatty():
            # Non-interactive runs stay quiet unless something changed or went
            # missing.  NOTE(review): these test key *presence*, so a
            # preseeded zero count still triggers output — confirm intended.
            fault_conditions = ['changed' in self.c, 'missing' in self.c]
            if not any(fault_conditions):
                return
        self.ephemeral()
        self.print("; ".join(s), file=sys.stderr)

    def __del__(self):
        # Best-effort final stats when the keeper is garbage-collected.
        if self.c:
            self.print_stats()

    def num_faults(self, exclude_unknown=False):
        """Return the total count of fault-type reasons."""
        fault_list = "hash-changed missing lost".split()
        if not exclude_unknown:
            fault_list.append("unknown")
        return sum([self.c[x] for x in fault_list])

    def print_matches(self, reason, from_path, *to_paths, sha256=None):
        """Report *from_path* matching one or more *to_paths*."""
        if self.print0:
            self.print(from_path, end='\0')
            sys.stdout.flush()
            return
        if len(to_paths) == 1:
            # Single match: compact inline rename diff with the changed part
            # highlighted.
            renamed = utils.format_rename(from_path, to_paths[0])
            self.__call__(reason, renamed.formatted, highlight=renamed.changed, sha256=sha256)
        else:
            # Multiple matches: header line, then each target path indented.
            self.__call__(reason, from_path, sha256=sha256)
            for idx, to_path in enumerate(to_paths):
                self.print("{:>25} {}".format("", to_path))
def load_config_file(figdir, configfile):
    """Load the TOML config file under *figdir*; a missing file yields {}."""
    try:
        with open(os.path.join(figdir, configfile), 'r') as f:
            return toml.load(f)
    except FileNotFoundError:
        return {}
def printcols(*args):
    """Used to print results in N columns; intended for messages that must be
    printed even if there is no TTY."""
    if sys.stdout.isatty():
        # Erase any ephemeral status line before printing a permanent one.
        print('\r\033[K\r', end='', flush=True, file=sys.stderr)
    templates = {1: "{}", 2: "{:<20} {}"}
    layout = templates.get(len(args), "{:<20} {:<10} {}")
    print(layout.format(*args))
@dataclass
class Work:
    """Convenience class to help keep track of the working files for any given
    operation."""
    data: FigData
    config: dict
    max_stat_age: Optional[int] = None
    debug: bool = False

    def __post_init__(self):
        self.cache = CallableCache()
        self.dirs_cached = []
        self.fsfiles = {}  # indexed by (host, hostpath)
        # Bugfix: this previously read `self.dbfiles = list[File]`, which
        # stored the generic-alias *type* itself rather than an empty list.
        self.dbfiles: list[File] = []
        self.start_time = time.time()
        self.status = StatusKeeper()  # log=log
        self.localhost = Host(platform.node())
        # self.log = AppLog(command=ctx.invoked_subcommand)
        self.log = None
        if self.max_stat_age is None:
            # Fall back to the environment; 0 means "always re-stat".
            self.max_stat_age = int(os.environ.get('FIG_MAX_STAT_AGE', 0))

    def __del__(self):
        # Persist the captured application log (if any) when this Work dies.
        if self.log:
            stats = self.status.c
            log = Log(output=ZstdJson(self.log.output), started=self.log.started, ended=datetime.now(),
                      argv=ujson.dumps(self.log.argv), cwd=self.log.cwd, hostname=self.log.hostname,
                      stats=ujson.dumps(stats), command=self.log.command)
            log.commit(self)
            self.log = None

    def load_figignore(self, abspath):
        """Recursively load figignore files up. and then load the global
        figignore."""
        figignore = GlobFilter(verbose=self.debug)
        for path in utils.find_files_up(".figignore", startdir=abspath):
            anchor = os.path.dirname(path)
            figignore.add_exclude_file(anchor=anchor, path=path)
        try:
            # Global excludes live in <figdir>/figignore; absence is fine.
            figignore.add_exclude_file(anchor="/", path=os.path.join(self.data.figdir, "figignore"))
        except FileNotFoundError:
            pass
        return figignore

    def get_pathspec(self, pathspec: list[str], include_ignored=False) -> Generator[FsFile, None, None]:
        """Yield filesystem files for each (possibly host-qualified) path."""
        for path in pathspec:
            host, path = separate_host_path(path)
            yield from self.scandir(host, path, include_ignored)

    def preload_pathspec(self, *args, **kwargs):
        # Drain the generator purely for its side effect of filling self.fsfiles.
        tuple(_ for _ in self.get_pathspec(*args, **kwargs))
        # NOTE(review): sys.getsizeof is shallow — this measures the dict
        # object only, not the FsFile entries it references.
        if (tuple_size := sys.getsizeof(self.fsfiles)) > 500 * 1024:
            dprint(f"warning: preloaded pathspec used {tuple_size} bytes")

    def match_dbfiles(self, **kwargs) -> Generator[File, None, None]:
        """Get matching dbfiles from database. Use a dedicated database reader
        to avoid locking the database."""
        data_reader = FigData(self.data.figdir, debug=self.data.debug)
        yield from get_db_files(data_reader, **kwargs)

    def get_Files(self, **kwargs) -> Generator[File, None, None]:
        "Returns all files matching given conditions."
        data_reader = FigData(self.data.figdir)
        for dbf in data_reader.get_all(File, **kwargs):
            dbf.get_location(data_reader)
            load_host(dbf)
            yield dbf

    def get_sha_Files(self, sha256: str, **kwargs) -> Generator[File, None, None]:
        "Returns all files matching sha256 and other conditions. Uses fast sha search."
        # Narrow with the indexed 8-char prefix, then verify the full digest.
        for maybe_dbf in self.get_Files(sha256_short=sha256[:8], **kwargs):
            if maybe_dbf.sha256 == sha256:
                yield maybe_dbf

    def scandir(self, host, path, include_ignored=False) -> Generator[FsFile, None, None]:
        """Walk *path* on *host*, caching every FsFile seen in self.fsfiles."""
        if not host.is_remote:
            path = os.path.abspath(path)
        figignore = self.load_figignore(path)

        def ignore_fn(abspath):
            # Count each exclusion so it shows up in the status summary.
            if ignore := figignore(abspath):
                self.status('ignored', abspath)
            return ignore

        for fsf in host.scandir(path, is_ignored_fn=ignore_fn, include_ignored=include_ignored):
            # self.fsfiles[(fsf.host, fsf.abspath)] = fsf # TODO: use this key in the future
            fsf._host = host
            self.fsfiles[fsf.hostpath] = fsf
            yield fsf

    def progress(self, obj: Union[FsFile, File]) -> None:
        # File has .figpath; FsFile only has .hostpath.
        try:
            path = obj.figpath
        except AttributeError:
            path = obj.hostpath
        self.status.progress(path)

    def fstatus(self, obj: Union[FsFile, File], status_message: str, **kwargs) -> None:
        # Same path resolution as progress(), but records a status reason.
        try:
            path = obj.figpath
        except AttributeError:
            path = obj.hostpath
        self.status(status_message, path, sha256=obj.sha256, **kwargs)

    def cache_hostdir(self, host, path):
        # TODO: need to bucket by host
        # TODO: add maxdepth for scandir
        if path not in self.dirs_cached:
            for fsf in host.scandir(path):
                self.fsfiles[fsf.hostpath] = fsf

    def check_dbfile_against_fsfile(self, dbf: File):
        """Compare one database file against the cached filesystem state."""
        return check_File(dbf, self.fsfiles)

    @property
    def stat_cutoff(self):
        # If max_stat_age is < 1E9 (epoch for approximately the year
        # 2000), then the value is interpreted as an epoch. Otherwise, is is
        # interpreted as the seconds to subtract from the current epoch.
        if self.max_stat_age > int(1E9):
            return self.max_stat_age
        return time.time() - self.max_stat_age

    def update_stat(self, dbf: File):
        """Refresh a dbfile's stat info if its cached stat is stale."""
        # If it's not already in the cache, and if the DB stat hasn't been
        # updated within the cache cutoff, retrieve it from disk.
        if not dbf._host:
            raise Exception("no host!")
        cache = None
        try:
            cache = {dbf.abspath: self.fsfiles[dbf.abspath]}
        except KeyError:
            pass
        if dbf.stat_checked_on < self.stat_cutoff:
            check_File(dbf, cache)
        if dbf._dirty:
            # Defer the commit; the CallableCache batches them.
            self.cache.add(lambda: dbf.commit(self.data))

    def add_fs_to_db(self, force=False, dry_run=False):
        """Insert every cached filesystem file into the database."""
        add_fs_to_db(self.data, self.fsfiles, status=self.status, force=force, dry_run=dry_run)

    def forget_dbfs(self, dbfs: list[File]) -> int:
        """Remove dbfs and also any history entries. `dbfs` has to have the id,
        path, and sha256 columns."""
        ids = []
        for dbf in dbfs:
            self.fstatus(dbf, 'forgotten')
            ids.append(dbf.id)
        self.data.remove_files_and_histories(file_ids=ids)
| |
``CREATING`` - Product creation has started; the product is not ready for use.
* ``FAILED`` - An action failed.
- **ProductARN** *(string) --*
The ARN of the product.
- **CreatedTime** *(datetime) --*
The UTC time stamp of the creation time.
- **ProvisioningArtifactSummaries** *(list) --*
Information about the provisioning artifacts (also known as versions) for the specified product.
- *(dict) --*
Summary information about a provisioning artifact (also known as a version) for a product.
- **Id** *(string) --*
The identifier of the provisioning artifact.
- **Name** *(string) --*
The name of the provisioning artifact.
- **Description** *(string) --*
The description of the provisioning artifact.
- **CreatedTime** *(datetime) --*
The UTC time stamp of the creation time.
- **ProvisioningArtifactMetadata** *(dict) --*
The metadata for the provisioning artifact. This is used with AWS Marketplace products.
- *(string) --*
- *(string) --*
- **Tags** *(list) --*
Information about the tags associated with the product.
- *(dict) --*
Information about a tag. A tag is a key-value pair. Tags are propagated to the resources created when provisioning a product.
- **Key** *(string) --*
The tag key.
- **Value** *(string) --*
The value for this key.
- **TagOptions** *(list) --*
Information about the TagOptions associated with the product.
- *(dict) --*
Information about a TagOption.
- **Key** *(string) --*
The TagOption key.
- **Value** *(string) --*
The TagOption value.
- **Active** *(boolean) --*
The TagOption active state.
- **Id** *(string) --*
The TagOption identifier.
- **Budgets** *(list) --*
Information about the associated budgets.
- *(dict) --*
Information about a budget.
- **BudgetName** *(string) --*
Name of the associated budget.
:type AcceptLanguage: string
:param AcceptLanguage:
The language code.
* ``en`` - English (default)
* ``jp`` - Japanese
* ``zh`` - Chinese
:type Id: string
:param Id: **[REQUIRED]**
The product identifier.
:rtype: dict
:returns:
"""
pass
def describe_product_view(self, Id: str, AcceptLanguage: str = None) -> Dict:
    """
    Gets information about the specified product.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/servicecatalog-2015-12-10/DescribeProductView>`_
    **Request Syntax**
    ::
      response = client.describe_product_view(
          AcceptLanguage='string',
          Id='string'
      )
    **Response Syntax**
    ::
      {
          'ProductViewSummary': {
              'Id': 'string',
              'ProductId': 'string',
              'Name': 'string',
              'Owner': 'string',
              'ShortDescription': 'string',
              'Type': 'CLOUD_FORMATION_TEMPLATE'|'MARKETPLACE',
              'Distributor': 'string',
              'HasDefaultPath': True|False,
              'SupportEmail': 'string',
              'SupportDescription': 'string',
              'SupportUrl': 'string'
          },
          'ProvisioningArtifacts': [
              {
                  'Id': 'string',
                  'Name': 'string',
                  'Description': 'string',
                  'CreatedTime': datetime(2015, 1, 1)
              },
          ]
      }
    **Response Structure**
    - *(dict) --*
      - **ProductViewSummary** *(dict) --*
        Summary information about the product.
        - **Id** *(string) --*
          The product view identifier.
        - **ProductId** *(string) --*
          The product identifier.
        - **Name** *(string) --*
          The name of the product.
        - **Owner** *(string) --*
          The owner of the product. Contact the product administrator for the significance of this value.
        - **ShortDescription** *(string) --*
          Short description of the product.
        - **Type** *(string) --*
          The product type. Contact the product administrator for the significance of this value. If this value is ``MARKETPLACE`` , the product was created by AWS Marketplace.
        - **Distributor** *(string) --*
          The distributor of the product. Contact the product administrator for the significance of this value.
        - **HasDefaultPath** *(boolean) --*
          Indicates whether the product has a default path. If the product does not have a default path, call ListLaunchPaths to disambiguate between paths. Otherwise, ListLaunchPaths is not required, and the output of ProductViewSummary can be used directly with DescribeProvisioningParameters .
        - **SupportEmail** *(string) --*
          The email contact information to obtain support for this Product.
        - **SupportDescription** *(string) --*
          The description of the support for this Product.
        - **SupportUrl** *(string) --*
          The URL information to obtain support for this Product.
      - **ProvisioningArtifacts** *(list) --*
        Information about the provisioning artifacts for the product.
        - *(dict) --*
          Information about a provisioning artifact. A provisioning artifact is also known as a product version.
          - **Id** *(string) --*
            The identifier of the provisioning artifact.
          - **Name** *(string) --*
            The name of the provisioning artifact.
          - **Description** *(string) --*
            The description of the provisioning artifact.
          - **CreatedTime** *(datetime) --*
            The UTC time stamp of the creation time.
    :type AcceptLanguage: string
    :param AcceptLanguage:
      The language code.
      * ``en`` - English (default)
      * ``jp`` - Japanese
      * ``zh`` - Chinese
    :type Id: string
    :param Id: **[REQUIRED]**
      The product view identifier.
    :rtype: dict
    :returns:
    """
    # NOTE(review): auto-generated client stub — the signature and docstring
    # document the API; `pass` is intentional, as the real implementation is
    # presumably supplied dynamically at runtime (confirm against the stub
    # generator / botocore).
    pass
def describe_provisioned_product(self, Id: str, AcceptLanguage: str = None) -> Dict:
    """
    Gets information about the specified provisioned product.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/servicecatalog-2015-12-10/DescribeProvisionedProduct>`_
    **Request Syntax**
    ::
      response = client.describe_provisioned_product(
          AcceptLanguage='string',
          Id='string'
      )
    **Response Syntax**
    ::
      {
          'ProvisionedProductDetail': {
              'Name': 'string',
              'Arn': 'string',
              'Type': 'string',
              'Id': 'string',
              'Status': 'AVAILABLE'|'UNDER_CHANGE'|'TAINTED'|'ERROR'|'PLAN_IN_PROGRESS',
              'StatusMessage': 'string',
              'CreatedTime': datetime(2015, 1, 1),
              'IdempotencyToken': 'string',
              'LastRecordId': 'string',
              'ProductId': 'string',
              'ProvisioningArtifactId': 'string'
          },
          'CloudWatchDashboards': [
              {
                  'Name': 'string'
              },
          ]
      }
    **Response Structure**
    - *(dict) --*
      - **ProvisionedProductDetail** *(dict) --*
        Information about the provisioned product.
        - **Name** *(string) --*
          The user-friendly name of the provisioned product.
        - **Arn** *(string) --*
          The ARN of the provisioned product.
        - **Type** *(string) --*
          The type of provisioned product. The supported values are ``CFN_STACK`` and ``CFN_STACKSET`` .
        - **Id** *(string) --*
          The identifier of the provisioned product.
        - **Status** *(string) --*
          The current status of the provisioned product.
          * ``AVAILABLE`` - Stable state, ready to perform any operation. The most recent operation succeeded and completed.
          * ``UNDER_CHANGE`` - Transitive state. Operations performed might not have valid results. Wait for an ``AVAILABLE`` status before performing operations.
          * ``TAINTED`` - Stable state, ready to perform any operation. The stack has completed the requested operation but is not exactly what was requested. For example, a request to update to a new version failed and the stack rolled back to the current version.
          * ``ERROR`` - An unexpected error occurred. The provisioned product exists but the stack is not running. For example, CloudFormation received a parameter value that was not valid and could not launch the stack.
          * ``PLAN_IN_PROGRESS`` - Transitive state. The plan operations were performed to provision a new product, but resources have not yet been created. After reviewing the list of resources to be created, execute the plan. Wait for an ``AVAILABLE`` status before performing operations.
        - **StatusMessage** *(string) --*
          The current status message of the provisioned product.
        - **CreatedTime** *(datetime) --*
          The UTC time stamp of the creation time.
        - **IdempotencyToken** *(string) --*
          A unique identifier that you provide to ensure idempotency. If multiple requests differ only by the idempotency token, the same response is returned for each repeated request.
        - **LastRecordId** *(string) --*
          The record identifier of the last request performed on this provisioned product.
        - **ProductId** *(string) --*
          The product identifier. For example, ``prod-abcdzk7xy33qa`` .
        - **ProvisioningArtifactId** *(string) --*
          The identifier of the provisioning artifact. For example, ``pa-4abcdjnxjj6ne`` .
      - **CloudWatchDashboards** *(list) --*
        Any CloudWatch dashboards that were created when provisioning the product.
        - *(dict) --*
          Information about a CloudWatch dashboard.
          - **Name** *(string) --*
            The name of the CloudWatch dashboard.
    :type AcceptLanguage: string
    :param AcceptLanguage:
      The language code.
      * ``en`` - English (default)
      * ``jp`` - Japanese
      * ``zh`` - Chinese
    :type Id: string
    :param Id: **[REQUIRED]**
      The provisioned product identifier.
    :rtype: dict
    :returns:
      A dict matching the **Response Syntax** shown above.
    """
    # NOTE: auto-generated documentation stub -- the body is intentionally empty;
    # presumably the real request is dispatched dynamically at runtime (confirm
    # against the stub generator).
    pass
def describe_provisioned_product_plan(self, PlanId: str, AcceptLanguage: str = None, PageSize: int = None, PageToken: str = None) -> Dict:
"""
Gets information about the resource changes for the specified plan.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/servicecatalog-2015-12-10/DescribeProvisionedProductPlan>`_
**Request Syntax**
::
response = client.describe_provisioned_product_plan(
AcceptLanguage='string',
PlanId='string',
PageSize=123,
PageToken='string'
)
**Response Syntax**
::
{
'ProvisionedProductPlanDetails': {
'CreatedTime': datetime(2015, 1, 1),
'PathId': 'string',
| |
'<tr align=left class="table1">'
else:
print '<tr align=left class="table2">'
print '<td>%d</td>' % count
print '<td>%s</td>' % ISFDBLink('title.cgi', title_id, title_title)
print '<td>'
for pub in titles[title_title][title_id]:
if pub[0] in empty_pubs:
suffix = ' [EMPTY]'
else:
suffix = ''
print '%s (%s)%s<br>' % (ISFDBLink('pl.cgi', pub[0], pub[1]), pub[2], suffix)
print '</td>'
print '</tr>'
color = color ^ 1
record = result.fetch_row()
print '</table><p>'
def function55():
ui = isfdbUI()
query = """select title_id, title_title from titles t, cleanup c
where t.title_id=c.record_id and c.report_type=55
and %s""" % ui.goodHtmlClause('t', 'title_title')
db.query(query)
result = db.store_result()
num = result.num_rows()
if num:
PrintTableColumns(('', 'Title'))
record = result.fetch_row()
bgcolor = 1
count = 1
while record:
title_id = record[0][0]
title_title = record[0][1]
PrintTitleRecord(title_id, title_title, bgcolor, count)
record = result.fetch_row()
bgcolor ^= 1
count += 1
print "</table>"
else:
print "<h2>No Title records with HTML in Titles.</h2>"
def function56():
ui = isfdbUI()
query = """select pub_id, pub_title from pubs p, cleanup c
where p.pub_id=c.record_id and c.report_type=56
and %s""" % ui.goodHtmlClause('p', 'pub_title')
db.query(query)
result = db.store_result()
num = result.num_rows()
if num:
PrintTableColumns(('', 'Publication'))
record = result.fetch_row()
bgcolor = 1
count = 1
while record:
pub_id = record[0][0]
pub_title = record[0][1]
PrintPublicationRecord(pub_id, pub_title, bgcolor, count)
record = result.fetch_row()
bgcolor ^= 1
count += 1
print "</table>"
else:
print "<h2>No Publications with HTML in Titles.</h2>"
def function57():
print '<h3>For SFE-hosted images, only links to /clute/, /langford/ and /robinson/ sub-directories are allowed.</h3>'
query = """select pub_id, pub_title from pubs, cleanup c
where c.report_type=57
and pubs.pub_id=c.record_id
and pub_frontimage like '%sf-encyclopedia.uk%'
and pub_frontimage not like '%/clute/%'
and pub_frontimage not like '%/langford/%'
and pub_frontimage not like '%/robinson/%'"""
db.query(query)
result = db.store_result()
num = result.num_rows()
if num > 0:
record = result.fetch_row()
bgcolor = 1
PrintTableColumns(('', 'Publication'))
count = 1
while record:
pub_id = record[0][0]
pub_title = record[0][1]
PrintPublicationRecord(pub_id, pub_title, bgcolor, count)
bgcolor ^= 1
count += 1
record = result.fetch_row()
print '</table>'
else:
print '<h2>No invalid SFE image links found</h2>'
def function58():
    # Report 58: authors with no language set who have at least one title whose
    # language id is 16 -- presumably a specific language in the ISFDB language
    # table; TODO confirm the id-to-language mapping.
    cleanup.query = """select a.author_id, a.author_canonical
                    from authors a, cleanup c
                    where a.author_language is null
                    and (
                    select count(t.title_id) from titles t, canonical_author ca
                    where a.author_id = ca.author_id
                    and ca.title_id = t.title_id
                    and t.title_language is not null
                    and t.title_language = 16
                    )>0
                    and c.record_id=a.author_id
                    and c.report_type=58
                    order by a.author_lastname"""
    cleanup.none = 'No matching records found'
    cleanup.print_author_table()
def function59():
    # Report 59: same shape as report 58 but for titles with language id 22 --
    # TODO confirm the id-to-language mapping.
    cleanup.query = """select a.author_id, a.author_canonical
                    from authors a, cleanup c
                    where a.author_language is null
                    and (
                    select count(t.title_id) from titles t, canonical_author ca
                    where a.author_id = ca.author_id
                    and ca.title_id = t.title_id
                    and t.title_language is not null
                    and t.title_language = 22
                    )>0
                    and c.record_id=a.author_id
                    and c.report_type=59
                    order by a.author_lastname"""
    cleanup.none = 'No matching records found'
    cleanup.print_author_table()
def function60():
    # Report 60: same shape as report 58 but for titles with language id 26 --
    # TODO confirm the id-to-language mapping.
    cleanup.query = """select a.author_id, a.author_canonical
                    from authors a, cleanup c
                    where a.author_language is null
                    and (
                    select count(t.title_id) from titles t, canonical_author ca
                    where a.author_id = ca.author_id
                    and ca.title_id = t.title_id
                    and t.title_language is not null
                    and t.title_language = 26
                    )>0
                    and c.record_id=a.author_id
                    and c.report_type=60
                    order by a.author_lastname"""
    cleanup.none = 'No matching records found'
    cleanup.print_author_table()
def function61():
    # Report 61: catch-all version of reports 58-60 -- authors with no language
    # set who have titles in any language id other than 16, 17, 22 or 26.
    cleanup.query = """select a.author_id, a.author_canonical
                    from authors a, cleanup c
                    where a.author_language is null
                    and (
                    select count(t.title_id) from titles t, canonical_author ca
                    where a.author_id = ca.author_id
                    and ca.title_id = t.title_id
                    and t.title_language is not null
                    and t.title_language not in (16,17,22,26)
                    )>0
                    and c.record_id=a.author_id
                    and c.report_type=61
                    order by a.author_lastname"""
    cleanup.none = 'No matching records found'
    cleanup.print_author_table()
def function62():
query = """select t.title_id,t.title_title
from cleanup c, titles t
where (
(t.title_storylen is not null
and t.title_storylen not in ('ss','nt','nv'))
or (t.title_storylen in ('ss','nt','nv')
and t.title_ttype!='SHORTFICTION')
)
and c.record_id=t.title_id and c.report_type=62
order by t.title_title"""
db.query(query)
result = db.store_result()
record = result.fetch_row()
num = result.num_rows()
if num:
PrintTableColumns(('', 'Title'))
bgcolor = 1
count = 1
while record:
title_id = record[0][0]
title_title = record[0][1]
PrintTitleRecord(title_id, title_title, bgcolor, count)
bgcolor = bgcolor ^ 1
count += 1
record = result.fetch_row()
print '</table><p>'
else:
print "<h2>No Titles with Invalid Length Values found</h2>"
def function63():
query = """select distinct t1.title_id, t1.title_title
from titles t1, titles t2, cleanup c
where t1.title_parent = t2.title_id
and t1.title_non_genre != t2.title_non_genre
and t1.title_id = c.record_id
and c.report_type = 63
order by t1.title_title"""
db.query(query)
result = db.store_result()
num = result.num_rows()
if not num:
print "<h2>No Genre/Non-Genre Mismatches.</h2>"
return
PrintTableColumns(('', 'Title'))
bgcolor = 1
count = 1
record = result.fetch_row()
while record:
title_id = record[0][0]
title_title = record[0][1]
PrintTitleRecord(title_id, title_title, bgcolor, count)
bgcolor = bgcolor ^ 1
count += 1
record = result.fetch_row()
print '</table>'
def function64():
    # Report 64: series containing both EDITOR and non-EDITOR titles; only
    # unresolved rows (c.resolved IS NULL) are listed, with an "Ignore" column.
    print """<h3>If a series legitimately contains EDITOR and non-EDITOR titles,
please mention this fact in the series' Note field before marking it "Ignored".</h3>"""
    query = """select s.series_id, s.series_title, c.cleanup_id from series s, cleanup c
            where s.series_id = c.record_id and c.report_type = 64 and c.resolved IS NULL
            and exists(select 1 from titles t where t.series_id = s.series_id and t.title_ttype = 'EDITOR')
            and exists(select 1 from titles t where t.series_id = s.series_id and t.title_ttype != 'EDITOR')
            order by s.series_title"""
    db.query(query)
    result = db.store_result()
    if not result.num_rows():
        print '<h2>No Series with EDITOR and non-EDITOR Titles</h2>'
        return
    # Print table headers
    PrintTableColumns(('', 'Series', 'Ignore'))
    record = result.fetch_row()
    bgcolor = 1
    count = 1
    while record:
        series_id = record[0][0]
        series_name = record[0][1]
        cleanup_id = record[0][2]
        # cleanup_id and report type 64 are passed so the row can render its
        # "ignore" action link.
        PrintSeriesRecord(series_id, series_name, bgcolor, count, cleanup_id, 64)
        bgcolor ^= 1
        count += 1
        record = result.fetch_row()
    print "</table>"
def function65():
    # Report 65: publisher names matching the "bad Unicode" pattern
    # (see ISFDBBadUnicodePatternMatch, defined elsewhere).
    pattern_match = ISFDBBadUnicodePatternMatch('publisher_name')
    cleanup.query = """select publisher_id, publisher_name
                    from publishers p, cleanup c where (%s)
                    and p.publisher_id=c.record_id
                    and c.report_type=65
                    order by p.publisher_name""" % pattern_match
    cleanup.none = 'No Publishers with Invalid Unicode Characters Found'
    cleanup.print_publisher_table()
def function66():
    # Report 66: publication series names matching the "bad Unicode" pattern.
    pattern_match = ISFDBBadUnicodePatternMatch('pub_series_name')
    cleanup.query = """select pub_series_id, pub_series_name
                    from pub_series p, cleanup c where (%s)
                    and p.pub_series_id=c.record_id
                    and c.report_type=66
                    order by p.pub_series_name""" % pattern_match
    cleanup.none = 'No Publication Series with Invalid Unicode Characters Found'
    cleanup.print_pub_series_table()
def function67():
    # Report 67: series titles matching the "bad Unicode" pattern.
    pattern_match = ISFDBBadUnicodePatternMatch('series_title')
    cleanup.query = """select series_id, series_title
                    from series s, cleanup c where (%s)
                    and s.series_id=c.record_id
                    and c.report_type=67
                    order by s.series_title""" % pattern_match
    cleanup.none = 'No Series with Invalid Unicode Characters Found'
    cleanup.print_series_table()
def function68():
    # Report 68: canonical author names matching the "bad Unicode" pattern.
    pattern_match = ISFDBBadUnicodePatternMatch('author_canonical')
    cleanup.query = """select author_id, author_canonical
                    from authors a, cleanup c where (%s)
                    and a.author_id=c.record_id
                    and c.report_type=68
                    order by a.author_lastname""" % pattern_match
    cleanup.none = 'No Authors with Invalid Unicode Characters Found'
    cleanup.print_author_table()
def function69():
    # Report 69: title strings matching the "bad Unicode" pattern.
    pattern_match = ISFDBBadUnicodePatternMatch('title_title')
    cleanup.query = """select title_id, title_title
                    from titles t, cleanup c where (%s)
                    and t.title_id=c.record_id
                    and c.report_type=69
                    order by t.title_title""" % pattern_match
    cleanup.none = 'No Titles with Invalid Unicode Characters Found'
    cleanup.print_title_table()
def function70():
    # Report 70: publication titles matching the "bad Unicode" pattern.
    pattern_match = ISFDBBadUnicodePatternMatch('pub_title')
    cleanup.query = """select pub_id, pub_title
                    from pubs p, cleanup c where (%s)
                    and p.pub_id=c.record_id
                    and c.report_type=70
                    order by p.pub_title""" % pattern_match
    cleanup.none = 'No Publications with Invalid Unicode Characters Found'
    cleanup.print_pub_table()
def function71():
print """<h3>This report lists all 9999-00-00 titles and titles expected
to be published more than 3 months in the future.</h3>"""
query = """select t.title_id, t.title_title, t.title_copyright
from titles t, cleanup c
where t.title_copyright > DATE_ADD(NOW(), INTERVAL 3 MONTH)
and t.title_copyright != '8888-00-00'
and t.title_id=c.record_id and c.report_type=71
order by t.title_title"""
db.query(query)
result = db.store_result()
if result.num_rows() > 0:
record = result.fetch_row()
bgcolor = 1
count = 1
PrintTableColumns(('', 'Title', 'Date'))
while record:
title_id = record[0][0]
title_title = record[0][1]
title_date = record[0][2]
if bgcolor:
print '<tr align=left class="table1">'
else:
print '<tr align=left class="table2">'
print '<td>%d</td>' % int(count)
print '<td>%s</td>' % ISFDBLink('title.cgi', title_id, title_title)
print '<td>%s</td>' % title_date
print '</tr>'
bgcolor ^= 1
count += 1
record = result.fetch_row()
print '</table>'
else:
print '<h2>No Forthcoming Titles Found</h2>'
return
def function72():
    # Report 72: forthcoming publications -- dated 9999-00-00 or more than
    # three months out; 8888-00-00 dates are explicitly excluded. Rendering is
    # delegated to the shared cleanup helper.
    cleanup.note = """This report lists all 9999-00-00 publications and publications
expected to be published more than 3 months in the future."""
    cleanup.query = """select pub_id, pub_title, p.pub_year
                    from pubs p, cleanup c
                    where p.pub_year > DATE_ADD(NOW(), INTERVAL 3 MONTH)
                    and p.pub_year != '8888-00-00'
                    and p.pub_id=c.record_id and c.report_type=72
                    order by p.pub_title"""
    cleanup.none = 'No Forthcoming Publications Found'
    cleanup.print_pub_with_date_table()
def function73():
pattern_match = suspectUnicodePatternMatch('publisher_name')
query = """select publisher_id, publisher_name, c.cleanup_id
from publishers p, cleanup c where (%s)
and p.publisher_id=c.record_id and c.report_type=73
and c.resolved IS NULL
order by p.publisher_name""" % pattern_match
db.query(query)
result = db.store_result()
if result.num_rows() > 0:
record = result.fetch_row()
bgcolor = 1
count = 1
PrintTableColumns(('', 'Publisher', 'Ignore'))
while record:
id = record[0][0]
name = record[0][1]
cleanup_id = record[0][2]
PrintPublisherRecord(id, name, bgcolor, count, cleanup_id, 73)
bgcolor ^= 1
count += 1
record = result.fetch_row()
print "</table>"
else:
print "<h2>No Publishers with Suspect Unicode Characters Found</h2>"
return
def function74():
pattern_match = suspectUnicodePatternMatch('title_title')
query = """select title_id, title_title, | |
parallelize the normal estimation for speed
num_cores = multiprocessing.cpu_count()
Parallel(n_jobs=num_cores)(delayed(estimate_normals)(point_cloud_filename, depth_param_filename, normals_dir_name, n_neighbors) for point_cloud_filename in point_cloud_path_list)
def export_video_montage(input_path, output_path, devices='dev3'):
    """
    export a single video containing videos from different captured modalities including depth, rgb,
    normal vectors, point clouds, 2D poses.
    Note: Assumes all modality videos already exist in the input directory
    Parameters
    ----------
    input_path : dataset directory (where all modality videos are available)
    output_path : path to save the montage videos
    devices : device filter passed through to get_scan_list (default 'dev3')
    Returns
    -------
    Saves the montage video
    """
    _, scan_path_list, _, _, _, _, _ = get_scan_list(input_path, devices)
    fps = 30
    # Frame layout: 3 tiles of 512px across, 2 rows of 424px, plus a 50px text
    # banner per row (added by insert_text_to_image).
    size = (512*3, 424*2 + 2*50)
    for i, scan in enumerate(scan_path_list):
        device_list = get_subdirs(scan)
        for device in device_list:
            scan_device_path = os.path.join(scan, device)
            # Re-root the scan path under output_path: everything up to and
            # including the "ANU_ikea_dataset" directory (17 chars) is replaced.
            processed_scan_path = scan_device_path.replace(scan_device_path[:scan_device_path.index("ANU_ikea_dataset") + 17], output_path)
            rgb_video_file = os.path.join(processed_scan_path, 'images/scan_video.avi')
            depth_video_file = os.path.join(processed_scan_path, 'depth/scan_video.avi')
            normals_video_file = os.path.join(processed_scan_path, 'normals/scan_video.avi')
            point_clouds_video_file = os.path.join(processed_scan_path, 'point_clouds/point_cloud_video.avi')
            pose2D_video_file = os.path.join(processed_scan_path, '2DposeImages/scan_video.avi')
            video_file_name = os.path.join(processed_scan_path, 'montage.avi')
            if os.path.exists(rgb_video_file) and os.path.exists(depth_video_file) and \
                    os.path.exists(normals_video_file) and os.path.exists(point_clouds_video_file) and \
                    os.path.exists(pose2D_video_file):
                rgb_vid = cv2.VideoCapture(rgb_video_file)
                depth_vid = cv2.VideoCapture(depth_video_file)
                normals_vid = cv2.VideoCapture(normals_video_file)
                pc_vid = cv2.VideoCapture(point_clouds_video_file)
                pose2D_vid = cv2.VideoCapture(pose2D_video_file)
                montage_video_writer = cv2.VideoWriter(video_file_name, cv2.VideoWriter_fourcc(*'DIVX'), fps, size)
                while (rgb_vid.isOpened()):
                    rgb_ret, rgb_frame = rgb_vid.read()
                    depth_ret, depth_frame = depth_vid.read()
                    normals_ret, normals_frame = normals_vid.read()
                    # Normals and point-cloud frames are flipped horizontally --
                    # presumably mirrored by their renderers; confirm against the
                    # source videos.
                    normals_frame = cv2.flip(normals_frame, 1)
                    pc_ret, pc_frame = pc_vid.read()
                    pc_frame = cv2.flip(pc_frame, 1)
                    pose2D_ret, pose2D_frame = pose2D_vid.read()
                    if rgb_ret and depth_ret and normals_ret and pc_ret and pose2D_ret:
                        rgb_frame = cv2.resize(rgb_frame, (512, 424))
                        pose2D_frame = cv2.resize(pose2D_frame, (512, 424))
                        pc_frame = cv2.resize(pc_frame, (512, 424))
                        rgb_frame = insert_text_to_image(rgb_frame, 'RGB')
                        depth_frame = insert_text_to_image(depth_frame, 'Depth')
                        normals_frame = insert_text_to_image(normals_frame, 'Normal Vectors')
                        pose2D_frame = insert_text_to_image(pose2D_frame, '2D Pose')
                        pc_frame =insert_text_to_image(pc_frame, '3D Point Cloud')
                        # Row 1 (rgb + depth) is narrower than row 2 (3 tiles), so
                        # pad it symmetrically with black before stacking.
                        montage_row1 = cv2.hconcat([rgb_frame, depth_frame])
                        montage_row2 = cv2.hconcat([normals_frame, pc_frame, pose2D_frame])
                        side_margin = int((montage_row2.shape[1] - montage_row1.shape[1]) / 2)
                        montage_row1 = cv2.copyMakeBorder(montage_row1, 0, 0, side_margin, side_margin, cv2.BORDER_CONSTANT,
                                                          value=[0, 0, 0])
                        montage_frame = cv2.vconcat([montage_row1, montage_row2])
                        montage_video_writer.write(montage_frame)
                    else:
                        # Any stream running out of frames ends the montage.
                        break
                rgb_vid.release()
                depth_vid.release()
                normals_vid.release()
                pc_vid.release()
                pose2D_vid.release()
                montage_video_writer.release()
                print('Saved ' + video_file_name)
            else:
                print('One or more videos required for the montage is not available, please preprocess the dataset first')
def insert_text_to_image(img, txt, font=cv2.FONT_HERSHEY_SIMPLEX, font_size=1):
    """
    Add a 50px black banner above *img* and draw *txt* centered inside it.
    Parameters
    ----------
    img : image array (as returned by cv2)
    txt : banner text
    font : cv2 font face
    font_size : cv2 fontScale (default 1; previously accepted but ignored)
    Returns
    -------
    The image with the banner row prepended.
    """
    # Measure the text so it can be horizontally centered.
    # Fix: honor the font_size parameter (the original hard-coded scale 1 here
    # and in putText, silently ignoring the argument).
    textsize = cv2.getTextSize(txt, font, font_size, 2)[0]
    # get coords based on boundary
    textX = int((img.shape[1] - textsize[0]) / 2)
    img = cv2.copyMakeBorder(img, 50, 0, 0, 0, cv2.BORDER_CONSTANT, value=[0, 0, 0])
    cv2.putText(img, txt, (textX, 40), font, font_size, (255, 255, 255), 2, cv2.LINE_AA)
    return img
def export_pose_images(input_path, output_path='', device='dev3', scan_name=None, mode='skeleton', skeleton_type='openpose'):
    """
    Saves images with human pose
    Parameters
    ----------
    input_path : path to ikea dataset
    output_path : path to save the output images (defaults to input_path)
    device : capture device sub-directory name
    scan_name : None | scane name to export. if None traverses the entire dataset
    mode : 'skeleton' for skeleton overlay, anything else for mesh overlay
    skeleton_type : pose prediction flavour (default 'openpose')
    Returns
    -------
    exports all jpg files to output directry
    """
    if output_path == '':
        output_path = input_path
    if not scan_name is None:
        scan_path = os.path.join(input_path, scan_name, device)
        output_path = os.path.join(output_path, scan_name, device, '2DposeImages')
        os.makedirs(output_path, exist_ok=True)
        rgb_frames = get_files(os.path.join(scan_path, 'images'), file_type='.jpg')
        n_frames = len(rgb_frames)
        # One helper call per frame, fanned out across all cores.
        num_cores = multiprocessing.cpu_count()
        Parallel(n_jobs=num_cores)(delayed(export_pose_helper)(scan_path, rgb_frames, j, output_path, mode, skeleton_type)
                                   for j in range(n_frames))
        # # debug
        # for j in range(n_frames):
        #     export_pose_helper(scan_path, rgb_frames, j, output_path, mode, skeleton_type=skeleton_type)
    else:
        #TODO implement dataset traversal pose export
        pass
def get_seg_data(input_path, output_path='', device='dev3', scan_name=None):
    """
    Load object-segmentation data (segments, tracks and colour tables) for one scan.
    Parameters
    ----------
    input_path : path to ikea dataset
    output_path : optional output root; defaults to input_path
    device : capture device sub-directory name
    scan_name : name of the scan to load (must not be None here)
    Returns
    -------
    rgb_frames, dict_tracks, all_segments, all_segments_dict, dict_colors,
    color_cat, cat_dict, n_frames
    """
    # Fixed colour per furniture-part category id -- channel order follows cv2
    # usage downstream (presumably BGR; confirm).
    color_cat = {1: (255, 0, 0), 2: (0, 0, 255), 3: (0, 255, 0), 4: (127, 0, 127), 5: (127, 64, 0), 6: (64, 0, 127),
                 7: (64, 0, 64)}
    cat_dict = {1: 'table_top', 2: 'leg', 3: 'shelf', 4: 'side_panel', 5: 'front_panel', 6: 'bottom_panel',
                7: 'rear_panel'}
    if output_path == '':
        output_path = input_path
    scan_path = os.path.join(input_path, scan_name, device)
    # output_path = os.path.join(output_path, scan_name, device, 'seg')
    # os.makedirs(output_path, exist_ok=True)
    rgb_frames = get_files(os.path.join(scan_path, 'images'), file_type='.jpg')
    n_frames = len(rgb_frames)
    # Scan name is re-derived from the path (second-to-last path component).
    scan_name = scan_path.split('/')[-2]
    seg_json_filename = os.path.join(scan_path, 'seg', scan_name + '.json')
    tracking_path = os.path.join(scan_path, 'seg', 'tracklets_interp_' + scan_name + '.txt')
    all_segments, dict_tracks, track_id = get_all_segments_and_tracks(seg_json_filename, tracking_path)
    # Obtain Unique colors for each part
    dict_colors = get_object_segment_color_dict(track_id)
    # Group annotations by image id for per-frame lookup.
    all_segments_dict = {}
    for item in all_segments['annotations']:
        if item['image_id'] not in all_segments_dict:
            all_segments_dict[item['image_id']] = []
        all_segments_dict[item['image_id']].append(item)
    return rgb_frames, dict_tracks, all_segments, all_segments_dict, dict_colors, color_cat, cat_dict, n_frames
def get_seg_data_v2(input_path, output_path='', device='dev3', scan_name=None):
    """
    Load object-segmentation data for one scan from manual plus pseudo ground
    truth COCO json files; annotations are grouped by file name instead of
    image id.
    Returns
    -------
    rgb_frames, all_segments, all_segments_dict, color_cat, cat_dict, n_frames
    """
    # This function works specifically for the training data where psudo ground truth is available and tracking data isnt.
    color_cat = {1: (129, 0, 70), 2: (220, 120, 0), 3: (255, 100, 220), 4: (6, 231, 255), 5: (89, 0, 251), 6: (251, 121, 64),
                 7: (171, 128, 126)}
    cat_dict = {1: 'table_top', 2: 'leg', 3: 'shelf', 4: 'side_panel', 5: 'front_panel', 6: 'bottom_panel',
                7: 'rear_panel'}
    if output_path == '':
        output_path = input_path
    scan_path = os.path.join(input_path, scan_name, device)
    rgb_frames = get_files(os.path.join(scan_path, 'images'), file_type='.jpg')
    n_frames = len(rgb_frames)
    seg_pgt_json_filename = os.path.join(scan_path, 'pseudo_gt_coco_format.json')
    seg_gt_json_filename = os.path.join(scan_path, 'manual_coco_format.json')
    gt_segments = json.load(open(seg_gt_json_filename))
    pgt_segments = json.load(open(seg_pgt_json_filename))
    # Merge manual and pseudo annotations into one structure; categories are
    # taken from the manual file.
    all_segments = {'images': np.concatenate([gt_segments['images'], pgt_segments['images']]),
                    'annotations': np.concatenate([gt_segments['annotations'], pgt_segments['annotations']]),
                    'categories': gt_segments['categories']}
    # Group annotations by file name (manual ids index directly, pseudo ids are
    # off by one -- see TODO below).
    all_segments_dict = {}
    for item in gt_segments['annotations']:
        file_name = gt_segments['images'][item['image_id']]['file_name']
        if file_name not in all_segments_dict:
            all_segments_dict[file_name] = []
        all_segments_dict[file_name].append(item)
    for item in pgt_segments['annotations']:
        file_name = pgt_segments['images'][item['image_id']-1]['file_name']  # TODO: remove -1 after indexing is fixed
        if file_name not in all_segments_dict:
            all_segments_dict[file_name] = []
        all_segments_dict[file_name].append(item)
    return rgb_frames, all_segments, all_segments_dict, color_cat, cat_dict, n_frames
def export_seg_images(input_path, output_path='', device='dev3', scan_name=None):
    """
    Saves images with object segmentation
    Parameters
    ----------
    input_path : path to ikea dataset
    output_path : path to save the output images
    device : capture device sub-directory name
    scan_name : None | scane name to export. if None traverses the entire dataset
    Returns
    -------
    exports all jpg files to output directry
    """
    if scan_name is None:
        # TODO implement dataset traversal seg export.
        # Fix: return early -- the original fell through with all loop inputs
        # undefined and raised NameError below.
        return
    rgb_frames, dict_tracks, all_segments, all_segments_dict, dict_colors, color_cat, cat_dict, n_frames = \
        get_seg_data(input_path=input_path, output_path=output_path, device=device, scan_name=scan_name)
    # One helper call per frame, fanned out across all cores.
    num_cores = multiprocessing.cpu_count()
    Parallel(n_jobs=num_cores)(delayed(export_seg_helper)(rgb_frames, j, output_path, dict_tracks,
                                                          all_segments, all_segments_dict, dict_colors, color_cat,
                                                          cat_dict) for j in range(n_frames))
def get_all_segments_and_tracks(seg_json_filename, tracking_path):
    """
    Load the part segments and tracks from json and txt files
    Parameters
    ----------
    seg_json_filename : path to .json segments file
    tracking_path : path to tracking txt file (space-separated fields, one
        track entry per line)
    Returns
    -------
    all_segments : parsed json structure
    dict_tracks : {frame-number-string: [[first five fields, last field], ...]}
    track_id : list of int part ids (third field from the end of each line)
    """
    # Fix: use context managers so both file handles are closed (the original
    # leaked the json handle via json.load(open(...)) and never closed fid_track).
    with open(seg_json_filename) as seg_file:
        all_segments = json.load(seg_file)
    with open(tracking_path) as fid_track:
        tracking_results = fid_track.read().split('\n')
    track_id = []
    dict_tracks = {}
    for track in tracking_results:
        if track != "":
            items = track.split(' ')
            track_id.append(int(items[-3]))
            if items[-2] not in dict_tracks:
                dict_tracks[items[-2]] = []
            dict_tracks[items[-2]].append([items[0:5], items[-1]])
    return all_segments, dict_tracks, track_id
def get_object_segment_color_dict(track_id):
    """
    Build a colour lookup with one distinct random colour per part id.
    Parameters
    ----------
    track_id : iterable of integer part ids (duplicates allowed)
    Returns
    -------
    {str(part_id): (r, g, b)} with values drawn from random.sample(range(0, 255)).
    """
    unique_parts = np.unique(track_id)
    max_part = np.max(unique_parts)
    reds = random.sample(range(0, 255), max_part)
    greens = random.sample(range(0, 255), max_part)
    blues = random.sample(range(0, 255), max_part)
    return {str(pid): (int(reds[pid - 1]), int(greens[pid - 1]), int(blues[pid - 1]))
            for pid in unique_parts}
def export_pose_helper(scan_path, rgb_frames, file_idx, output_path, mode, skeleton_type='openpose'):
    """
    export pose for a single image - allows parallelization
    Parameters
    ----------
    scan_path : scan/device directory containing the 'predictions' sub-tree
    rgb_frames : list of frame image paths
    file_idx : index of the frame to process
    output_path : directory the overlaid jpg is written to
    mode : 'skeleton' for skeleton overlay, anything else for mesh overlay
    skeleton_type : pose prediction flavour sub-directory (default 'openpose')
    Returns
    -------
    Writes one overlaid jpg and prints a status line.
    """
    frame_filename = str(file_idx).zfill(6)
    # Keypoint json files are named scan_video_<12-digit frame index>_keypoints.json.
    pose_json_filename = os.path.join(scan_path, 'predictions', 'pose2d', skeleton_type,
                                      'scan_video_' + str(file_idx).zfill(12) + '_keypoints.json')
    output_filename = os.path.join(output_path, frame_filename + '.jpg')
    img = cv2.imread(rgb_frames[file_idx])
    if mode == 'skeleton':
        img = img_pose_skeleton_overlay(img, pose_json_filename, skeleton_type=skeleton_type )
    else:
        img = img_pose_mesh_overlay(img, pose_json_filename)
    cv2.imwrite(output_filename, img)
    print('Saved pose for ' + frame_filename + '.jpeg to ' + output_path)
def export_seg_helper(rgb_frames, file_idx, output_path, dict_tracks, all_segments, all_segments_dict,
                      dict_colors, color_cat, cat_dict):
    """
    export object segmentation for a single image - allows parallelization
    Parameters
    ----------
    rgb_frames : list of frame image paths
    file_idx : index of the frame to process
    output_path : directory the overlaid jpg is written to
    dict_tracks : per-frame part tracks keyed by frame number (string)
    all_segments : full segment structure (used to resolve the image id)
    all_segments_dict : annotations grouped by image id
    dict_colors : per-part-id colour lookup
    color_cat : per-category colour lookup
    cat_dict : category id -> category name
    Returns
    -------
    Writes one overlaid jpg and prints a status line.
    """
    frame_filename = str(file_idx).zfill(6)
    # Resolve the frame's image id inside the segment structure by file name.
    image_id = find_seg_id(frame_filename, all_segments)
    fname_id = int(str.split(frame_filename, '.')[0])
    segment = all_segments_dict[image_id]
    track = dict_tracks[str(fname_id)]
    output_filename = os.path.join(output_path, frame_filename + '.jpg')
    img = cv2.imread(rgb_frames[file_idx])
    img = img_seg_overlay(img, segment, track, dict_colors, color_cat, cat_dict)
    cv2.imwrite(output_filename, img)
    print('Saved object segmentation for ' + frame_filename + '.jpeg to ' + output_path)
def find_seg_id(image_name, test_data):
    """Return the id of the first image whose file_name contains *image_name*, or -1."""
    for entry in test_data['images']:
        if image_name in entry['file_name']:
            return entry['id']
    return -1
def find_seg_id_v2(image_name, test_data):
    """Return the first key of *test_data* that contains *image_name*, or -1."""
    matches = (key for key in test_data.keys() if image_name in key)
    return next(matches, -1)
def img_seg_overlay(image, predictions, part_tracks, dict_colors, color_cat, cat_dict):
"""
overlays object segmentation from json file on the given image
Parameters
----------
img : rgb image
json_path : path to .json file
Returns
-------
img : rgb img with object segments overlay
"""
for part in part_tracks:
assigned = 0
for item in predictions:
box = item['bbox']
label = item['category_id']
segment = item['segmentation']
segment_id = item['id']
contours = []
length = len(segment)
if segment_id == int(part[1]):
for i in range(length):
id = | |
<filename>datacube/index/_datasets.py
# coding=utf-8
"""
API for dataset indexing, access and search.
"""
from __future__ import absolute_import
import logging
from cachetools.func import lru_cache
from datacube import compat
from datacube.model import Dataset, DatasetType, MetadataType
from datacube.utils import InvalidDocException, check_doc_unchanged, jsonify_document, get_doc_changes, contains
from . import fields
from .exceptions import DuplicateRecordError, UnknownFieldError
_LOG = logging.getLogger(__name__)
class MetadataTypeResource(object):
    """Access to stored metadata types: add, and cached lookup by id or name."""
    def __init__(self, db):
        """
        :type db: datacube.index.postgres._api.PostgresDb
        """
        self._db = db
    def add(self, definition, allow_table_lock=False):
        """
        Add a metadata type, or verify that an identical one is already stored.
        :type definition: dict
        :param allow_table_lock:
            Allow an exclusive lock to be taken on the table while creating the indexes.
            This will halt other user's requests until completed.
            If false, creation will be slightly slower and cannot be done in a transaction.
        :rtype: datacube.model.MetadataType
        """
        # This column duplication is getting out of hand:
        MetadataType.validate(definition)
        name = definition['name']
        existing = self._db.get_metadata_type_by_name(name)
        if existing:
            # They've passed us the same one again. Make sure it matches what is stored.
            # TODO: Support for adding/updating search fields?
            check_doc_unchanged(
                existing.definition,
                definition,
                'Metadata Type {}'.format(name)
            )
        else:
            self._db.add_metadata_type(
                name=name,
                definition=definition,
                concurrently=not allow_table_lock
            )
        return self.get_by_name(name)
    # NOTE(review): lru_cache on an instance method caches per (self, arg) and
    # keeps the resource (and its db handle) alive for the cache's lifetime --
    # acceptable for a long-lived singleton resource, but worth confirming.
    @lru_cache()
    def get(self, id_):
        """
        Look up a metadata type by database id (cached).
        :rtype: datacube.model.MetadataType
        """
        return self._make(self._db.get_metadata_type(id_))
    @lru_cache()
    def get_by_name(self, name):
        """
        Look up a metadata type by name (cached); returns None when not found.
        :rtype: datacube.model.MetadataType
        """
        record = self._db.get_metadata_type_by_name(name)
        if not record:
            return None
        return self._make(record)
    def check_field_indexes(self, allow_table_lock=False, rebuild_all=False):
        """
        Create or replace per-field indexes and views.
        :param allow_table_lock:
            Allow an exclusive lock to be taken on the table while creating the indexes.
            This will halt other user's requests until completed.
            If false, creation will be slightly slower and cannot be done in a transaction.
        """
        self._db.check_dynamic_fields(concurrently=not allow_table_lock, rebuild_all=rebuild_all)
    def _make_many(self, query_rows):
        # Lazily convert raw db rows into model objects.
        return (self._make(c) for c in query_rows)
    def _make(self, query_row):
        """
        Convert one db row into a MetadataType model.
        :rtype list[datacube.model.MetadataType]
        """
        definition = query_row['definition']
        dataset_ = definition['dataset']
        return MetadataType(
            query_row['name'],
            dataset_,
            dataset_search_fields=self._db.get_dataset_fields(query_row),
            id_=query_row['id']
        )
class DatasetTypeResource(object):
"""
:type _db: datacube.index.postgres._api.PostgresDb
:type metadata_type_resource: MetadataTypeResource
"""
def __init__(self, db, metadata_type_resource):
    """
    Product (dataset-type) access backed by *db*.
    :type db: datacube.index.postgres._api.PostgresDb
    :type metadata_type_resource: MetadataTypeResource
    """
    self._db = db
    self.metadata_type_resource = metadata_type_resource
def from_doc(self, definition):
    """
    Create a Product from its definitions
    :param dict definition: product definition document
    :raises InvalidDocException: if the referenced metadata type name is unknown
    :rtype: datacube.model.DatasetType
    """
    # This column duplication is getting out of hand:
    DatasetType.validate(definition)
    metadata_type = definition['metadata_type']
    # They either specified the name of a metadata type, or specified a metadata type.
    # Is it a name?
    if isinstance(metadata_type, compat.string_types):
        metadata_type = self.metadata_type_resource.get_by_name(metadata_type)
    else:
        # Otherwise they embedded a document, add it if needed:
        metadata_type = self.metadata_type_resource.add(metadata_type, allow_table_lock=False)
    if not metadata_type:
        # get_by_name returned None: the referenced metadata type was never registered.
        raise InvalidDocException('Unknown metadata type: %r' % definition['metadata_type'])
    return DatasetType(metadata_type, definition)
def add(self, type_):
"""
Add a Product
:param datacube.model.DatasetType type_: Product to add
:rtype: datacube.model.DatasetType
"""
DatasetType.validate(type_.definition)
existing = self._db.get_dataset_type_by_name(type_.name)
if existing:
# TODO: Support for adding/updating match rules?
# They've passed us the same collection again. Make sure it matches what is stored.
check_doc_unchanged(
existing.definition,
jsonify_document(type_.definition),
'Dataset type {}'.format(type_.name)
)
else:
self._db.add_dataset_type(
name=type_.name,
metadata=type_.metadata_doc,
metadata_type_id=type_.metadata_type.id,
definition=type_.definition
)
return self.get_by_name(type_.name)
def update(self, type_, allow_unsafe_updates=False):
"""
Update a product. Unsafe changes will throw a ValueError by default.
(An unsafe change is anything that may potentially make the product
incompatible with existing datasets of that type)
:param datacube.model.DatasetType type_: Product to add
:param allow_unsafe_updates bool: Allow unsafe changes. Use with caution.
:rtype: datacube.model.DatasetType
"""
DatasetType.validate(type_.definition)
existing = self._db.get_dataset_type_by_name(type_.name)
if not existing:
raise ValueError('Unknown product %s, cannot update – did you intend to add it?' % type_.name)
def handle_unsafe(msg):
if not allow_unsafe_updates:
raise ValueError(msg)
else:
_LOG.warning("Ignoring %s", msg)
# We'll probably want to use offsets in the future (ie. nested dicts), not just keys, but for now this suffices.
safe_keys_to_change = ('description', 'metadata')
doc_changes = get_doc_changes(existing.definition, jsonify_document(type_.definition))
for offset, old_value, new_value in doc_changes:
_LOG.info('Changing %s %s: %r -> %r', type_.name, '.'.join(offset), old_value, new_value)
key_name = offset[0]
if key_name not in safe_keys_to_change:
handle_unsafe('Potentially unsafe update: changing %r of product definition.' % key_name)
# You can safely make the match rules looser but not tighter.
if key_name == 'metadata':
# Tightening them could exclude datasets already matched to the product.
# (which would make search results wrong)
if not contains(old_value, new_value, case_sensitive=True):
handle_unsafe('Unsafe update: new product match rules are not a superset of old ones.')
if doc_changes:
_LOG.info("Updating product %s", type_.name)
self._db.update_dataset_type(
name=type_.name,
metadata=type_.metadata_doc,
metadata_type_id=type_.metadata_type.id,
definition=type_.definition
)
# Clear our local cache. Note that other users may still have
# cached copies for the duration of their connections.
self.get_by_name.cache_clear()
self.get.cache_clear()
else:
_LOG.info("No changes detected for product %s", type_.name)
def update_document(self, definition, allow_unsafe_update=False):
"""
Update a Product using its difinition
:param dict definition: product definition document
:rtype: datacube.model.DatasetType
"""
type_ = self.from_doc(definition)
return self.update(type_, allow_unsafe_updates=allow_unsafe_update)
def add_document(self, definition):
"""
Add a Product using its difinition
:param dict definition: product definition document
:rtype: datacube.model.DatasetType
"""
type_ = self.from_doc(definition)
return self.add(type_)
@lru_cache()
def get(self, id_):
"""
Retrieve Product by id
:param int id_: id of the Product
:rtype: datacube.model.DatasetType
"""
return self._make(self._db.get_dataset_type(id_))
@lru_cache()
def get_by_name(self, name):
"""
Retrieve Product by name
:param str name: name of the Product
:rtype: datacube.model.DatasetType
"""
result = self._db.get_dataset_type_by_name(name)
if not result:
return None
return self._make(result)
def get_with_fields(self, field_names):
"""
Return dataset types that have all the given fields.
:param tuple[str] field_names:
:rtype: __generator[DatasetType]
"""
for type_ in self.get_all():
for name in field_names:
if name not in type_.metadata_type.dataset_fields:
break
else:
yield type_
def search(self, **query):
"""
Return dataset types that have all the given fields.
:param dict query:
:rtype: __generator[DatasetType]
"""
for type_, q in self.search_robust(**query):
if not q:
yield type_
def search_robust(self, **query):
"""
Return dataset types that match match-able fields and dict of remaining un-matchable fields.
:param dict query:
:rtype: __generator[(DatasetType, dict)]
"""
for type_ in self.get_all():
q = query.copy()
if q.pop('product', type_.name) != type_.name:
continue
if q.pop('metadata_type', type_.metadata_type.name) != type_.metadata_type.name:
continue
for key, value in list(q.items()):
try:
exprs = fields.to_expressions(type_.metadata_type.dataset_fields.get, **{key: value})
except UnknownFieldError as e:
break
try:
if all(expr.evaluate(type_.metadata_doc) for expr in exprs):
q.pop(key)
else:
break
except (AttributeError, KeyError, ValueError) as e:
continue
else:
yield type_, q
def get_all(self):
"""
Retrieve all Products
:rtype: iter[datacube.model.DatasetType]
"""
return (self._make(record) for record in self._db.get_all_dataset_types())
def _make_many(self, query_rows):
return (self._make(c) for c in query_rows)
def _make(self, query_row):
"""
:rtype datacube.model.DatasetType
"""
return DatasetType(
definition=query_row['definition'],
metadata_type=self.metadata_type_resource.get(query_row['metadata_type_ref']),
id_=query_row['id'],
)
class DatasetResource(object):
    """
    Indexed-dataset access: lookup, addition, and provenance traversal.

    :type _db: datacube.index.postgres._api.PostgresDb
    :type types: datacube.index._datasets.DatasetTypeResource
    """
    def __init__(self, db, dataset_type_resource):
        """
        :type db: datacube.index.postgres._api.PostgresDb
        :type dataset_type_resource: datacube.index._datasets.DatasetTypeResource
        """
        self._db = db
        self.types = dataset_type_resource
    def get(self, id_, include_sources=False):
        """
        Get dataset by id.

        :param uuid id_: id of the dataset to retrieve
        :param bool include_sources: get the full provenance graph?
        :rtype: datacube.model.Dataset
        """
        if not include_sources:
            return self._make(self._db.get_dataset(id_), full_info=True)

        # Fetch the dataset and its whole source (provenance) tree in one query,
        # keyed by the row id column.
        datasets = {result['id']: (self._make(result, full_info=True), result)
                    for result in self._db.get_dataset_sources(id_)}
        for dataset, result in datasets.values():
            # Wire the provenance graph both into the metadata document and as
            # Dataset objects, keyed by source classifier.
            dataset.metadata_doc['lineage']['source_datasets'] = {
                classifier: datasets[str(source)][0].metadata_doc
                for source, classifier in zip(result['sources'], result['classes']) if source
            }
            dataset.sources = {
                classifier: datasets[str(source)][0]
                for source, classifier in zip(result['sources'], result['classes']) if source
            }
        # NOTE(review): sources are looked up via str(source) while this final
        # lookup uses id_ unconverted — confirm id_'s type matches the row keys.
        return datasets[id_][0]
def get_derived(self, id_):
"""
Get drived datasets
:param uuid id_: dataset id
:rtype: list[datacube.model.Dataset]
"""
return [self._make(result) for result in self._db.get_derived_datasets(id_)]
def has(self, dataset):
"""
Have we already indexed this dataset?
:param datacube.model.Dataset dataset: dataset to check
:rtype: bool
"""
return self._db.contains_dataset(dataset.id)
    def add(self, dataset, skip_sources=False):
        """
        Ensure a dataset is in the index. Add it if not present.

        :param datacube.model.Dataset dataset: dataset to add
        :param bool skip_sources: don't attempt to index source (use when sources are already indexed)
        :rtype: datacube.model.Dataset
        """
        if not skip_sources:
            # Recursively index the provenance tree first, so source rows exist
            # before we link this dataset to them.
            for source in dataset.sources.values():
                self.add(source)
        was_inserted = False
        # Temporarily strip the embedded source documents from the metadata doc
        # before storing it; restored unconditionally in the `finally` below.
        sources_tmp = dataset.type.dataset_reader(dataset.metadata_doc).sources
        dataset.type.dataset_reader(dataset.metadata_doc).sources = {}
        try:
            _LOG.info('Indexing %s', dataset.id)
            with self._db.begin() as transaction:
                try:
                    was_inserted = transaction.insert_dataset(dataset.metadata_doc, dataset.id, dataset.type.id)
                    for classifier, source_dataset in dataset.sources.items():
                        transaction.insert_dataset_source(classifier, dataset.id, source_dataset.id)
                    # try to update location in the same transaction as insertion.
                    # if insertion fails we'll try updating location later
                    # if insertion succeeds the location bit can't possibly fail
                    if dataset.local_uri:
                        transaction.ensure_dataset_location(dataset.id, dataset.local_uri)
                except DuplicateRecordError as e:
                    # Already indexed: fall through to the unchanged-document check.
                    _LOG.warning(str(e))
            if not was_inserted:
                existing = self.get(dataset.id)
                if existing:
                    # Re-adding is only acceptable if the stored doc is identical.
                    check_doc_unchanged(
                        existing.metadata_doc,
                        jsonify_document(dataset.metadata_doc),
                        'Dataset {}'.format(dataset.id)
                    )
                # reinsert attempt? try updating the location
                if dataset.local_uri:
                    try:
                        self._db.ensure_dataset_location(dataset.id, dataset.local_uri)
                    except DuplicateRecordError as e:
                        _LOG.warning(str(e))
        finally:
            # Always restore the source documents stripped above.
            dataset.type.dataset_reader(dataset.metadata_doc).sources = sources_tmp
        return dataset
def archive(self, ids):
"""
Mark datasets as archived
:param list[uuid] ids: | |
import time
import gym, numpy as np
import pygame, pymunk, logging, math, random
# imports for DQNAgent
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten
from tensorflow.keras.optimizers import Adam
from keras.callbacks import TensorBoard
import tensorflow as tf
from collections import deque
import time
import random
import os
# Hide GPU from visible devices (force CPU-only TensorFlow)
tf.config.set_visible_devices([], 'GPU')
# --- DQN hyper-parameters ---
DISCOUNT = 0.99  # Bellman discount factor (gamma)
REPLAY_MEMORY_SIZE = 50_000  # How many last steps to keep for model training
MIN_REPLAY_MEMORY_SIZE = 1_000  # Minimum number of steps in a memory to start training
MINIBATCH_SIZE = 64  # How many steps (samples) to use for training
UPDATE_TARGET_EVERY = 5  # Terminal states (end of episodes) between target-network syncs
MODEL_NAME = 'BOX'
# Exploration settings
# NOTE(review): name is a misspelling of EPSILON_DECAY; renaming would change the
# module's public names (other code may reference it) — confirm before fixing.
ELIPSON_DECAY = 0.099975
MIN_EPSILON = 0.001
# For stats
ep_rewards = [-200]
# For more repetitive results
random.seed(1)
np.random.seed(1)
# Memory fraction, used mostly when training multiple agents
#gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=MEMORY_FRACTION)
#backend.set_session(tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)))
# Create models folder
if not os.path.isdir('models'):
    os.makedirs('models')
# Own Tensorboard class: keeps a single log writer for the agent's lifetime
# instead of creating a new log file on every .fit() call.
class ModifiedTensorBoard(TensorBoard):

    # Overriding init to set initial step and writer (we want one log file for all .fit() calls)
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.step = 1
        self.writer = tf.summary.create_file_writer(self.log_dir)

    # Overriding this method to stop creating default log writer
    def set_model(self, model):
        pass

    # Overridden, saves logs with our step number
    # (otherwise every .fit() will start writing from 0th step)
    def on_epoch_end(self, epoch, logs=None):
        # Guard against Keras passing logs=None.
        self.update_stats(**(logs or {}))

    # Overridden
    # We train for one batch only, no need to save anything at epoch end
    def on_batch_end(self, batch, logs=None):
        pass

    # Overridden, so the shared writer won't be closed at the end of training
    def on_train_end(self, _):
        pass

    # Custom method for saving own metrics.
    # BUG FIX: the old code called self._write_logs(), a private Keras method
    # removed in TF2; write scalars through our own file writer instead.
    def update_stats(self, **stats):
        with self.writer.as_default():
            for name, value in stats.items():
                tf.summary.scalar(name, value, step=self.step)
            self.writer.flush()
# Agent class
class DQNAgent:
    """Double-network DQN agent: a main model trained every step, plus a
    target model synchronised every UPDATE_TARGET_EVERY terminal states to
    stabilise the Q-value targets."""

    def __init__(self, env):
        self.env = env

        # Main model: trained on every step.
        self.model = self.create_model()

        # Target network: used only for predicting future Q values.
        self.target_model = self.create_model()
        self.target_model.set_weights(self.model.get_weights())

        # An array with last n steps for training
        self.replay_memory = deque(maxlen=REPLAY_MEMORY_SIZE)

        # Custom tensorboard object
        self.tensorboard = ModifiedTensorBoard(log_dir="logs/{}-{}".format(MODEL_NAME, int(time.time())))

        # Used to count when to update target network with main network's weights
        self.target_update_counter = 0

    def create_model(self):
        """Build the convolutional Q-network: one input frame in, one linear
        Q-value per discrete action out."""
        model = Sequential()
        # NOTE(review): the leading 60000 element is never consumed (only
        # observation_space[1:] is used) — looks vestigial; confirm and drop.
        observation_space = 60000, np.array(self.env.observation).shape[0], np.array(self.env.observation).shape[1], 1
        action_space = self.env.action_space.n
        model.add(Conv2D(32, (3, 3), activation='relu', input_shape=observation_space[1:]))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.2))
        model.add(Conv2D(256, (3, 3)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.2))
        model.add(Flatten())  # this converts our 3D feature maps to 1D feature vectors
        model.add(Dense(64))
        model.add(Dense(action_space, activation='linear'))  # one Q-value per possible action
        # BUG FIX: the `lr` argument was removed from tf.keras optimizers;
        # use `learning_rate` instead.
        model.compile(loss="mse", optimizer=Adam(learning_rate=0.001), metrics=['accuracy'])
        return model

    # Adds step's data to a memory replay array
    # (observation space, action, reward, new observation space, done)
    def update_replay_memory(self, transition):
        self.replay_memory.append(transition)

    def train(self, terminal_state, step):
        """Train the main network on one random minibatch from replay memory.

        :param terminal_state: True at episode end; enables TensorBoard logging
            and target-network update bookkeeping.
        :param step: current step number (unused; kept for interface stability).
        """
        # Start training only if certain number of samples is already saved
        if len(self.replay_memory) < MIN_REPLAY_MEMORY_SIZE:
            return

        # Get a minibatch of random samples from memory replay table
        minibatch = random.sample(self.replay_memory, MINIBATCH_SIZE)

        # Get current states from minibatch, then query NN model for Q values
        current_states = np.array([transition[0] for transition in minibatch]) / 255
        current_qs_list = self.model.predict(current_states)

        # Get future states from minibatch, then query NN model for Q values
        # When using target network, query it, otherwise main network should be queried
        new_current_states = np.array([transition[3] for transition in minibatch]) / 255
        future_qs_list = self.target_model.predict(new_current_states)

        X = []
        y = []

        # Now we need to enumerate our batches
        for index, (current_state, action, reward, new_current_state, done) in enumerate(minibatch):
            # If not a terminal state, bootstrap from the target network's best
            # future Q; otherwise the return is just the final reward.
            if not done:
                max_future_q = np.max(future_qs_list[index])
                new_q = reward + DISCOUNT * max_future_q
            else:
                new_q = reward

            # Update Q value for given state
            current_qs = current_qs_list[index]
            current_qs[action] = new_q

            # And append to our training data
            X.append(current_state)
            y.append(current_qs)

        # Fit on all samples as one batch, log only on terminal state
        self.model.fit(np.array(X) / 255, np.array(y), batch_size=MINIBATCH_SIZE, verbose=0,
                       shuffle=False, callbacks=[self.tensorboard] if terminal_state else None)

        # Update target network counter every episode
        if terminal_state:
            self.target_update_counter += 1

        # If counter reaches set value, update target network with weights of main network
        if self.target_update_counter > UPDATE_TARGET_EVERY:
            self.target_model.set_weights(self.model.get_weights())
            self.target_update_counter = 0

    # Queries main network for Q values given current observation space (environment state)
    def get_qs(self, state):
        return self.model.predict(np.array(state).reshape(-1, *state.shape) / 255)[0]
class WorldEnvironment(gym.Env):
    """Gym environment for a single agent living in the terrain world.

    Action ids (Discrete(15)): 0-4 movement/jump combinations, 5-8 break a
    block (above/below/left/right), 9-12 place a block, 13-14 attack
    left/right.
    """
    def __init__(self, terrain_world, parent):
        self.action_space = gym.spaces.Discrete(15)
        # NOTE(review): the declared (84, 84, 1) Box shape may not match what
        # get_observation() actually returns (a fov=25 terrain patch) — confirm.
        self.observation_space = gym.spaces.Box(low=0, high=255, shape=(84, 84, 1), dtype=np.uint8)
        self.velocity = (0, 0)
        self.position = (0, 0)
        self.terrain_world = terrain_world
        self.parent = parent  # entity whose physics body this environment drives
        self.inventory = []
        self.scheduled_rewards = []  # one-off rewards queued by external events
        self.time_lapsed = 0
        self.health = 100
        # The initial observation doubles as the agent's first "last state".
        self.observation = self.get_observation()
        self.last_state = self.observation
        self.agent = DQNAgent(self)
def play_sound(self):
# play self.parent.parent.parent.assets.get("coin.wav")
sound = self.parent.parent.parent.textures.get("coin.wav")
pygame.mixer.Sound.play(sound)
def get_observation(self):
_ = int(self.position[0] / 32), int(self.position[1] / 32)
observation = self.parent.parent.get_terrain_matrix(_, fov=25)
return observation
def step(self, action):
self.position = self.parent.body.position
self.time_lapsed += 1
reward = 0
# generate action id from tensor
if not isinstance(action, int):
action = action.argmax()
if action > 8 and action < 13 and random.randint(0, 3) == 0:
self.play_sound()
if action == 0:
self.velocity = (self.velocity[0] + 40, self.velocity[1])
elif action == 1:
self.velocity = (self.velocity[0] - 40, self.velocity[1])
elif action == 2:
self.velocity = (self.velocity[0], self.velocity[1] - 400)
elif action == 3:
self.velocity = (self.velocity[0] + 40, self.velocity[1] - 400)
elif action == 4:
self.velocity = (self.velocity[0] - 40, self.velocity[1] - 400)
elif action == 5:
# break block above
pos = int(self.position[0] / 32), int(self.position[1] / 32)
block = self.parent.parent.remove_block(pos)
if block is not None:
self.inventory.append(block)
elif action == 6:
# break block below
pos = int(self.position[0] / 32), int(self.position[1] / 32)
block = self.parent.parent.remove_block((pos[0], pos[1] + 1))
if block is not None:
self.inventory.append(block)
elif action == 7:
# break block left
pos = int(self.position[0] / 32), int(self.position[1] / 32)
block = self.parent.parent.remove_block((pos[0]-1, pos[1]))
if block is not None:
self.inventory.append(block)
elif action == 8:
# break block right
pos = int(self.position[0] / 32), int(self.position[1] / 32)
block = self.parent.parent.remove_block((pos[0]+1, pos[1]))
if block is not None:
self.inventory.append(block)
elif action == 9:
# place block above
try:
pos = int(self.position[0] / 32), int(self.position[1] / 32)
if len(self.inventory) > 0:
block = self.inventory.pop()
self.parent.parent.place_block(pos, block)
except Exception as e:
pass
elif action == 10:
# place block below
try:
pos = int(self.position[0] / 32), int(self.position[1] / 32)
if len(self.inventory) > 0:
block = self.inventory.pop()
self.parent.parent.place_block((pos[0], pos[1] + 1), block)
except Exception as e:
pass
elif action == 11:
# place block left
try:
pos = int(self.position[0] / 32), int(self.position[1] / 32)
if len(self.inventory) > 0:
block = self.inventory.pop()
self.parent.parent.place_block((pos[0]-1, pos[1]), block)
except Exception as e:
pass
elif action == 12:
# place block right
try:
pos = int(self.position[0] / 32), int(self.position[1] / 32)
if len(self.inventory) > 0:
block = self.inventory.pop()
self.parent.parent.place_block((pos[0]+1, pos[1]), block)
except Exception as e:
pass
# 13, 14: attack left, right
elif action == 13:
pos = int(self.position[0] / 32), int(self.position[1] / 32)
_ = self.parent.parent.attack((pos[0]-1, pos[1]))
if _ == None:
reward -= 10
else:
print(f'{_} was attacked by {self.parent.name}')
elif action == 14:
pos = int(self.position[0] / 32), int(self.position[1] / 32)
_ = self.parent.parent.attack((pos[0]+1, pos[1]))
if _ == None:
reward -= 10
else:
print(f'{_} was attacked by {self.parent.name}')
if self.position[1] > 10000:
reward += -100
self.reset()
print(f"[{self.parent.name}] fell off the world")
# reward on inventory size
reward += len(self.inventory) / 10
if len(self.scheduled_rewards) > 0:
reward += self.scheduled_rewards.pop(0)
reward += 100 * 1 / self.time_lapsed
# get distance to (0, 0)
distance = math.dist((0, 0), self.position)
if distance > 1000:
reward += 1 * distance / 1000
else:
reward += -1 * distance / 1000
# If a block exists at the player's position, the player is suffocated
_ = (self.position[0] / 32, self.position[1] / 32)
if self.parent.parent.get_terrain_at(_) != 0:
reward += -100
self.health -= 1
# sort the inventory
self.inventory = sorted(self.inventory, key=lambda x: x)
observation = self.get_observation()
# Health is 0 if the player is dead
if self.health <= 0:
reward += -100
self.reset()
print(f"[{self.parent.name}] died")
# give reward for maintaining health
reward += (self.health - 50) * 0.1
# train DQNAgent
self.agent.update_replay_memory((self.last_state, action, reward, observation, True))
self.last_state = | |
0xbec1, 0xbc26, 0x3a5c,
0x3f2b, 0xbc89, 0xbd81, 0x3d13, 0x3dba, 0xbcc8, 0xbc38, 0xbea3, 0x3ba5, 0xbebc,
0x3f16, 0xbcb0, 0xbe00, 0xbf19, 0x3eeb, 0xbeb8, 0x3ee7, 0xbea8, 0x3e4d, 0xbe87,
0x3eeb, 0xbeb8, 0x3ead, 0xbf09, 0xbf2a, 0xbe6d, 0x3e5b, 0xbe84, 0x3ef2, 0x3f10,
0x3d80, 0xbe4c, 0x3e1d, 0xbef3, 0xbe0e, 0xbe13, 0xbeb5, 0x3e87, 0x3ec6, 0x3ee2,
0xbed9, 0x3c9b, 0x3f1b, 0xbf5a, 0x3eae, 0x3d4b, 0x3fad, 0xbf36, 0x3ee8, 0xbf55,
0xbf69, 0xbe98, 0x3e8a, 0xbea2, 0x3f23, 0xbe16, 0x3f46, 0x3ec1, 0xbd53, 0xbea8,
0xbeb4, 0x3ec8, 0x3e3f, 0xbebf, 0x3e37, 0x3e79, 0xbe2c, 0xbeb7, 0xbe52, 0x3ee9,
0xbe02, 0xbf0b, 0xbe4b, 0x3ee7, 0x3e17, 0x3d92, 0x3e32, 0xbe4c, 0x3d4f, 0x3e30,
0x3e06, 0xbf8d, 0x3de1, 0x3f13, 0x3f17, 0xbd4c, 0x3e63, 0xbd8f, 0xbee8, 0x3d4c,
0xbe26, 0x3e73, 0x3f09, 0x3d6a, 0xbed4, 0x3e37, 0x3e87, 0xbe7e, 0xbebf, 0xbd97,
0xbd38, 0x3f23, 0xbd45, 0x3dbb, 0xbe26, 0xbeec, 0x3e84, 0xbd3e, 0xbdfd, 0xbdcf,
0xbe75, 0xbb1b, 0x3ec2, 0x3e07, 0xbdd4, 0x3eae, 0xbd83, 0xbe0a, 0xbdbb, 0xbe5d,
0xbf3c, 0x3e8c, 0x3d41, 0x3e68, 0x3ddc, 0xbe1b, 0xbc76, 0xbda4, 0x3e42, 0x3e0d,
0xbef0, 0x3d1e, 0x3eb6, 0x3ed8, 0x3eb2, 0xbdb8, 0xbda6, 0x3d9b, 0xbef8, 0xbdef,
0x3efc, 0xbd7e, 0x3e5a, 0x3f30, 0xbf3a, 0x3e8b, 0xbe52, 0xbda8, 0xbe7a, 0xbeb0,
0x3e8e, 0x3d8d, 0x3f83, 0x3e62, 0xbe8f, 0x3f05, 0x3e18, 0xbf97, 0x3e85, 0xbf87,
0xbc79, 0x3ed1, 0x3fb9, 0xbf3a, 0xbf11, 0x3ee0, 0x3ec4, 0xbf10, 0x3de8, 0xbf6a,
0xbb94, 0x3eb7, 0x3fb9, 0xbec8, 0xbf2f, 0xbeba, 0x3ef0, 0x3c96, 0xbf19, 0xbe7f,
0xbcea, 0x3e12, 0x3d7c, 0xbd21, 0xbd22, 0xbdaa, 0xbd9b, 0x3e2c, 0xbdc2, 0xbc2a,
0xbca1, 0xb703, 0x3e06, 0xb974, 0xb758, 0xbd68, 0xbd5b, 0xb76b, 0xb96a, 0xba12,
0xaefd, 0x394e, 0xb916, 0xb71b, 0xb162, 0xb322, 0xb441, 0xb5e3, 0xb831, 0xb43c,
0x3d13, 0x3a32, 0xbc0d, 0xba4a, 0xbd84, 0xbafe, 0xbb60, 0x3dff, 0xbb9c, 0xbd9e,
0x3c10, 0x3a8e, 0x3cd4, 0xbcc6, 0x3d65, 0xbd8b, 0xbba2, 0x3dd5, 0xbcfd, 0xbd8b,
0xbdd5, 0xbc21, 0xbe2c, 0x3f8a, 0x3efd, 0x3d78, 0xbdf1, 0xbe72, 0xbf0d, 0xbee4,
0xbbe0, 0xbeb9, 0x3e91, 0x3e40, 0xbd42, 0x3f51, 0xbdef, 0x3e44, 0xbeb6, 0xbf17,
0xbe15, 0xbfa9, 0x3c81, 0x3f7c, 0x3ea5, 0x3da1, 0xbdaf, 0x3d30, 0x3efa, 0xbec2,
0x3ede, 0xbeb7, 0xbe7c, 0xbeb9, 0xbd55, 0x3ef7, 0x3ded, 0xbe5f, 0x3dbd, 0x3de2,
0x3e84, 0x3f3d, 0x3e4e, 0xbeb7, 0xbc3b, 0xbd92, 0x3e73, 0xbf46, 0xbee2, 0x3e64,
0x3de5, 0xbda1, 0x3e31, 0xbe44, 0xbea2, 0x3ef7, 0xbe0e, 0x3c2b, 0x3de4, 0xbe26,
0xbe24, 0x3ec0, 0x3f0b, 0xbbd8, 0xbf4b, 0x3d56, 0x3f05, 0xbe36, 0x3eae, 0xbf31,
0x3ea6, 0xbb9c, 0x3f21, 0x3d17, 0xbf59, 0x3ec0, 0x3f0b, 0xbfab, 0x3f01, 0xbe65,
0x3efd, 0xbee8, 0x3c81, 0xbedc, 0x3ee4, 0xbdae, 0x3f7b, 0xbe06, 0xbe42, 0xbf25,
0x3f07, 0xbe74, 0xbc28, 0xbe66, 0xbf8a, 0x3dc8, 0x3f20, 0x3cd3, 0x3d41, 0x3e6d,
0xbe2e, 0x3eef, 0x3f36, 0xbd45, 0xbeb4, 0x3e01, 0x3e40, 0xbeab, 0xbe8a, 0xbea4,
0xbecf, 0xbef3, 0x3d1d, 0xbd51, 0x3f3b, 0xbdfc, 0x3f2b, 0x3e6e, 0xbe19, 0xbef0,
0xbe67, 0x3ea3, 0x3ed8, 0x3c6d, 0xbe3a, 0x3dca, 0x3ebc, 0xbea6, 0xbe05, 0xbeb8,
0xbec3, 0x3f1b, 0x3ec3, 0x3e20, 0xbe79, 0xbe2e, 0x3e77, 0xbf18, 0x3de6, 0xbde1,
0x3e4d, 0xbeb3, 0x3ee3, 0x3ee5, 0xbea0, 0xbe63, 0x3ec0, 0xbed3, 0x3edb, 0xbf19,
0x3d84, 0xbe42, 0x3ea5, 0x3e7f, 0xbe85, 0x3e61, 0x3dba, 0xbf17, 0x3e57, 0xbdee,
0xbef7, 0x3dc7, 0xbe89, 0x3e79, 0xbe5a, 0xbe7a, 0x3ecb, 0x3ea3, 0x3c93, 0x3e06,
0x3de4, 0x3cef, 0x3dc3, 0xbdd1, 0xbedb, 0x3f11, 0xbdbd, 0xbdd5, 0xbdcb, 0x3cbc,
0x3e22, 0x3b86, 0x3e31, 0x3e72, 0x3e3d, 0x3ea9, 0x3ea8, 0xbeff, 0xbf0c, 0xbebc,
0x3f51, 0x3e02, 0x3f1e, 0xbd0e, 0xbfa8, 0xbb74, 0x3f20, 0xbf49, 0x3f1c, 0xbf2a,
0x3d2e, 0x3e03, 0x3f01, 0x3f15, 0xbf52, 0x3f06, 0x3e97, 0xbfad, 0x3ef1, 0xbec0,
0x3eab, 0x3ec5, 0x3eb5, 0xbe4c, 0xbf30, 0x3f2d, 0x3d63, 0xbe7f, 0xbe98, 0xbebe,
0xbde4, 0x3eed, 0x3fc4, 0xbe37, 0xbed7, 0x3d04, 0xbe9c, 0xbe0c, 0xbf31, 0xbe39,
0xbce1, 0x3e13, 0x3ef6, 0xbc8f, 0xbae4, 0xbe5b, 0xbe52, 0xbb25, 0xbde5, 0xbd37,
0xbb9c, 0x392d, 0x3d65, 0xb8ed, 0xb3b1, 0xbc91, 0xbd09, 0xb4d7, 0xb886, 0xb411,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x3dbb, 0xb84d, 0xbc9b, 0x3b9d, 0xb9b8, 0xbbfe, 0xbbfa, 0xbaaa, 0xbc2b, 0xbd4b,
0xbcba, 0xbc24, 0x3e4a, 0xbd48, 0x3d82, 0xbcd3, 0xbbc5, 0xbcb1, 0xbd51, 0xbd99,
0xbe0e, 0xbd1e, 0x3eb8, 0x3f44, 0x3e59, 0x3e68, 0xbdf8, 0xbeb0, 0xbee9, 0xbeef,
0x3ed8, 0xbdd6, 0x3edc, 0x3f74, 0x3d83, 0xbe85, 0xbe7e, 0xbf27, 0xbe9f, 0xbe98,
0x3f02, 0xbfa9, 0x3f28, 0x3ea6, 0xbf10, 0x3f3b, 0xbe56, 0xbf04, 0xbe46, 0x3f16,
0x3ea2, 0xbe3f, 0x3f23, 0x3f41, 0xbe76, 0x3ed2, 0xbea4, 0xbf45, 0xbe5e, 0xbec6,
0x3e8a, 0x3de5, 0x3f83, 0x3ed1, 0xbf67, 0xbe5e, 0x3f38, 0xbf43, 0x3f1c, 0xbfa1,
0x3efa, 0x3f0e, 0x3ea3, 0x3e1d, 0xbf93, 0x3ede, 0xbecf, 0xbdd9, 0xbd05, 0xbe83,
0x3ea5, 0x3e25, 0x3ea2, 0x3f2c, 0xbea3, 0x3ece, 0x3eb3, 0xbf38, 0xbed1, 0xbf47,
0x3ee2, 0x3eb5, 0x3eae, 0xbe4f, 0xbf4f, 0x3c5f, 0x3f70, 0xbed9, 0x3e84, 0xbf68,
0x3eb3, 0x3ee4, 0x3f41, 0xbe14, 0xbe5e, 0x3e4f, 0x3ea1, 0xbf0a, 0xbe6a, 0xbf6f,
0x3f5e, 0x3ec4, 0x3ea0, 0xbf63, 0xbe99, 0x3c18, 0x3f7e, 0xbe5c, 0xbaa1, 0xbf95,
0x3e67, 0xbe86, 0x3e10, 0xbda2, 0xbf1c, 0x3e44, 0xbe22, 0x3d85, 0x3ee6, 0x3d0f,
0x3e3d, 0x3b67, 0xbe94, 0x3e0d, 0xbe37, 0xbe47, 0x3f81, 0x3e13, 0xbf2a, 0xbe18,
0x3deb, 0xbe8a, 0x3e3d, 0xbe37, 0x3d9a, 0x3ccd, 0x3ea2, 0xbeee, 0x3edd, 0xbe72,
0xbec9, 0x3f09, 0xbd24, 0x3e10, 0x3f12, 0x3eb5, 0x3d2a, 0xbf39, 0xbefe, 0x3c19,
0xbcc8, 0x3f0b, 0xbeb6, 0x3e3f, 0x3e47, 0x3ecd, 0xbc34, 0xbf73, 0xbbaf, 0x3c9f,
0x3dcd, 0x3f23, 0x3e72, 0x3f0d, 0x3e0b, 0x3e20, 0x3f0d, 0xbfd9, 0xbe05, 0xbf0b,
0x3e37, 0x3f3f, 0x3ee0, 0xbc64, 0xbf11, 0x3ebb, 0xbeaf, 0xbf61, 0x3e14, 0xbd8f,
0x3dcb, 0x3ea5, 0x3f0e, 0x3f10, 0xbec1, 0xbe21, 0x3f2c, 0xbfb0, 0x3e79, 0xbf0d,
0x3d0d, 0x3cfe, 0x3f05, 0x3e3b, 0xbe2c, 0x3c3d, 0x3e67, 0xbf4d, 0x3f2d, 0xbf36,
0x3ecc, 0x3e27, 0x3ed3, 0x3e2d, 0xbf31, 0x3f71, 0xbed0, 0xbf57, 0x3ece, 0xbf0c,
0xbe9f, 0x3d32, 0x3e34, 0x3f19, 0xbf22, 0x3ca2, 0x3e7c, 0xbf1f, 0x3edc, 0x3d58,
0x3d56, 0x3d70, 0x3f87, 0xbe2f, 0xbede, 0x3cfb, 0xbe28, 0xbd95, 0xbf2d, 0x3ea6,
0xbdfb, 0x3e0e, 0x3f05, 0xbdc8, 0xbef0, 0x3f06, 0xbda1, 0xbe6e, 0xbed7, 0x3e74,
0xbae9, 0x3e0a, 0x3d74, 0x3caa, 0xba03, 0xbcb9, 0xbd12, 0xbac8, 0xbdf7, 0xbd03,
0xb4e3, 0x3a2b, 0xba10, 0xb7b0, 0xb8a0, 0x3a60, 0xb590, 0xb5d5, 0xba60, 0xb5e3,
0xafdc, 0x3a09, 0xb995, 0xb8a3, 0xb3df, 0xb558, 0xb511, 0xb80b, 0xb902, 0xb656,
0xb82b, 0x3907, 0x3972, 0x3c14, 0xb5ff, 0xbbb9, 0xb791, 0xbaac, 0xb935, 0xbb11,
0xbe11, 0xbc7d, 0x3ecd, 0x3e09, 0x3952, 0xbdc6, 0xbc35, 0xbe2f, 0xbd2e, 0xbd63,
0xbd2d, 0x3bf2, 0x3efa, 0x3f1e, 0xbe19, 0x3e6f, 0xbded, 0xbeed, 0xbe99, 0xbe8d,
0xbd60, 0x3e95, 0x3e52, 0x3eec, 0xbe52, 0x3f25, 0xbee2, 0xbf3e, 0x3df2, 0xbe8e,
0x3e8c, 0x3f74, 0xbc4f, 0xbea3, 0x3d43, 0x3e20, 0xbf18, 0xbf18, 0xbe1e, 0x3e7a,
0x3e1a, 0x3f1a, 0x3e43, 0x3ecd, 0xbf56, 0x3e17, 0xbf20, 0xbf25, 0x3f0e, 0x3d78,
0x3e71, 0x3e83, 0x3e4a, 0x3ed1, 0xbf4e, 0x3f30, 0x3c77, 0xbf82, 0x3f05, 0xbeff,
0xbee7, 0x3e8e, 0xbee5, 0x3ef6, 0x3f35, 0xbc61, 0x3e90, 0xbe32, 0xbe83, 0xbed0,
0xbd7f, 0xbda4, 0x3ed1, 0xbf36, 0x3dae, 0x3e33, 0x3eba, 0xbf2d, 0x3f1b, 0xbdda,
0x3ea9, 0x3f10, 0x3e20, 0xbd81, 0xbe7c, 0xbd0a, 0x3ced, 0x3c53, 0xbf02, 0xbe74,
0x3eab, 0xbe64, 0x3e46, 0xbe75, 0x3ddb, 0x3eb8, 0x3ec9, 0xbed8, 0x3de4, 0xbf1d,
0x3efb, 0xbd4a, 0xbe8f, 0xbdaf, 0x3dec, 0x3d20, 0x3f6c, 0xbe72, 0xbed1, 0xbf01,
0x3cf6, 0xbc19, 0x3ee8, 0xbd52, 0xbec1, 0x3df3, 0x3f3c, 0xbf09, 0x3ea1, 0xbf2d,
0xbdc7, 0x3f0e, 0x3e8f, 0xbe6e, 0xbec7, 0x3e4c, 0x3f70, 0xbf2e, 0x3d9f, 0xbf26,
0xbce2, 0x3ea2, 0xbe0a, 0x3e03, 0x3e93, 0x3df2, 0x3e26, 0xbdf7, 0xbe1c, 0xbf13,
0x3e3e, 0xbe82, 0x3e65, 0x3e51, 0xbef4, 0xbe19, 0x3eb6, 0x3e83, 0xbe19, 0xbe48,
0xbed4, 0xbdee, 0x3f24, 0xbe1b, 0x3d66, 0x3eae, 0x3f19, 0xbec5, 0x3dc6, 0xbf2a,
0xbe93, 0x3f55, 0x3ec2, 0x3dfe, 0xbeb1, 0x3c30, 0x3eef, 0xbee0, 0xbdd1, 0xbf23,
0xbeee, 0x3d49, 0x3f51, 0x3f07, 0x3e13, 0xbde1, 0x3e8c, 0xbf4f, 0xbdaf, 0xbead,
0xbe95, 0x3f63, 0xbcb7, 0xbea1, 0xbd9f, 0x3ee6, 0x3e2a, 0xbf22, 0x3e86, 0xbeda,
0xbe30, 0x3f41, 0x3ea0, 0xbe99, 0xbe1c, 0x3f15, 0xbe36, 0xbf6a, 0xbdb8, 0x3e25,
0x3e08, 0xbf1e, 0x3f24, 0x3d3b, 0xbd9c, 0x3f38, 0x3ed6, 0xbf69, 0x3ee7, 0xbf4d,
0x3df8, 0xbeb4, 0x3f93, 0xbe9e, 0x3e86, 0x3e7a, 0xbbf2, 0xbf46, 0xbe03, 0xbe4e,
0xbe4e, 0x3e95, 0x3f27, 0xbedb, 0xbe92, 0x3ef7, 0xbcab, 0xbdc5, 0xbf63, 0x3ef9,
0xbdcd, 0x3d45, 0x3ecc, 0xbdc5, 0xbee0, 0x3f2a, 0xbe1b, 0xbdae, 0xbe7c, 0x3bae,
0x3e0a, 0x3c3f, 0x3c78, 0x3a8b, 0xbbf0, 0x3cc8, 0xbc43, 0xbc6a, 0xbd13, 0xbdf1,
0x3794, 0xaaa0, 0xb646, 0xb3b6, 0xb029, 0xb4bb, 0xb321, 0xae57, 0xb76f, 0xb257,
0xafd5, 0x3a05, 0xb990, 0xb89e, 0xb3d8, 0xb551, 0xb50c, 0xb807, 0xb8fd, 0xb64f,
0xb4ae, 0x3928, 0xb8bb, 0x396c, 0xb310, 0xb8df, 0xb435, 0xb89e, 0xb826, 0xb89d,
0xbca8, 0xbc7e, 0x3eb2, 0x3e04, 0xb898, 0xbd6a, 0xbb9c, 0xbe97, 0xbce9, 0xbd5e,
0xbe17, 0xbd7a, 0x3f07, 0x3f2a, 0xbe4e, 0x3db6, 0xbd8d, 0xbf33, 0xbd45, 0xbd63,
0xbf2a, 0x3e4e, 0x3f02, 0x3ec4, 0xbeab, 0x3f2c, 0xbe48, 0xbf38, 0x3e84, 0xbddc,
0x3d54, 0x4007, 0x3e96, 0xbe5f, 0xbf0f, 0xbee1, 0xbec9, 0xbcc1, 0x3d88, 0xbf63,
0xbdb0, 0x3f68, 0x3f43, 0x3e9c, 0xbed1, 0xbeda, 0xbf5c, 0xbe59, 0x3e31, 0xbe1d,
0xbe83, 0x3da4, 0x3eda, 0x3e7a, 0xbf16, 0xbde7, 0xbe36, 0x3ebc, 0x3e88, 0xbe80,
0x3ed3, 0x3e5a, 0x3f00, 0xbeb0, 0xbf05, 0xbdfc, 0xbe45, 0x3e36, 0xbe81, 0x3e01,
0x3ea1, 0x3e84, 0x3f0b, 0x3e2a, 0xbf56, 0x3d5e, 0x3e82, 0xbe89, 0xbe01, 0xbeba,
0x3e3a, 0xbe14, 0x3e11, 0xbe45, 0xbec4, 0xbeaa, 0x3ee8, 0xbcdf, 0x3db5, 0x3e5b,
0x3dbe, 0xbec0, 0x3e16, 0x3e9f, 0xbe88, 0x3e82, 0xbdad, 0xbef2, 0x3eda, 0xbcf8,
0x3e32, 0xbf81, 0x3e6e, 0xbdc1, 0xbeef, 0x3e34, 0x3edc, 0x3eb5, 0x3e09, 0x3d95,
0x3faa, 0xbf15, 0x3db7, 0xbe87, 0xbe88, 0x3e4f, 0x3f23, 0xbf37, 0x3d82, 0xbf00,
0xbe3a, 0xbe46, 0xbe52, 0xbe52, 0xbe72, 0x3e90, 0xbe8c, 0x3eaa, 0x3edc, 0x3e82,
0x3e2c, 0x3cbe, 0xbe21, 0x3e4c, 0xbf28, 0x3e80, 0x3f40, 0xbe91, 0x3eab, 0xbf21,
0x3dba, 0xbd10, 0x3d68, 0x3bb5, 0xbe43, 0x3eb2, 0x3e63, 0xbd96, 0xbe2d, 0xbe83,
0x3e6a, 0x3eb7, 0xbe4e, 0x3ef3, 0xbdd3, 0xbe5b, 0x3d92, 0xbeaf, 0x3da3, 0xbeb3,
0xbe47, 0x3f4d, 0x3dea, 0xbe1d, 0xbeac, 0x3ee7, 0xbdd8, 0xbeb1, 0x3d4b, 0xbe91,
0x3dd7, 0x3e9d, 0x3e92, 0x3e8a, 0x3ecd, 0x3ea3, 0xbe4e, 0xbcc9, 0xbed0, 0xbf87,
0xbe50, 0x3f6d, 0x3ed3, 0x3edf, 0xbf09, 0x3ca1, 0x3ecf, 0xbf2c, 0xbf00, 0xbe92,
0x3d3a, 0x3e62, 0xbe08, 0xbe4a, 0x3f43, 0xbd8d, 0x3ee0, 0xbf95, 0x3ee0, 0xbeae,
0x3f06, 0xbf04, 0x3ef7, 0x3eec, 0x3dcd, 0x3e84, 0xbebb, 0xbf46, 0xbd75, 0xbde2,
0x3ea0, 0xbe7a, 0x3d9a, 0xbc63, 0xbddb, 0x3f1e, 0xbf02, | |
# <gh_stars>0  (scraper artifact; commented out so the module parses)
import httplib2
import json
from tasks.taskmanager import TaskManager
from tasks.task import *
from remote.remote_util import RemoteMachineShellConnection, RemoteUtilHelper
import time
import ast
class BLEVE:
    """Static definitions used to mirror FTS (bleve) index settings in ES.

    STOPWORDS               -- English stopword list shared by the analyzers below.
    STD_ANALYZER            -- ES index settings equivalent to bleve's standard analyzer.
    CUSTOM_ANALYZER         -- ES settings template for FTS custom analyzers; the empty
                               "analyzer" section is filled in at runtime (see
                               ElasticSearchBase.populate_es_settings).
    FTS_ES_ANALYZER_MAPPING -- FTS component name -> ES equivalent, per component kind.
    """
    # Stopword list applied both to the "default" analyzer and to the
    # keyword_marker / stopwords token filters below.
    STOPWORDS = ['i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves',
                 'you', 'your', 'yours', 'yourself', 'yourselves', 'he', 'him',
                 'his', 'himself', 'she', 'her', 'hers', 'herself', 'it', 'its',
                 'itself', 'they', 'them', 'their', 'theirs', 'themselves',
                 'what', 'which', 'who', 'whom', 'this', 'that', 'these',
                 'those', 'am', 'is', 'are', 'was', 'were', 'be', 'been',
                 'being', 'have', 'has', 'had', 'having', 'do', 'does', 'did',
                 'doing', 'would', 'should', 'could', 'ought', "i'm", "you're",
                 "he's", "she's", "it's", "we're", "they're", "i've", "you've",
                 "we've", "they've", "i'd", "you'd", "he'd", "she'd", "we'd",
                 "they'd", "i'll", "you'll", "he'll", "she'll", "we'll",
                 "they'll", "isn't", "aren't", "wasn't", "weren't", "hasn't",
                 "haven't", "hadn't", "doesn't", "don't", "didn't", "won't",
                 "wouldn't", "shan't", "shouldn't", "can't", 'cannot',
                 "couldn't", "mustn't", "let's", "that's", "who's", "what's",
                 "here's", "there's", "when's", "where's", "why's", "how's",
                 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as',
                 'until', 'while', 'of', 'at', 'by', 'for', 'with', 'about',
                 'against', 'between', 'into', 'through', 'during', 'before',
                 'after', 'above', 'below', 'to', 'from', 'up', 'down', 'in',
                 'out', 'on', 'off', 'over', 'under', 'again', 'further',
                 'then', 'once', 'here', 'there', 'when', 'where', 'why',
                 'how', 'all', 'any', 'both', 'each', 'few', 'more', 'most',
                 'other', 'some', 'such', 'no', 'nor', 'not', 'only', 'own',
                 'same', 'so', 'than', 'too', 'very']
    # Standard analyzer with the stopword list above, applied as the index default.
    STD_ANALYZER = {
        "settings": {
            "analysis": {
                "analyzer": {
                    "default": {
                        "type": "standard",
                        "stopwords": STOPWORDS
                    }
                }
            }
        }
    }
    # Template only: "analyzer" is intentionally empty and gets populated
    # per-index from the FTS custom analyzer definition.
    CUSTOM_ANALYZER = {
        "settings": {
            "analysis": {
                "analyzer": {
                },
                "char_filter": {
                    "mapping": {
                        "type": "mapping",
                        "mappings": [
                            "f => ph"
                        ]
                    }
                },
                "tokenizer":{
                    "alphanumeric":{
                        "type":"pattern",
                        "pattern":"[^a-zA-Z0-9_]"
                    }
                },
                "filter": {
                    "back_edge_ngram": {
                        "type":"edgeNGram",
                        "min_gram":3,
                        "max_gram":5,
                        "side":"back"
                    },
                    "front_edge_ngram": {
                        "type": "edgeNGram",
                        "min_gram": 3,
                        "max_gram": 5,
                        "side": "front"
                    },
                    # NOTE(review): "side" is documented for edgeNGram filters,
                    # not plain nGram -- confirm the target ES version tolerates it.
                    "ngram": {
                        "type": "nGram",
                        "min_gram": 3,
                        "max_gram": 5,
                        "side": "front"
                    },
                    "keyword_marker": {
                        "type":"keyword_marker",
                        "keywords":STOPWORDS
                    },
                    "stopwords": {
                        "type":"stop",
                        "stopwords":STOPWORDS
                    },
                    "length": {
                        "type":"length",
                        "min":3,
                        "max":5
                    },
                    "shingle": {
                        "type":"shingle",
                        "max_shingle_size":5,
                        "min_shingle_size":2,
                        "output_unigrams":"false",
                        "output_unigrams_if_no_shingles":"false",
                        "token_separator":"",
                        "filler_token":""
                    },
                    "truncate": {
                        "length": 10,
                        "type": "truncate"
                    },
                    "cjk_bigram": {
                        "type": "cjk_bigram"
                    },
                    "stemmer_it_light": {
                        "type": "stemmer",
                        "name": "light_italian"
                    },
                    "stemmer_fr_light": {
                        "type": "stemmer",
                        "name": "light_french"
                    },
                    "stemmer_fr_min": {
                        "type": "stemmer",
                        "name": "minimal_french"
                    },
                    "stemmer_pt_light": {
                        "type": "stemmer",
                        "name": "light_portuguese"
                    }
                }
            }
        }
    }
    # Lookup tables translating FTS component names into ES component names.
    # Values refer either to ES built-ins or to the custom entries declared
    # in CUSTOM_ANALYZER above.
    FTS_ES_ANALYZER_MAPPING = {
        "char_filters" : {
            "html":"html_strip",
            "zero_width_spaces":"html_strip",
            "mapping":"mapping"
        },
        "token_filters": {
            "apostrophe":"apostrophe",
            "elision_fr":"elision",
            "to_lower":"lowercase",
            "ngram":"ngram",
            "back_edge_ngram":"back_edge_ngram",
            "front_edge_ngram": "front_edge_ngram",
            "length":"length",
            "shingle":"shingle",
            "stemmer_porter":"porter_stem",
            "truncate":"truncate",
            "keyword_marker":"keyword_marker",
            "stopwords":"stopwords",
            "cjk_width":"cjk_width",
            "cjk_bigram":"cjk_bigram",
            "stemmer_it_light":"stemmer_it_light",
            "stemmer_fr_light":"stemmer_fr_light",
            "stemmer_fr_min": "stemmer_fr_min",
            "stemmer_pt_light": "stemmer_pt_light"
        },
        "tokenizers": {
            "letter":"letter",
            "web":"uax_url_email",
            "whitespace":"whitespace",
            "unicode":"standard",
            "single":"keyword",
            "alphanumeric":"alphanumeric"
        }
    }
class ElasticSearchBase(object):
def __init__(self, host, logger):
#host is in the form IP address
self.__log = logger
self.__host = host
self.__document = {}
self.__mapping = {}
self.__STATUSOK = 200
self.__indices = []
self.__index_types = {}
self.__connection_url = 'http://{0}:{1}/'.format(self.__host.ip,
self.__host.port)
self.es_queries = []
self.task_manager = TaskManager("ES_Thread")
self.task_manager.start()
self.http = httplib2.Http
    def _http_request(self, api, method='GET', params='', headers=None,
                      timeout=600):
        """Issue one HTTP request and classify the outcome by status code.

        :param api: full URL to request
        :param method: HTTP verb
        :param params: request body
        :param headers: optional header dict; defaults to JSON content type
        :param timeout: socket timeout in seconds
        :return: (ok, content, response) -- ok is True iff status is 200/201/202
        :raises ServerUnavailableException: on socket-level failure
        """
        if not headers:
            headers = {'Content-Type': 'application/json',
                       'Accept': '*/*'}
        try:
            response, content = httplib2.Http(timeout=timeout).request(api,
                                                                       method,
                                                                       params,
                                                                       headers)
            if response['status'] in ['200', '201', '202']:
                return True, content, response
            else:
                # Try to extract an error reason from the body for logging.
                # NOTE(review): literal_eval expects str; httplib2 content is
                # usually bytes (see rstrip(b'\n') below), in which case this
                # falls into the ValueError branch -- confirm intended.
                try:
                    json_parsed = ast.literal_eval(content)
                except ValueError as e:
                    json_parsed = {}
                    json_parsed["error"] = "status: {0}, content: {1}".\
                        format(response['status'], content)
                reason = "unknown"
                if "error" in json_parsed:
                    reason = json_parsed["error"]
                self.__log.error('{0} error {1} reason: {2} {3}'.format(
                    api,
                    response['status'],
                    reason,
                    content.rstrip(b'\n')))
                return False, content, response
        except socket.error as e:
            self.__log.error("socket error while connecting to {0} error {1} ".
                             format(api, e))
            raise ServerUnavailableException(ip=self.__host.ip)
def restart_es(self):
shell = RemoteMachineShellConnection(self.__host)
es_restart_cmd = "/etc/init.d/elasticsearch restart"
o, e = shell.execute_non_sudo_command(es_restart_cmd)
shell.log_command_output(o, e)
es_start = False
for i in range(2):
self.sleep(10)
if self.is_running():
es_start = True
break
if not es_start:
self.fail("Could not reach Elastic Search server on %s"
% self.ip)
else:
self.__log.info("Restarted ES server %s successfully" % self.__host.ip)
def is_running(self):
"""
make sure ES is up and running
check the service is running , if not abort the test
"""
try:
status, content, _ = self._http_request(
self.__connection_url,
'GET')
if status:
return True
else:
return False
except Exception as e:
raise e
def delete_index(self, index_name):
"""
Deletes index
"""
try:
url = self.__connection_url + index_name
status, content, _ = self._http_request(url, 'DELETE')
except Exception as e:
raise e
def delete_indices(self):
"""
Delete all indices present
"""
for index_name in self.__indices:
self.delete_index(index_name)
self.__log.info("ES index %s deleted" % index_name)
def create_empty_index(self, index_name):
"""
Creates an empty index, given the name
"""
try:
self.delete_index(index_name)
status, content, _ = self._http_request(
self.__connection_url + index_name,
'PUT')
if status:
self.__indices.append(index_name)
except Exception as e:
raise Exception("Could not create ES index : %s" % e)
def create_empty_index_with_bleve_equivalent_std_analyzer(self, index_name):
"""
Refer:
https://www.elastic.co/guide/en/elasticsearch/guide/current/
configuring-analyzers.html
"""
try:
self.delete_index(index_name)
status, content, _ = self._http_request(
self.__connection_url + index_name,
'PUT', json.dumps(BLEVE.STD_ANALYZER))
if status:
self.__indices.append(index_name)
except Exception as e:
raise Exception("Could not create index with ES std analyzer : %s"
% e)
def create_index_mapping(self, index_name, es_mapping, fts_mapping=None):
"""
Creates a new default index, with the given mapping
"""
self.delete_index(index_name)
if not fts_mapping:
map = {"mappings": es_mapping, "settings": BLEVE.STD_ANALYZER['settings']}
else :
# Find the ES equivalent char_filter, token_filter and tokenizer
es_settings = self.populate_es_settings(fts_mapping['params']
['mapping']['analysis']['analyzers'])
# Create an ES custom index definition
map = {"mappings": es_mapping, "settings": es_settings['settings']}
# Create ES index
try:
self.__log.info("Creating %s with mapping %s"
% (index_name, json.dumps(map, indent=3)))
status, content, _ = self._http_request(
self.__connection_url + index_name,
'PUT',
json.dumps(map))
if status:
self.__log.info("SUCCESS: ES index created with above mapping")
else:
raise Exception("Could not create ES index")
except Exception as e:
raise Exception("Could not create ES index : %s" % e)
def populate_es_settings(self, fts_custom_analyzers_def):
"""
Populates the custom analyzer defintion of the ES Index Definition.
Refers to the FTS Custom Analyzers definition and creates an
equivalent definition for each ES custom analyzer
:param fts_custom_analyzers_def: FTS Custom Analyzer Definition
:return:
"""
num_custom_analyzers = len(fts_custom_analyzers_def)
n = 1
analyzer_map = {}
while n <= num_custom_analyzers:
customAnalyzerName = list(fts_custom_analyzers_def.keys())[n-1]
fts_char_filters = fts_custom_analyzers_def[customAnalyzerName]["char_filters"]
fts_tokenizer = fts_custom_analyzers_def[customAnalyzerName]["tokenizer"]
fts_token_filters = fts_custom_analyzers_def[customAnalyzerName]["token_filters"]
analyzer_map[customAnalyzerName] = {}
analyzer_map[customAnalyzerName]["char_filter"] = []
analyzer_map[customAnalyzerName]["filter"] = []
analyzer_map[customAnalyzerName]["tokenizer"] = ""
for fts_char_filter in fts_char_filters:
analyzer_map[customAnalyzerName]['char_filter'].append( \
BLEVE.FTS_ES_ANALYZER_MAPPING['char_filters'][fts_char_filter])
analyzer_map[customAnalyzerName]['tokenizer'] = \
BLEVE.FTS_ES_ANALYZER_MAPPING['tokenizers'][fts_tokenizer]
for fts_token_filter in fts_token_filters:
analyzer_map[customAnalyzerName]['filter'].append( \
BLEVE.FTS_ES_ANALYZER_MAPPING['token_filters'][fts_token_filter])
n += 1
analyzer = BLEVE.CUSTOM_ANALYZER
analyzer['settings']['analysis']['analyzer'] = analyzer_map
return analyzer
def create_alias(self, name, indexes):
"""
@name: alias name
@indexes: list of target indexes
"""
try:
self.__log.info("Checking if ES alias '{0}' exists...".format(name))
self.delete_index(name)
alias_info = {"actions": []}
for index in indexes:
alias_info['actions'].append({"add": {"index": index,
"alias": name}})
self.__log.info("Creating ES alias '{0}' on {1}...".format(
name,
indexes))
status, content, _ = self._http_request(
self.__connection_url + "_aliases",
'POST',
json.dumps(alias_info))
if status:
self.__log.info("ES alias '{0}' created".format(name))
self.__indices.append(name)
except Exception as ex:
raise Exception("Could not create ES alias : %s" % ex)
def async_load_ES(self, index_name, gen, op_type='create'):
"""
Asynchronously run query against FTS and ES and compare result
note: every task runs a single query
"""
_task = ESLoadGeneratorTask(es_instance=self,
index_name=index_name,
generator=gen,
op_type=op_type)
self.task_manager.schedule(_task)
return _task
def async_bulk_load_ES(self, index_name, gen, op_type='create', batch=5000):
_task = ESBulkLoadGeneratorTask(es_instance=self,
index_name=index_name,
generator=gen,
op_type=op_type,
batch=batch)
self.task_manager.schedule(_task)
return _task
def load_bulk_data(self, filename):
"""
Bulk load to ES from a file
curl -s -XPOST 172.23.105.25:9200/_bulk --data-binary @req
cat req:
{ "index" : { "_index" : "default_es_index", "_type" : "aruna", "_id" : "1" } }
{ "field1" : "value1" , "field2" : "value2"}
{ "index" : { "_index" : "default_es_index", "_type" : "aruna", "_id" : "2" } }
{ "field1" : "value1" , "field2" : "value2"}
"""
try:
import os
url = self.__connection_url + "/_bulk"
data = open(filename, "rb").read()
status, content, _ = self._http_request(url,
'POST',
data)
return status
except Exception as e:
raise e
def load_data(self, index_name, document_json, doc_type, doc_id, scope=None, collection=None):
"""
index_name : name of index into which the doc is loaded
document_json: json doc
doc_type : type of doc. Usually the '_type' field in the doc body
doc_id : document id
"""
try:
url = self.__connection_url + index_name + '/' + doc_type + '/' +\
doc_id
status, content, _ = self._http_request(url,
'POST',
document_json)
except Exception as e:
raise e
def update_index(self, index_name):
"""
This procedure will refresh index when insert is performed .
Need to call this API to take search in effect.
:param index_name:
:return:
"""
try:
status, content, _ = self._http_request(
self.__connection_url + index_name | |
and
# avoid regression on https://github.com/DataDog/dd-agent/pull/3359
check._submit(check.metrics_mapper[ref_gauge.name], ref_gauge, custom_tags=tags)
check.gauge.assert_called_with(
'prometheus.process.vm.bytes',
39211008.0,
['test', 'my_1st_label:my_1st_label_value', 'my_2nd_label:my_2nd_label_value'],
hostname=None,
)
def test_submit_gauge_with_custom_tags(mocked_prometheus_check, ref_gauge):
    """Custom tags must be passed through unchanged to the gauge call."""
    check = mocked_prometheus_check
    custom = ['env:dev', 'app:my_pretty_app']
    check._submit(check.metrics_mapper[ref_gauge.name], ref_gauge, custom_tags=custom)
    check.gauge.assert_called_with(
        'prometheus.process.vm.bytes',
        39211008.0,
        ['env:dev', 'app:my_pretty_app'],
        hostname=None,
    )
def test_submit_gauge_with_labels_mapper(mocked_prometheus_check, ref_gauge):
    """Label names present in labels_mapper are renamed before tagging."""
    for label_name, label_value in (('my_1st_label', 'my_1st_label_value'),
                                    ('my_2nd_label', 'my_2nd_label_value')):
        label = ref_gauge.metric[0].label.add()
        label.name, label.value = label_name, label_value
    check = mocked_prometheus_check
    check.labels_mapper = {
        'my_1st_label': 'transformed_1st',
        'non_existent': 'should_not_matter',
        'env': 'dont_touch_custom_tags',
    }
    check._submit(check.metrics_mapper[ref_gauge.name], ref_gauge,
                  custom_tags=['env:dev', 'app:my_pretty_app'])
    check.gauge.assert_called_with(
        'prometheus.process.vm.bytes',
        39211008.0,
        ['env:dev', 'app:my_pretty_app', 'transformed_1st:my_1st_label_value', 'my_2nd_label:my_2nd_label_value'],
        hostname=None,
    )
def test_submit_gauge_with_exclude_labels(mocked_prometheus_check, ref_gauge):
    """Labels listed in exclude_labels are dropped from the submitted tags,
    while custom tags survive even when their name is excluded."""
    for label_name, label_value in (('my_1st_label', 'my_1st_label_value'),
                                    ('my_2nd_label', 'my_2nd_label_value')):
        label = ref_gauge.metric[0].label.add()
        label.name, label.value = label_name, label_value
    check = mocked_prometheus_check
    check.labels_mapper = {
        'my_1st_label': 'transformed_1st',
        'non_existent': 'should_not_matter',
        'env': 'dont_touch_custom_tags',
    }
    check.exclude_labels = ['my_2nd_label', 'whatever_else', 'env']  # custom tags are not filtered out
    check._submit(check.metrics_mapper[ref_gauge.name], ref_gauge,
                  custom_tags=['env:dev', 'app:my_pretty_app'])
    check.gauge.assert_called_with(
        'prometheus.process.vm.bytes',
        39211008.0,
        ['env:dev', 'app:my_pretty_app', 'transformed_1st:my_1st_label_value'],
        hostname=None,
    )
def test_submit_counter(mocked_prometheus_check):
    """A COUNTER family is submitted as a plain gauge value."""
    family = metrics_pb2.MetricFamily()
    family.name = 'my_counter'
    family.help = 'Random counter'
    family.type = 0  # COUNTER
    family.metric.add().counter.value = 42
    check = mocked_prometheus_check
    check._submit('custom.counter', family)
    check.gauge.assert_called_with('prometheus.custom.counter', 42, [], hostname=None)
def test_submits_summary(mocked_prometheus_check):
    """A SUMMARY family yields count, sum, and one gauge per quantile."""
    family = metrics_pb2.MetricFamily()
    family.name = 'my_summary'
    family.help = 'Random summary'
    family.type = 2  # SUMMARY
    sample = family.metric.add()
    sample.summary.sample_count = 42
    sample.summary.sample_sum = 3.14
    for quantile_value, observed in ((10.0, 3), (4.0, 5)):
        quantile = sample.summary.quantile.add()
        quantile.quantile = quantile_value
        quantile.value = observed
    check = mocked_prometheus_check
    check._submit('custom.summary', family)
    check.gauge.assert_has_calls(
        [
            mock.call('prometheus.custom.summary.count', 42, [], hostname=None),
            mock.call('prometheus.custom.summary.sum', 3.14, [], hostname=None),
            mock.call('prometheus.custom.summary.quantile', 3, ['quantile:10.0'], hostname=None),
            mock.call('prometheus.custom.summary.quantile', 5, ['quantile:4.0'], hostname=None),
        ]
    )
def test_submit_histogram(mocked_prometheus_check):
    """A HISTOGRAM family yields count, sum, and one count per bucket."""
    family = metrics_pb2.MetricFamily()
    family.name = 'my_histogram'
    family.help = 'Random histogram'
    family.type = 4  # HISTOGRAM
    sample = family.metric.add()
    sample.histogram.sample_count = 42
    sample.histogram.sample_sum = 3.14
    for bound, cumulative in ((12.7, 33), (18.2, 666)):
        bucket = sample.histogram.bucket.add()
        bucket.upper_bound = bound
        bucket.cumulative_count = cumulative
    check = mocked_prometheus_check
    check._submit('custom.histogram', family)
    check.gauge.assert_has_calls(
        [
            mock.call('prometheus.custom.histogram.count', 42, [], hostname=None),
            mock.call('prometheus.custom.histogram.sum', 3.14, [], hostname=None),
            mock.call('prometheus.custom.histogram.count', 33, ['upper_bound:12.7'], hostname=None),
            mock.call('prometheus.custom.histogram.count', 666, ['upper_bound:18.2'], hostname=None),
        ]
    )
def test_submit_rate(mocked_prometheus_check):
    """Metrics listed in rate_metrics go through check.rate, not gauge."""
    family = metrics_pb2.MetricFamily()
    family.name = 'my_rate'
    family.help = 'Random rate'
    family.type = 1  # GAUGE
    family.metric.add().gauge.value = 42
    check = mocked_prometheus_check
    check.rate_metrics = ["my_rate"]
    check._submit('custom.rate', family)
    check.rate.assert_called_with('prometheus.custom.rate', 42, [], hostname=None)
def test_filter_sample_on_gauge(p_check):
    """A blacklist entry matching one exposition line must drop that line,
    leaving only the two other samples in the parsed family."""
    text_data = (
        '# HELP kube_deployment_status_replicas The number of replicas per deployment.\n'
        '# TYPE kube_deployment_status_replicas gauge\n'
        'kube_deployment_status_replicas{deployment="event-exporter-v0.1.7"} 1\n'
        'kube_deployment_status_replicas{deployment="heapster-v1.4.3"} 1\n'
        'kube_deployment_status_replicas{deployment="kube-dns"} 2\n'
    )
    expected_metric = metrics_pb2.MetricFamily()
    expected_metric.help = "The number of replicas per deployment."
    expected_metric.name = "kube_deployment_status_replicas"
    expected_metric.type = 1
    for deployment in ("event-exporter-v0.1.7", "heapster-v1.4.3"):
        sample = expected_metric.metric.add()
        sample.gauge.value = 1
        label = sample.label.add()
        label.name, label.value = "deployment", deployment
    response = MockResponse(text_data, 'text/plain; version=0.0.4')
    check = p_check
    check._text_filter_blacklist = ["deployment=\"kube-dns\""]
    # Exhaust the generator to collect every parsed family
    parsed = list(check.parse_metric_family(response))
    assert len(parsed) == 1
    assert parsed[0] == expected_metric
def test_parse_one_gauge(p_check):
    """A single unlabeled gauge line parses into one GAUGE MetricFamily,
    and protobuf equality is by value (1 == 1.0) not by representation."""
    text_data = (
        "# HELP etcd_server_has_leader Whether or not a leader exists. 1 is existence, 0 is not.\n"
        "# TYPE etcd_server_has_leader gauge\n"
        "etcd_server_has_leader 1\n"
    )
    expected = metrics_pb2.MetricFamily()
    expected.help = "Whether or not a leader exists. 1 is existence, 0 is not."
    expected.name = "etcd_server_has_leader"
    expected.type = 1
    expected.metric.add().gauge.value = 1
    response = MockResponse(text_data, 'text/plain; version=0.0.4')
    parsed = list(p_check.parse_metric_family(response))
    assert len(parsed) == 1
    current_metric = parsed[0]
    assert expected == current_metric
    # A different value must not compare equal
    expected.metric.pop()
    expected.metric.add().gauge.value = 0
    assert expected != current_metric
    # Same value expressed as a float must still compare equal
    expected.metric.pop()
    expected.metric.add().gauge.value = 1.0
    assert expected == current_metric
def test_parse_one_counter(p_check):
    """A single counter line parses into one COUNTER MetricFamily."""
    text_data = (
        "# HELP go_memstats_mallocs_total Total number of mallocs.\n"
        "# TYPE go_memstats_mallocs_total counter\n"
        "go_memstats_mallocs_total 18713\n"
    )
    expected = metrics_pb2.MetricFamily()
    expected.help = "Total number of mallocs."
    expected.name = "go_memstats_mallocs_total"
    expected.type = 0
    expected.metric.add().counter.value = 18713
    response = MockResponse(text_data, 'text/plain; version=0.0.4')
    parsed = list(p_check.parse_metric_family(response))
    assert len(parsed) == 1
    current_metric = parsed[0]
    assert expected == current_metric
    # A different value must not compare equal
    expected.metric.pop()
    expected.metric.add().counter.value = 18714
    assert expected != current_metric
def test_parse_one_histograms_with_label(p_check):
    """A labeled histogram exposition parses into one family whose single
    metric carries the label plus every bucket, the sum and the count."""
    text_data = (
        '# HELP etcd_disk_wal_fsync_duration_seconds The latency distributions of fsync called by wal.\n'
        '# TYPE etcd_disk_wal_fsync_duration_seconds histogram\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{app="vault",le="0.001"} 2\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{app="vault",le="0.002"} 2\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{app="vault",le="0.004"} 2\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{app="vault",le="0.008"} 2\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{app="vault",le="0.016"} 4\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{app="vault",le="0.032"} 4\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{app="vault",le="0.064"} 4\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{app="vault",le="0.128"} 4\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{app="vault",le="0.256"} 4\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{app="vault",le="0.512"} 4\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{app="vault",le="1.024"} 4\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{app="vault",le="2.048"} 4\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{app="vault",le="4.096"} 4\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{app="vault",le="8.192"} 4\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{app="vault",le="+Inf"} 4\n'
        'etcd_disk_wal_fsync_duration_seconds_sum{app="vault"} 0.026131671\n'
        'etcd_disk_wal_fsync_duration_seconds_count{app="vault"} 4\n'
    )
    expected = metrics_pb2.MetricFamily()
    expected.help = "The latency distributions of fsync called by wal."
    expected.name = "etcd_disk_wal_fsync_duration_seconds"
    expected.type = 4
    histogram_metric = expected.metric.add()
    # Label for app vault
    app_label = histogram_metric.label.add()
    app_label.name, app_label.value = "app", "vault"
    for upper_bound, cumulative_count in [
        (0.001, 2),
        (0.002, 2),
        (0.004, 2),
        (0.008, 2),
        (0.016, 4),
        (0.032, 4),
        (0.064, 4),
        (0.128, 4),
        (0.256, 4),
        (0.512, 4),
        (1.024, 4),
        (2.048, 4),
        (4.096, 4),
        (8.192, 4),
        (float('inf'), 4),
    ]:
        bucket = histogram_metric.histogram.bucket.add()
        bucket.upper_bound = upper_bound
        bucket.cumulative_count = cumulative_count
    # Root histogram sample
    histogram_metric.histogram.sample_count = 4
    histogram_metric.histogram.sample_sum = 0.026131671
    response = MockResponse(text_data, 'text/plain; version=0.0.4')
    parsed = list(p_check.parse_metric_family(response))
    assert len(parsed) == 1
    assert parsed[0] == expected
def test_parse_one_histogram(p_check):
    """An unlabeled histogram exposition parses into one HISTOGRAM family
    with 15 buckets (le=0.001 .. +Inf), sample_sum 0.026131671 and
    sample_count 4."""
    text_data = (
        '# HELP etcd_disk_wal_fsync_duration_seconds The latency distributions of fsync called by wal.\n'
        '# TYPE etcd_disk_wal_fsync_duration_seconds histogram\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{le="0.001"} 2\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{le="0.002"} 2\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{le="0.004"} 2\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{le="0.008"} 2\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{le="0.016"} 4\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{le="0.032"} 4\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{le="0.064"} 4\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{le="0.128"} 4\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{le="0.256"} 4\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{le="0.512"} 4\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{le="1.024"} 4\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{le="2.048"} 4\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{le="4.096"} 4\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{le="8.192"} 4\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{le="+Inf"} 4\n'
        'etcd_disk_wal_fsync_duration_seconds_sum 0.026131671\n'
        'etcd_disk_wal_fsync_duration_seconds_count 4\n'
    )
    expected = metrics_pb2.MetricFamily()
    expected.help = "The latency distributions of fsync called by wal."
    expected.name = "etcd_disk_wal_fsync_duration_seconds"
    expected.type = 4
    histogram_metric = expected.metric.add()
    for upper_bound, cumulative_count in [
        (0.001, 2),
        (0.002, 2),
        (0.004, 2),
        (0.008, 2),
        (0.016, 4),
        (0.032, 4),
        (0.064, 4),
        (0.128, 4),
        (0.256, 4),
        (0.512, 4),
        (1.024, 4),
        (2.048, 4),
        (4.096, 4),
        (8.192, 4),
        (float('inf'), 4),
    ]:
        bucket = histogram_metric.histogram.bucket.add()
        bucket.upper_bound = upper_bound
        bucket.cumulative_count = cumulative_count
    # Root histogram sample
    histogram_metric.histogram.sample_count = 4
    histogram_metric.histogram.sample_sum = 0.026131671
    response = MockResponse(text_data, 'text/plain; version=0.0.4')
    parsed = list(p_check.parse_metric_family(response))
    assert len(parsed) == 1
    assert parsed[0] == expected
def test_parse_two_histograms_with_label(p_check):
text_data = (
'# HELP etcd_disk_wal_fsync_duration_seconds | |
<reponame>clviegas/StageDP<filename>src/models/tree.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: Yizhong
# created_at: 10/26/2016 8:37 PM
import sys
from os.path import isfile
from features.extraction import ActionFeatureGenerator, RelationFeatureGenerator
from models.state import ParsingState
from nltk import Tree
from nltk.draw.tree import TreeWidget
from nltk.draw.util import CanvasFrame
from utils.document import Doc
from utils.other import rel2class
from utils.span import SpanNode
class RstTree(object):
def __init__(self, fdis=None, fmerge=None):
self.fdis = fdis
self.fmerge = fmerge
self.binary = True
self.tree, self.doc = None, None
    def assign_tree(self, tree):
        """ Assign a tree instance from external resource

        :param tree: already-built RST tree (root node) to adopt
        """
        self.tree = tree
    def assign_doc(self, doc):
        """ Assign a doc instance from external resource

        :param doc: Doc instance holding the tokens this tree spans
        """
        self.doc = doc
def build(self):
""" Build BINARY RST tree
"""
with open(self.fdis) as fin:
text = fin.read()
# Build RST as annotation
self.tree = RstTree.build_tree(text)
# Binarize it
self.tree = RstTree.binarize_tree(self.tree)
# Read doc file
if isfile(self.fmerge):
doc = Doc()
doc.read_from_fmerge(self.fmerge)
self.doc = doc
else:
raise IOError("File doesn't exist: {}".format(self.fmerge))
RstTree.down_prop(self.tree)
RstTree.back_prop(self.tree, self.doc)
def generate_action_samples(self, bcvocab):
""" Generate action samples from an binary RST tree
:type bcvocab: dict
:param bcvocab: brown clusters of words
"""
# Parsing actions and relations
actions, relations = self.decode_rst_tree()
# Initialize queue and stack
queue = RstTree.get_edu_node(self.tree)
stack = []
# Start simulating the shift-reduce parsing
sr_parser = ParsingState(stack, queue)
for idx, action in enumerate(actions):
stack, queue = sr_parser.get_status()
# Generate features
fg = ActionFeatureGenerator(stack, queue, actions[:idx], self.doc, bcvocab)
action_feats = fg.gen_features()
yield action_feats, action
# Change status of stack/queue
# action and relation are necessary here to avoid change rst_trees
sr_parser.operate(action)
def generate_relation_samples(self, bcvocab, level):
""" Generate relation samples from an binary RST tree
:type bcvocab: dict
:param bcvocab: brown clusters of words
"""
post_nodelist = RstTree.postorder_DFT(self.tree, [])
for node in post_nodelist:
if node.level == level and (node.lnode is not None) and (node.rnode is not None):
fg = RelationFeatureGenerator(node, self, node.level, bcvocab)
relation_feats = fg.gen_features()
form = node.form
if (form == 'NN') or (form == 'NS'):
relation = RstTree.extract_relation(node.rnode.relation)
else:
relation = RstTree.extract_relation(node.lnode.relation)
yield relation_feats, relation
def decode_rst_tree(self):
""" Decoding Shift-reduce actions and span relations from an binary RST tree
"""
# Start decoding
post_nodelist = RstTree.postorder_DFT(self.tree, [])
action_list = []
relation_list = []
for node in post_nodelist:
if (node.lnode is None) and (node.rnode is None):
action_list.append(('Shift', None))
relation_list.append(None)
elif (node.lnode is not None) and (node.rnode is not None):
form = node.form
if (form == 'NN') or (form == 'NS'):
relation = RstTree.extract_relation(node.rnode.relation)
else:
relation = RstTree.extract_relation(node.lnode.relation)
action_list.append(('Reduce', form))
relation_list.append(relation)
else:
raise ValueError("Can not decode Shift-Reduce action")
return action_list, relation_list
def convert_node_to_str(self, node, sep=' '):
text = node.text
words = [self.doc.token_dict[tidx].word for tidx in text]
return sep.join(words)
@staticmethod
def get_edu_node(tree):
""" Get all left nodes. It can be used for generating training
examples from gold RST tree
:type tree: SpanNode instance
:param tree: an binary RST tree
"""
# Post-order depth-first traversal
post_nodelist = RstTree.postorder_DFT(tree, [])
# EDU list
edulist = []
for node in post_nodelist:
if (node.lnode is None) and (node.rnode is None):
edulist.append(node)
return edulist
@staticmethod
def build_tree(text):
""" Build tree from *.dis file
:type text: string
:param text: RST tree read from a *.dis file
"""
tokens = text.strip().replace('//TT_ERR', '').replace('\n', '').replace('(', ' ( ').replace(')', ' ) ').split()
# print 'tokens = {}'.format(tokens)
queue = RstTree.process_text(tokens)
# print 'queue = {}'.format(queue)
stack = []
while queue:
token = queue.pop(0)
if token == ')':
# If ')', start processing
content = [] # Content in the stack
while stack:
cont = stack.pop()
if cont == '(':
break
else:
content.append(cont)
content.reverse() # Reverse to the original order
# Parse according to the first content word
if len(content) < 2:
raise ValueError("content = {}".format(content))
label = content.pop(0)
if label == 'Root':
node = SpanNode(prop=label)
node.create_node(content)
stack.append(node)
elif label == 'Nucleus':
node = SpanNode(prop=label)
node.create_node(content)
stack.append(node)
elif label == 'Satellite':
node = SpanNode(prop=label)
node.create_node(content)
stack.append(node)
elif label == 'span':
# Merge
beginindex = int(content.pop(0))
endindex = int(content.pop(0))
stack.append(('span', beginindex, endindex))
elif label == 'leaf':
# Merge
eduindex = int(content.pop(0))
RstTree.check_content(label, content)
stack.append(('leaf', eduindex, eduindex))
elif label == 'rel2par':
# Merge
relation = content.pop(0)
RstTree.check_content(label, content)
stack.append(('relation', relation))
elif label == 'text':
# Merge
txt = RstTree.create_text(content)
stack.append(('text', txt))
else:
raise ValueError(
"Unrecognized parsing label: {} \n\twith content = {}\n\tstack={}\n\tqueue={}".format(label,
content,
stack,
queue))
else:
# else, keep push into the stack
stack.append(token)
return stack[-1]
@staticmethod
def process_text(tokens):
""" Preprocessing token list for filtering '(' and ')' in text
:type tokens: list
:param tokens: list of tokens
"""
identifier = '_!'
within_text = False
for (idx, tok) in enumerate(tokens):
if identifier in tok:
for _ in range(tok.count(identifier)):
within_text = not within_text
if ('(' in tok) and within_text:
tok = tok.replace('(', '-LB-')
if (')' in tok) and within_text:
tok = tok.replace(')', '-RB-')
tokens[idx] = tok
return tokens
@staticmethod
def create_text(lst):
""" Create text from a list of tokens
:type lst: list
:param lst: list of tokens
"""
newlst = []
for item in lst:
item = item.replace("_!", "")
newlst.append(item)
text = ' '.join(newlst)
# Lower-casing
return text.lower()
@staticmethod
def check_content(label, c):
""" Check whether the content is legal
:type label: string
:param label: parsing label, such 'span', 'leaf'
:type c: list
:param c: list of tokens
"""
if len(c) > 0:
raise ValueError("{} with content={}".format(label, c))
    @staticmethod
    def binarize_tree(tree):
        """ Convert a general RST tree to a binary RST tree

        Walks the tree breadth-first, turning each node's child list
        (``nodelist``) into ``lnode``/``rnode`` pairs. Nodes with more
        than two children are right-branched with newly created nodes.
        The tree is modified in place and also returned.

        :type tree: instance of SpanNode
        :param tree: a general RST tree
        """
        queue = [tree]
        while queue:
            node = queue.pop(0)
            queue += node.nodelist
            # Construct binary tree
            if len(node.nodelist) == 2:
                node.lnode = node.nodelist[0]
                node.rnode = node.nodelist[1]
                # Parent node
                node.lnode.pnode = node
                node.rnode.pnode = node
            elif len(node.nodelist) > 2:
                # Remove one node from the nodelist
                node.lnode = node.nodelist.pop(0)
                # The new internal node takes the property of the leftmost
                # remaining child and adopts all remaining children.
                newnode = SpanNode(node.nodelist[0].prop)
                newnode.nodelist += node.nodelist
                # Right-branching
                node.rnode = newnode
                # Parent node
                node.lnode.pnode = node
                node.rnode.pnode = node
                # Add to the head of the queue
                # So the code will keep branching
                # until the nodelist size is 2
                queue.insert(0, newnode)
            # Clear nodelist for the current node
            node.nodelist = []
        return tree
    @staticmethod
    def back_prop(tree, doc):
        """ Starting from leaf node, propagating node
        information back to root node

        :type tree: SpanNode instance
        :param tree: a binary RST tree
        :param doc: document object providing edu_dict and token_dict
            lookups for text and level computation
        """
        # Reversed breadth-first order guarantees children are processed
        # before their parents.
        tree_nodes = RstTree.BFTbin(tree)
        tree_nodes.reverse()
        for node in tree_nodes:
            if (node.lnode is not None) and (node.rnode is not None):
                # Non-leaf node
                node.edu_span = RstTree.__getspaninfo(node.lnode, node.rnode)
                node.text = RstTree.__gettextinfo(doc.edu_dict, node.edu_span)
                if node.relation is None:
                    # If it is a new node created by binarization
                    if node.prop == 'Root':
                        pass
                    else:
                        node.relation = RstTree.__getrelationinfo(node.lnode, node.rnode)
                node.form, node.nuc_span, node.nuc_edu = RstTree.__getforminfo(node.lnode, node.rnode)
                node.height = max(node.lnode.height, node.rnode.height) + 1
                node.max_depth = max(node.lnode.max_depth, node.rnode.max_depth)
                # For NS the relation label lives on the satellite (right)
                # child, otherwise on the left child.
                if node.form == 'NS':
                    node.child_relation = node.rnode.relation
                else:
                    node.child_relation = node.lnode.relation
                # level buckets: 0/1/2 depending on whether the span's first
                # and last tokens share sidx/pidx (presumably sentence and
                # paragraph indices -- TODO confirm against the Doc class).
                if doc.token_dict[node.lnode.text[0]].sidx == doc.token_dict[node.rnode.text[-1]].sidx:
                    node.level = 0
                elif doc.token_dict[node.lnode.text[0]].pidx == doc.token_dict[node.rnode.text[-1]].pidx:
                    node.level = 1
                else:
                    node.level = 2
            elif (node.lnode is None) and (node.rnode is not None):
                raise ValueError("Unexpected left node")
            elif (node.lnode is not None) and (node.rnode is None):
                raise ValueError("Unexpected right node")
            else:
                # Leaf node
                node.text = RstTree.__gettextinfo(doc.edu_dict, node.edu_span)
                node.height = 0
                node.max_depth = node.depth
                node.level = 0
@staticmethod
def down_prop(tree):
"""
Starting from root node, propagating node information down to leaf nodes
:param tree: SpanNode instance
:param doc: Doc instance
:return: root node
"""
tree_nodes = RstTree.BFTbin(tree)
root_node = tree_nodes.pop(0)
root_node.depth = 0
for node in tree_nodes:
assert node.pnode.depth >= 0
node.depth = node.pnode.depth + 1
# for node in tree_nodes:
# if node.lnode is not None and node.rnode is not None:
# node.lnode.pnode = node
# node.rnode.pnode = node
# if node.form == 'NN':
# node.lnode.prop = "Nucleus"
# node.lnode.relation = node.child_relation
# node.rnode.prop = "Nucleus"
# node.rnode.relation = node.child_relation
# elif node.form == 'NS':
# node.lnode.prop = "Nucleus"
# node.lnode.relation = "span"
# node.rnode.prop = "Satellite"
# node.rnode.relation = node.child_relation
# elif node.form == 'SN':
# node.lnode.prop = "Satellite"
# node.lnode.relation = node.child_relation
# node.rnode.prop = "Nucleus"
# node.rnode.relation = "span"
# else:
# raise ValueError("Unrecognized form: {}".format(node.form))
@staticmethod
def BFT(tree):
""" Breadth-first | |
Documentation, chapter 5.2.2"""
input_channel_get = {
'01': 'VIDEO',
'02': 'S-VIDEO',
'06': 'COMPONENT',
'07': 'CVI 2 (not applicable)',
'08': 'VGA',
'09': 'HDMI 2',
'0A': 'HDMI or HDMI 1',
'0B': 'DVI-D',
'0C': 'Card DVI-D',
'0D': 'Display Port',
'0E': 'Card OPS',
'0F': 'USB'
}
input_channel_set = [
{'VIDEO': [0x01, 0x00]},
{'S-VIDEO': [0x01, 0x01]},
{'COMPONENT': [0x03, 0x00]},
{'CVI 2 (not applicable)': [0x03, 0x01]},
{'VGA': [0x05, 0x00]},
{'HDMI 2': [0x05, 0x01]},
{'Card DVI-D': [0x07, 0x00]},
{'Display Port': [0x07, 0x01]},
{'Card OPS': [0x08, 0x00]},
{'USB': [0x08, 0x01]},
{'HDMI or HDMI 1': [0x09, 0x00]},
{'DVI-D': [0x09, 0x01]},
]
class PhilipsSICP182(PhilipsSICP180):
    """ Changed in V1.82 Documentation, page 12 chapter 5.2.2"""
    # V1.82 extends the input table with USB 2 ('10') and
    # Display Port 2 ('11').
    input_channel_get = {
        '01': 'VIDEO',
        '02': 'S-VIDEO',
        '06': 'COMPONENT',
        '07': 'CVI 2 (not applicable)',
        '08': 'VGA',
        '09': 'HDMI 2',
        '0A': 'HDMI or HDMI 1',
        '0B': 'DVI-D',
        '0C': 'Card DVI-D',
        '0D': 'Display Port or Display Port 1',
        '0E': 'Card OPS',
        '0F': 'USB or USB 1',
        '10': 'USB 2',
        '11': 'Display Port 2'
    }
    # Ordered list of {name: [two command data bytes]} pairs used when
    # requesting an input switch.
    input_channel_set = [
        {'VIDEO': [0x01, 0x00]},
        {'S-VIDEO': [0x01, 0x01]},
        {'COMPONENT': [0x03, 0x00]},
        {'CVI 2 (not applicable)': [0x03, 0x01]},
        {'VGA': [0x05, 0x00]},
        {'HDMI 2': [0x05, 0x01]},
        {'Display Port 2': [0x06, 0x00]},
        {'USB 2': [0x06, 0x01]},
        {'Card DVI-D': [0x07, 0x00]},
        {'Display Port or Display Port 1': [0x07, 0x01]},
        {'Card OPS': [0x08, 0x00]},
        {'USB or USB 1': [0x08, 0x01]},
        {'HDMI or HDMI 1': [0x09, 0x00]},
        {'DVI-D': [0x09, 0x01]},
    ]
class PhilipsSICP183(PhilipsSICP182):
    """ Changed in V1.83 Documentation, chapter 5.2.2"""
    # Maps the input-source byte (two-digit hex string) reported by the
    # display to a human-readable input name.
    input_channel_get = {
        '01': 'VIDEO or VIDEO 1',
        '02': 'S-VIDEO (not applicable)',
        '03': 'VIDEO 2',
        '06': 'COMPONENT',
        '07': 'CVI 2 (not applicable)',
        '08': 'VGA',
        '09': 'HDMI 2',
        '0A': 'HDMI or HDMI 1',
        '0B': 'DVI-D (not applicable)',
        '0C': 'Card DVI-D (not applicable)',
        '0D': 'Display Port or Display Port 1 (not applicable)',
        '0E': 'Card OPS (not applicable)',
        '0F': 'USB or USB 1',
        '10': 'USB 2 (not applicable)',
        '11': 'Display Port 2 (not applicable)'
    }
    # Ordered list of {name: [two command data bytes]} pairs used when
    # requesting an input switch.
    input_channel_set = [
        {'VIDEO or VIDEO 1': [0x01, 0x00]},
        {'S-VIDEO (not applicable)': [0x01, 0x01]},
        {'VIDEO 2': [0x02, 0x00]},
        {'COMPONENT': [0x03, 0x00]},
        {'CVI 2 (not applicable)': [0x03, 0x01]},
        {'VGA': [0x05, 0x00]},
        {'HDMI 2': [0x05, 0x01]},
        {'Display Port 2 (not applicable)': [0x06, 0x00]},
        {'USB 2 (not applicable)': [0x06, 0x01]},
        {'Card DVI-D (not applicable)': [0x07, 0x00]},
        # Bug fix: renamed from 'Display Port (not applicable)' so the name
        # matches entry '0D' of input_channel_get above and the naming used
        # by the V1.82/V1.84 tables.
        {'Display Port or Display Port 1 (not applicable)': [0x07, 0x01]},
        {'Card OPS (not applicable)': [0x08, 0x00]},
        {'USB or USB 1': [0x08, 0x01]},
        {'HDMI or HDMI 1': [0x09, 0x00]},
        {'DVI-D (not applicable)': [0x09, 0x01]},
    ]
class PhilipsSICP184(PhilipsSICP183):
    """ Changed in V1.84 Documentation, chapter 5.2.2"""
    # Maps the input-source byte (two-digit hex string) reported by the
    # display to a human-readable input name.
    input_channel_get = {
        '01': 'VIDEO',
        '02': 'S-VIDEO',
        '06': 'COMPONENT',
        '07': 'CVI 2 (not applicable)',
        '08': 'VGA',
        '09': 'HDMI 2',
        '0A': 'HDMI or HDMI 1',
        '0B': 'DVI-D',
        '0C': 'Card DVI-D',
        '0D': 'Display Port or Display Port 1',
        '0E': 'Card OPS',
        '0F': 'USB or USB 1',
        '10': 'USB 2',
        '11': 'Display Port 2'
    }
    # Ordered list of {name: [two command data bytes]} pairs used when
    # requesting an input switch.
    # NOTE(review): several names here disagree with input_channel_get
    # above ('VIDEO or VIDEO 1' vs 'VIDEO', 'S-VIDEO (not applicable)' vs
    # 'S-VIDEO', 'DVI-D (not applicable)' vs 'DVI-D') -- verify against
    # the V1.84 documentation which naming is intended.
    input_channel_set = [
        {'VIDEO or VIDEO 1': [0x01, 0x00]},
        {'S-VIDEO (not applicable)': [0x01, 0x01]},
        {'COMPONENT': [0x03, 0x00]},
        {'CVI 2 (not applicable)': [0x03, 0x01]},
        {'VGA': [0x05, 0x00]},
        {'HDMI 2': [0x05, 0x01]},
        {'Display Port 2': [0x06, 0x00]},
        {'USB 2': [0x06, 0x01]},
        {'Card DVI-D': [0x07, 0x00]},
        {'Display Port or Display Port 1': [0x07, 0x01]},
        {'Card OPS': [0x08, 0x00]},
        {'USB or USB 1': [0x08, 0x01]},
        {'HDMI or HDMI 1': [0x09, 0x00]},
        {'DVI-D (not applicable)': [0x09, 0x01]},
    ]

    def get_auto_detect_input_channel(self):
        """ Get the auto detect mechanism.
        Added in V1.84 documentation on page 13, chapter 5.3

        Returns one of the AUTODETECT_INPUT_* constants.
        """
        raw = self.command(0xAF, list())
        data = self.get_answer_data(raw)
        # A valid answer carries exactly two bytes; the second is the state.
        if len(data) == 2:
            status = int(data[1], 16)
            if status == 0:
                return self.AUTODETECT_INPUT_OFF
            if status == 1:
                return self.AUTODETECT_INPUT_ON
        return self.AUTODETECT_INPUT_UNKNOWN

    def set_auto_detect_input_channel(self, setting):
        """ Set the auto detect mechanism. Allowed values are 0x00
        and 0x01 according to V1.84 documentation on page 13, chapter 5.3 """
        raw = self.command(0xAE, setting)
        data = self.get_answer_data(raw)
        return self.is_answer_ack(data)
class PhilipsSICP185(PhilipsSICP184):
    # No protocol changes are needed for V1.85; everything is inherited
    # from the V1.84 implementation.
    pass
class PhilipsSICP186(PhilipsSICP185):
    """ SICP V1.86: messages carry an additional Group byte, so commands
    must include a group field and the answer payload starts one byte
    later than in earlier versions. """

    def get_answer_data(self, data):
        """ Gets the part of the data that is used as data payload.

        Note: ``data`` is modified in place (header bytes and the trailing
        checksum are removed). Returns an empty list for non-ACK answers.
        """
        if not self.is_answer_ack(data):
            return list()
        # The payload starts at the fourth byte: drop the three header
        # bytes in one slice delete (was three separate `del data[0]`).
        del data[:3]
        # Remove the last byte because it is the checksum.
        del data[-1]
        return data

    def command(self, command, data):
        """ Build and send a SICP 1.86 message for ``command`` with the
        payload ``data``; returns the raw answer. """
        temp = list()
        # Add the Display ID As Control
        temp.append(self.display_id)
        # Add 0 as Group which means, that the Control will by done
        # by monitor ID and not by group. This is new to SICP 1.86
        temp.append(0)
        # Add the command
        temp.append(command)
        # Append the payload, if any. (The previous comment here claimed
        # this adds the data length; the length is prepended below.)
        if len(data) > 0:
            temp = temp + data
        # Prepend the total message size: +2 accounts for the size byte
        # itself and the checksum byte.
        mapping = list()
        length = len(temp) + 2
        mapping.append(length)
        mapping = mapping + temp
        # Always add the checksum
        checksum = self.calculate_checksum(mapping)
        mapping.append(checksum)
        # Convert the mapping to bytes
        cmd = Tools.list_to_bytes(mapping)
        # run the command
        result = self.connection.runcommand(cmd)
        return result

    def _decode_lock_status(self, status):
        """ Map a raw lock status byte to the LOCKED_* constants.
        Shared by get_lock_keys() and get_lock_ir_remote(), which
        previously duplicated this mapping. """
        if status == 0:
            return self.LOCKED_NONE
        elif status == 1:
            return self.LOCKED_ALL
        elif status == 2:
            return self.LOCKED_ALL_BUT_VOLUME
        elif status == 3:
            return self.LOCKED_ALL_BUT_POWER
        else:
            return self.LOCKED_UNKNOWN

    def get_lock_keys(self):
        """ Get the status of possibly locked local keyboard
        Changed in V1.86 documentation on page 10, chapter 4.2 """
        raw = self.command(0x1B, list())
        data = self.get_answer_data(raw)
        # Keyboard lock state is the third payload byte.
        return self._decode_lock_status(int(data[2], 16))

    def get_lock_ir_remote(self):
        """ Get the status of possibly locked IR remote
        Changed in V1.86 documentation on page 10, chapter 4.2 """
        raw = self.command(0x1B, list())
        data = self.get_answer_data(raw)
        # IR remote lock state is the second payload byte.
        return self._decode_lock_status(int(data[1], 16))
class PhilipsSICP187(PhilipsSICP186):
    """ SICP V1.87: auto-detect gains a failover mode plus commands to
    read and write the failover input order. """

    def get_auto_detect_input_channel(self):
        """ Get the auto detect mechanism.
        Changed in V1.87 documentation on page 19, chapter 5.3

        Returns one of the AUTODETECT_INPUT_* constants.
        (An unreachable stray `pass` after the final return was removed.)
        """
        raw = self.command(0xAF, list())
        data = self.get_answer_data(raw)
        if len(data) == 2:
            status = int(data[1], 16)
            if status == 0:
                return self.AUTODETECT_INPUT_OFF
            if status == 1:
                return self.AUTODETECT_INPUT_ALL
            if status == 5:
                return self.AUTODETECT_INPUT_FAILOVER
        return self.AUTODETECT_INPUT_UNKNOWN

    def get_failover_input_setting(self):
        """ Get the input channel that are defined as failover.
        Return contains a list of input channels as hex values
        Added to V1.87 documentation on page 20, chapter 5.3.5 """
        raw = self.command(0xA6, list())
        data = self.get_answer_data(raw)
        if len(data) > 0:
            # Drop the leading byte; the remainder is the channel list
            # (presumably a status byte -- confirm against chapter 5.3.5).
            del data[0]
        return data

    def set_failover_input_setting(self, setting):
        """ Set the input channel order for automatic failover.
        Settings has to be a list. The command needs a list of exactly 14
        input chanels. If the given variable differs, it will automatically
        be trimmed to 14 entries. The list is padded/trimmed in place. """
        needed = len(self.get_failover_input_setting())
        # Pad with zeros or trim in place so the caller's list matches the
        # length the display expects (replaces two manual append/del loops).
        if len(setting) < needed:
            setting.extend([0] * (needed - len(setting)))
        else:
            del setting[needed:]
        raw = self.command(0xA5, setting)
        data = self.get_answer_data(raw)
        return self.is_answer_ack(data)
class PhilipsSICP188(PhilipsSICP187):
""" Changed in V1.88 Documentation on page 18, chapter 5.2.2"""
input_channel_get = {
'01': 'VIDEO',
'02': 'S-VIDEO',
'03': 'COMPONENT',
'04': 'CVI 2 (not applicable)',
'05': 'VGA',
'06': 'HDMI 2',
'07': 'Display Port 2',
'08': 'USB 2',
'09': 'Card DVI-D',
'0A': 'Display Port 1',
'0B': 'Card OPS',
'0C': 'USB 1',
'0D': 'HDMI',
'0E': 'DVI-D',
'0F': 'HDMI 3',
'10': 'BROWSER',
'11': 'SMARCMS',
'12': 'DMS (Digital Media Server)',
'13': 'INTERNAL STORAGE',
'14': 'Reserved',
'15': 'Reserved',
}
input_channel_set = [
{'VIDEO': 0x01},
{'S-VIDEO': 0x02},
{'COMPONENT': 0x03},
{'CVI 2 (not applicable)': 0x04},
{'VGA': 0x05},
{'HDMI 2': 0x06},
{'Display Port 2': 0x07},
{'USB 2': 0x08},
{'Card DVI-D': 0x09},
{'Display Port 1': 0x0A},
{'Card OPS': 0x0B},
{'USB 1': 0x0C},
{'HDMI': 0x0D},
{'DVI-D': 0x0E},
{'HDMI 3': 0x0F},
{'BROWSER': 0x10},
{'SMARTCMS': 0x11},
{'DMS (Digital Media Server)': 0x12},
{'INTERNAL STORAGE': 0x13},
{'Reserved 0x14': 0x14},
{'Reserved 0x15': 0x15},
]
    def get_platform_version(self):
        """ Added to V1.88 documentation on page 12, chapter 3.2.1

        Returns the platform version reported by the display, decoded
        from ASCII hex bytes into a string.
        """
        raw = self.command(0xA2, [2])
        # NOTE(review): raw[1:] drops the first answer byte before payload
        # extraction, unlike other commands in this file which pass `raw`
        # unchanged -- confirm against the V1.88 answer layout.
        return Tools.ascii_hex_list_to_string(self.get_answer_data(raw[1:]))
def get_lock_keys(self):
""" Get the status of possibly locked local keyboard
Changed in V1.88 documentation on page 15, chapter 4.2.5 """
raw = self.command(0x1B, list())
data | |
open(filepath, 'w').close()
stdout.append('Created an empty file.')
if p.is_new and not p.source_filename:
# Do not run it if p.source_filename is defined, since svn copy was
# using above.
stdout.append(
self._check_output_svn(
['add', p.filename, '--force'], credentials=False))
for name, value in p.svn_properties:
if value is None:
stdout.append(
self._check_output_svn(
['propdel', '--quiet', name, p.filename],
credentials=False))
stdout.append('Property %s deleted.' % name)
else:
stdout.append(
self._check_output_svn(
['propset', name, value, p.filename], credentials=False))
stdout.append('Property %s=%s' % (name, value))
for prop, values in self.svn_config.auto_props.iteritems():
if fnmatch.fnmatch(p.filename, prop):
for value in values.split(';'):
if '=' not in value:
params = [value, '.']
else:
params = value.split('=', 1)
if params[1] == '*':
# Works around crbug.com/150960 on Windows.
params[1] = '.'
stdout.append(
self._check_output_svn(
['propset'] + params + [p.filename], credentials=False))
stdout.append('Property (auto) %s' % '='.join(params))
for post in post_processors:
post(self, p)
if verbose:
print p.filename
print align_stdout(stdout)
except OSError, e:
raise PatchApplicationFailed(p, '%s%s' % (align_stdout(stdout), e))
except subprocess.CalledProcessError, e:
raise PatchApplicationFailed(
p,
'While running %s;\n%s%s' % (
' '.join(e.cmd),
align_stdout(stdout),
align_stdout([getattr(e, 'stdout', '')])))
  def commit(self, commit_message, user):
    """Commits the pending changes to SVN and returns the new revision.

    The message is passed via a temp file (svn's --file flag); when
    committing on behalf of another user a 'realauthor' revision property
    is recorded for a server-side hook. Raises PatchApplicationFailed if
    the revision number cannot be parsed from svn's output.
    """
    logging.info('Committing patch for %s' % user)
    assert self.commit_user
    assert isinstance(commit_message, unicode)
    handle, commit_filename = tempfile.mkstemp(text=True)
    try:
      # Shouldn't assume default encoding is UTF-8. But really, if you are using
      # anything else, you are living in another world.
      os.write(handle, commit_message.encode('utf-8'))
      os.close(handle)
      # When committing, svn won't update the Revision metadata of the checkout,
      # so if svn commit returns "Committed revision 3.", svn info will still
      # return "Revision: 2". Since running svn update right after svn commit
      # creates a race condition with other committers, this code _must_ parse
      # the output of svn commit and use a regexp to grab the revision number.
      # Note that "Committed revision N." is localized but subprocess2 forces
      # LANGUAGE=en.
      args = ['commit', '--file', commit_filename]
      # realauthor is parsed by a server-side hook.
      if user and user != self.commit_user:
        args.extend(['--with-revprop', 'realauthor=%s' % user])
      out = self._check_output_svn(args)
    finally:
      os.remove(commit_filename)
    # The revision number is on the last non-empty output line.
    lines = filter(None, out.splitlines())
    match = re.match(r'^Committed revision (\d+).$', lines[-1])
    if not match:
      raise PatchApplicationFailed(
          None,
          'Couldn\'t make sense out of svn commit message:\n' + out)
    return int(match.group(1))
  def _revert(self, revision):
    """Reverts local modifications or checks out if the directory is not
    present. Use depot_tools's functionality to do this.

    Returns the resulting checkout revision.
    """
    flags = ['--ignore-externals']
    if revision:
      flags.extend(['--revision', str(revision)])
    if os.path.isdir(self.project_path):
      # This may remove any part (or all) of the checkout.
      scm.SVN.Revert(self.project_path, no_ignore=True)
    # Checked again because Revert() above may have deleted the directory.
    if os.path.isdir(self.project_path):
      # Revive files that were deleted in scm.SVN.Revert().
      self._check_call_svn(['update', '--force'] + flags,
                           timeout=FETCH_TIMEOUT)
    else:
      logging.info(
          'Directory %s is not present, checking it out.' % self.project_path)
      self._check_call_svn(
          ['checkout', self.svn_url, self.project_path] + flags, cwd=None,
          timeout=FETCH_TIMEOUT)
    return self._get_revision()
  def _get_revision(self):
    """Returns the checkout's current revision, logging when it changed
    since the last call."""
    out = self._check_output_svn(['info', '.'])
    revision = int(self._parse_svn_info(out, 'revision'))
    if revision != self._last_seen_revision:
      logging.info('Updated to revision %d' % revision)
      self._last_seen_revision = revision
    return revision
  def revisions(self, rev1, rev2):
    """Returns the number of actual commits, not just the difference between
    numbers.

    Returns None if the svn log command fails.
    """
    rev2 = rev2 or 'HEAD'
    # Revision range is inclusive and ordering doesn't matter, they'll appear in
    # the order specified.
    try:
      out = self._check_output_svn(
          ['log', '-q', self.svn_url, '-r', '%s:%s' % (rev1, rev2)])
    except subprocess.CalledProcessError:
      return None
    # Ignore the '----' lines.
    # 'svn log -q' prints one 'rNNN | ...' header per commit; the range is
    # inclusive, hence the -1 to count commits between the endpoints.
    return len([l for l in out.splitlines() if l.startswith('r')]) - 1
class GitCheckout(CheckoutBase):
"""Manages a git checkout."""
  def __init__(self, root_dir, project_name, remote_branch, git_url,
               commit_user, post_processors=None):
    """Configures the checkout: git_url is where to clone/fetch from,
    remote_branch is the branch the working branch tracks, and commit_user
    is required by commit(). root_dir/project_name/post_processors are
    forwarded to CheckoutBase."""
    super(GitCheckout, self).__init__(root_dir, project_name, post_processors)
    self.git_url = git_url
    self.commit_user = commit_user
    self.remote_branch = remote_branch
    # The working branch where patches will be applied. It will track the
    # remote branch.
    self.working_branch = 'working_branch'
    # There is no reason to not hardcode origin.
    self.remote = 'origin'
    # There is no reason to not hardcode master.
    self.master_branch = 'master'
  def prepare(self, revision):
    """Resets the git repository in a clean state.
    Checks it out if not present and deletes the working branch.

    Returns the commit hash now at HEAD.
    """
    assert self.remote_branch
    assert self.git_url
    if not os.path.isdir(self.project_path):
      # Clone the repo if the directory is not present.
      logging.info(
          'Checking out %s in %s', self.project_name, self.project_path)
      self._check_call_git(
          ['clone', self.git_url, '-b', self.remote_branch, self.project_path],
          cwd=None, timeout=FETCH_TIMEOUT)
    else:
      # Throw away all uncommitted changes in the existing checkout.
      self._check_call_git(['checkout', self.remote_branch])
      self._check_call_git(
          ['reset', '--hard', '--quiet',
           '%s/%s' % (self.remote, self.remote_branch)])
    if revision:
      try:
        # Look if the commit hash already exist. If so, we can skip a
        # 'git fetch' call.
        revision = self._check_output_git(['rev-parse', revision]).rstrip()
      except subprocess.CalledProcessError:
        # Unknown locally: fetch the remote branch, then resolve again.
        self._check_call_git(
            ['fetch', self.remote, self.remote_branch, '--quiet'])
        revision = self._check_output_git(['rev-parse', revision]).rstrip()
      self._check_call_git(['checkout', '--force', '--quiet', revision])
    else:
      # No pinned revision: sync master and drop a stale working branch.
      branches, active = self._branches()
      if active != self.master_branch:
        self._check_call_git(
            ['checkout', '--force', '--quiet', self.master_branch])
      self._sync_remote_branch()
      if self.working_branch in branches:
        self._call_git(['branch', '-D', self.working_branch])
    return self._get_head_commit_hash()
  def _sync_remote_branch(self):
    """Syncs the remote branch into its remote-tracking ref."""
    # We do a 'git pull origin master:refs/remotes/origin/master' instead of
    # 'git pull origin master' because from the manpage for git-pull:
    #   A parameter <ref> without a colon is equivalent to <ref>: when
    #   pulling/fetching, so it merges <ref> into the current branch without
    #   storing the remote branch anywhere locally.
    remote_tracked_path = 'refs/remotes/%s/%s' % (
        self.remote, self.remote_branch)
    self._check_call_git(
        ['pull', self.remote,
         '%s:%s' % (self.remote_branch, remote_tracked_path),
         '--quiet'])
  def _get_head_commit_hash(self):
    """Gets the current revision (in unicode) from the local branch."""
    # unicode() is the Python 2 text type; this file is Python 2 code.
    return unicode(self._check_output_git(['rev-parse', 'HEAD']).strip())
  def apply_patch(self, patches, post_processors=None, verbose=False):
    """Applies a patch on 'working_branch' and switches to it.
    The changes remain staged on the current branch.
    Ignores svn properties and raises an exception on unexpected ones.
    """
    post_processors = post_processors or self.post_processors or []
    # If this throws, the checkout is corrupted. Maybe worth deleting it and
    # trying again?
    if self.remote_branch:
      self._check_call_git(
          ['checkout', '-b', self.working_branch, '-t', self.remote_branch,
           '--quiet'])
    for index, p in enumerate(patches):
      # Collect per-patch progress messages so failures can report context.
      stdout = []
      try:
        filepath = os.path.join(self.project_path, p.filename)
        if p.is_delete:
          if (not os.path.exists(filepath) and
              any(p1.source_filename == p.filename for p1 in patches[0:index])):
            # The file was already deleted if a prior patch with file rename
            # was already processed because 'git apply' did it for us.
            pass
          else:
            stdout.append(self._check_output_git(['rm', p.filename]))
            assert(not os.path.exists(filepath))
            stdout.append('Deleted.')
        else:
          dirname = os.path.dirname(p.filename)
          full_dir = os.path.join(self.project_path, dirname)
          if dirname and not os.path.isdir(full_dir):
            os.makedirs(full_dir)
            stdout.append('Created missing directory %s.' % dirname)
          if p.is_binary:
            # Binary patches carry whole-file content: write it, then stage.
            content = p.get()
            with open(filepath, 'wb') as f:
              f.write(content)
            stdout.append('Added binary file %d bytes' % len(content))
            cmd = ['add', p.filename]
            if verbose:
              cmd.append('--verbose')
            stdout.append(self._check_output_git(cmd))
          else:
            # No need to do anything special with p.is_new or if not
            # p.diff_hunks. git apply manages all that already.
            cmd = ['apply', '--index', '-p%s' % p.patchlevel]
            if verbose:
              cmd.append('--verbose')
            stdout.append(self._check_output_git(cmd, stdin=p.get(True)))
          for key, value in p.svn_properties:
            # Ignore some known auto-props flags through .subversion/config,
            # bails out on the other ones.
            # TODO(maruel): Read ~/.subversion/config and detect the rules that
            # applies here to figure out if the property will be correctly
            # handled.
            stdout.append('Property %s=%s' % (key, value))
            if not key in (
                'svn:eol-style', 'svn:executable', 'svn:mime-type'):
              raise patch.UnsupportedPatchFormat(
                  p.filename,
                  'Cannot apply svn property %s to file %s.' % (
                      key, p.filename))
        for post in post_processors:
          post(self, p)
        if verbose:
          print p.filename
          print align_stdout(stdout)
      except OSError, e:
        raise PatchApplicationFailed(p, '%s%s' % (align_stdout(stdout), e))
      except subprocess.CalledProcessError, e:
        raise PatchApplicationFailed(
            p,
            'While running %s;\n%s%s' % (
                ' '.join(e.cmd),
                align_stdout(stdout),
                align_stdout([getattr(e, 'stdout', '')])))
    # Sanity check: the staged diff must cover exactly the patched files.
    found_files = self._check_output_git(
        ['diff', '--ignore-submodules',
         '--name-only', '--staged']).splitlines(False)
    if sorted(patches.filenames) != sorted(found_files):
      extra_files = sorted(set(found_files) - set(patches.filenames))
      unpatched_files = sorted(set(patches.filenames) - set(found_files))
      if extra_files:
        print 'Found extra files: %r' % (extra_files,)
      if unpatched_files:
        print 'Found unpatched files: %r' % (unpatched_files,)
def commit(self, commit_message, user):
"""Commits, updates the commit message and pushes."""
# TODO(hinoka): CQ no longer uses this, I think its deprecated.
# Delete this.
assert self.commit_user
assert isinstance(commit_message, unicode)
current_branch = self._check_output_git(
['rev-parse', '--abbrev-ref', 'HEAD']).strip()
assert current_branch == self.working_branch
commit_cmd = ['commit', '-m', commit_message]
if user and user != self.commit_user:
# We do not have the first or last name of the user, grab the username
# from the email and call it the original author's name.
# TODO(rmistry): Do not need the below if user is already in
# "Name <email>" format.
name = user.split('@')[0]
commit_cmd.extend(['--author', '%s <%s>' % (name, user)])
self._check_call_git(commit_cmd)
# Push to the remote repository.
self._check_call_git(
['push', 'origin', '%s:%s' % (self.working_branch, self.remote_branch),
'--quiet'])
# Get the revision after the | |
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 01 14:23:18 2018
@authors: rafip, T.J.Ashby
"""
import yaml
from os.path import join
import pandas as pd
import numpy as np
from utils import SQLConnection, clean_nan_columns, get_features_from_labevents, get_features_from_chartevents
pd.set_option('mode.chained_assignment', None)
#
# Uses gzipped files only (no SQL)
#
def getfeaturesFromStaticTables(config):
    """Build per-ICU-stay features from the static (non-events) MIMIC-III
    tables: ADMISSIONS, ICUSTAYS, PATIENTS, PROCEDUREEVENTS_MV and
    PROCEDURES_ICD (gzipped csv files; no SQL).

    :param config: dict with a 'data_dir' key pointing at the directory
        holding the gzipped MIMIC-III csv files.
    :return: merged pandas DataFrame, one row per ICU stay, adults only.
    """
    print('generating features from non-events tables...')
    data_dir = config['data_dir']
    # get relevant data from ADMISSIONS table
    print('\nImporting data from ADMISSIONS...')
    path_admissions = join(data_dir, 'ADMISSIONS.csv.gz')
    df_admissions = pd.read_csv(path_admissions)
    trimmed_admissions = df_admissions[
        ['HADM_ID', 'SUBJECT_ID', 'ADMISSION_TYPE', 'INSURANCE', 'LANGUAGE', 'RELIGION', 'MARITAL_STATUS', 'ETHNICITY']]
    # get relevant data from ICUSTAYS table
    print('\nImporting data from ICUSTAYS...')
    path_icustays = join(data_dir, 'ICUSTAYS.csv.gz')
    df_icustays = pd.read_csv(path_icustays)
    trimmed_icustays = df_icustays[['HADM_ID', 'ICUSTAY_ID', 'FIRST_CAREUNIT', 'LOS', 'INTIME', 'OUTTIME']]
    # get relevant data from PATIENTS table
    print('\nImporting data from PATIENTS...')
    path_patients = join(data_dir, 'PATIENTS.csv.gz')
    df_patients = pd.read_csv(path_patients)
    trimmed_patients = df_patients[['SUBJECT_ID', 'GENDER', 'DOB']]
    # get relevant data from PROCEDUREEVENTS_MV table: one-hot encode the
    # order category per ICU stay.
    print('\nImporting data from PROCEDUREEVENTS_MV...')
    path_procedures = join(data_dir, 'PROCEDUREEVENTS_MV.csv.gz')
    df_procedures = pd.read_csv(path_procedures)
    trimmed_procedures = df_procedures[['ICUSTAY_ID', 'ORDERCATEGORYNAME']]
    # DUMMY is just a filler value for the pivot; the result is cast to 0/1.
    trimmed_procedures['DUMMY'] = pd.Series([x for x in range(0, len(df_procedures))])
    pivoted_procedures = trimmed_procedures.pivot_table(index='ICUSTAY_ID', columns='ORDERCATEGORYNAME', values='DUMMY',
                                                        fill_value=0).astype('bool').astype('int')
    pivoted_procedures.columns = ['PROCEDURE_' + str(col_name) for col_name in pivoted_procedures.columns]
    pivoted_procedures = pivoted_procedures.reset_index()
    # get relevant data from PROCEDURES_ICD table
    #
    # ICD9_CODE is forced to be read as a string: treating the ICD code
    # column as an integer drops semantically important leading zeros,
    # which leads to name clashes in the code name space.
    print('\nImporting data from PROCEDURES_ICD...')
    path_procedures = join(data_dir, 'PROCEDURES_ICD.csv.gz')
    df_ICD9 = pd.read_csv(path_procedures,
                          dtype={"ROW_ID": np.int32,
                                 "SUBJECT_ID": np.int32,
                                 "HADM_ID": np.int32,
                                 "SEQ_NUM": np.int32,
                                 "ICD9_CODE": str}
                          )
    trimmed_ICD9 = df_ICD9[['HADM_ID', 'SEQ_NUM', 'ICD9_CODE']]
    pivoted_ICD9 = trimmed_ICD9.pivot_table(index='HADM_ID', columns='ICD9_CODE', values='SEQ_NUM', fill_value=0).astype(
        'bool').astype('int')
    pivoted_ICD9.columns = ['ICD9_' + str(col_name) for col_name in pivoted_ICD9.columns]
    pivoted_ICD9 = pivoted_ICD9.reset_index()
    #
    # merging dataframes
    #
    print('\nMerging data from ADMISSIONS, ICUSTAYS, PATIENTS, PROCEDUREEVENTS_MV, PROCEDURES_ICD...')
    df_merged = trimmed_icustays.merge(trimmed_admissions, on='HADM_ID', how='left')
    df_merged = df_merged.merge(trimmed_patients, on='SUBJECT_ID', how='left')
    # Bug fix: merge pivoted_procedures exactly once. The original code
    # merged it twice, which duplicated every PROCEDURE_* column with
    # _x/_y suffixes.
    df_merged = df_merged.merge(pivoted_procedures, on='ICUSTAY_ID', how='left')
    df_merged = df_merged.merge(pivoted_ICD9, on='HADM_ID', how='left')
    #
    # Calculating age and median-correcting the de-identified ages of
    # overly aged people.
    #
    # Computed row by row rather than vectorized because
    #   (INTIME - DOB).dt.days
    # hits an integer overflow with some later versions of pandas.
    s_intime = df_merged['INTIME'].astype('datetime64[s]')
    s_dob = df_merged['DOB'].astype('datetime64[s]')
    s_subtract = s_intime.copy()
    patchCount = 0
    for k in s_subtract.keys():
        this_in = s_intime[k]
        this_dob = s_dob[k]
        if this_in.year - this_dob.year > 150:
            # MIMIC shifts DOB for patients >89, giving absurd year gaps.
            print("Index {}: Patched DOB to unknown-old (i.e. anonymised >89); age will be 300 (year diff was {})".format(k, this_in.year - this_dob.year))
            patchCount = patchCount + 1
            s_subtract[k] = 300 * 365
        else:
            s_subtract[k] = (this_in - this_dob).days
    print("Patched {} dodgy DOB entries".format(patchCount))
    ages = (s_subtract) / 365
    # Negative ages are another de-identification artefact; replace them
    # with 91.4 (the conventional median age of the shifted >89 group).
    df_merged['AGE'] = [age if age >= 0 else 91.4 for age in ages]
    df_merged.drop(['DOB'], axis=1, inplace=True)
    # removing minors from the data
    df_merged = df_merged[(df_merged['AGE'] >= 18)]
    # For the one-hot PROCEDURE_* / ICD9_* columns, NaN produced by the
    # left merges simply means "did not occur", so fill with 0.
    NAN_MEANS_NO_COLUMNS_PREFIXES = ['PROCEDURE', 'ICD9']
    for col_name in df_merged.columns:
        for nan_means_no_column_prefix in NAN_MEANS_NO_COLUMNS_PREFIXES:
            if col_name.startswith(nan_means_no_column_prefix):
                df_merged[col_name].fillna(0, inplace=True)
    return df_merged
#
# Accesses the Postgres DB
#
def getfeaturesFromEventsTables(config):
    """Extract lab- and chart-event features from the Postgres database.

    Queries the pre-built measurement views (all measurements plus the
    last measurement at discharge time for both labevents and
    chartevents), derives a feature frame from each pair, and joins the
    two on icustay_id.

    :param config: connection configuration passed to SQLConnection
    :return: pandas DataFrame of event-derived features per ICU stay
    """
    conn = SQLConnection(config)
    # Lab events: every measurement plus the last one before discharge.
    all_lab_msmts = conn.executeQuery('''
    select * from LABMEASURMENTS;
    ''')
    last_lab_msmts = conn.executeQuery('''
    select * from labslastmsmts;
    ''')
    lab_features = get_features_from_labevents(all_lab_msmts, last_lab_msmts)
    # Chart events: same pattern as the lab events above.
    all_chart_msmts = conn.executeQuery('''
    select * from CHARTSMEASURMENTS;
    ''')
    last_chart_msmts = conn.executeQuery('''
    select * from CHARTSLASTMSMTS;
    ''')
    chart_features = get_features_from_chartevents(all_chart_msmts, last_chart_msmts)
    # Left join keeps every stay that has lab features.
    return lab_features.merge(chart_features, how='left', on='icustay_id')
#
# Accesses the Postgres DB
#
def _load_severity_score_table(conn, table_name):
    """Fetch one severity-score concept table and normalise it.

    Drops rows without an icustay_id, casts icustay_id to int, and removes
    the subject/admission identifiers that would otherwise collide when the
    score tables are merged on icustay_id.

    :param conn: open SQLConnection
    :param table_name: name of the concept table (e.g. 'sapsii')
    :return: normalised DataFrame keyed by icustay_id
    """
    df = conn.executeQuery('select * from %s;' % table_name)
    df = df[df.icustay_id.isnull() == False]
    df['icustay_id'] = df['icustay_id'].astype('int')
    df.drop(['subject_id', 'hadm_id'], axis=1, inplace=True)
    return df

def getfeaturesFromSeverityScoreConcepts(config):
    """
    Generate features from the MIMIC-III concepts tables for severity scores.

    Loads the sapsii, sofa, sirs, lods, apsiii and oasis concept tables,
    normalises each (see _load_severity_score_table), merges them all on
    icustay_id and cleans up score/NA columns.

    :param config: database connection configuration
    :return: DataFrame of severity-score features keyed by icustay_id
    """
    print("\n Extracting features from MIMIC-III concepts tables for severity scores")
    conn = SQLConnection(config)
    sapsii = _load_severity_score_table(conn, 'sapsii')
    sofa = _load_severity_score_table(conn, 'sofa')
    sirs = _load_severity_score_table(conn, 'sirs')
    lods = _load_severity_score_table(conn, 'lods')
    apsiii = _load_severity_score_table(conn, 'apsiii')
    oasis = _load_severity_score_table(conn, 'oasis')
    sapsii_sofa = pd.merge(sapsii, sofa, on='icustay_id')
    # removing repeated column
    sapsii_sofa.drop(['temp_score'], axis=1, inplace=True)
    sapsii_sofa_sirs = pd.merge(sapsii_sofa, sirs, on='icustay_id')
    # removing repeated column
    sapsii_sofa_sirs.drop(['cardiovascular'], axis=1, inplace=True)
    sapsii_sofa_sirs_lods = pd.merge(sapsii_sofa_sirs, lods, on='icustay_id')
    sapsii_sofa_sirs_lods_apsiii = pd.merge(sapsii_sofa_sirs_lods, apsiii, on='icustay_id')
    df_severity_scores = pd.merge(sapsii_sofa_sirs_lods_apsiii, oasis, on='icustay_id')
    # filling score columns with 0 since in mimic db a missing score means 0
    score_columns = df_severity_scores.filter(regex='score').columns
    df_severity_scores[score_columns] = df_severity_scores[score_columns].fillna(0)
    # drop columns above the NA threshold (thres=60; NOTE(review): the
    # original comment said "> 50%" -- confirm the intended cutoff)
    df_severity_scores = clean_nan_columns(df_severity_scores, thres=60)
    # cleaning up non-numeric columns from severity scores
    df_severity_scores.drop(['icustay_age_group', 'preiculos'], axis=1, inplace=True)
    return df_severity_scores
#
# Add a 'Short LOS' flag for stays < median length
# Replace existing LOS with log(#days), based on a recalculation
#
def addLOSFeature(df_MASTER_DATA, median_los=2.144537):
    """Recompute ICU length of stay and flag short stays.

    Drops the pre-existing LOS column, recomputes LOS in days from the
    INTIME/OUTTIME timestamps, adds a binary Is_Short_LOS flag for stays
    shorter than ``median_los`` days, and stores LOS on a log scale.

    :param df_MASTER_DATA: admissions frame with LOS, INTIME and OUTTIME columns
    :param median_los: threshold in days separating short from long stays
        (default is the original hard-coded cohort median)
    :return: frame with log-scaled LOS and Is_Short_LOS columns
    """
    # remove already existing LOS feature and rows without valid timestamps
    df_MASTER_DATA.drop(['LOS'], axis=1, inplace=True)
    df_MASTER_DATA = df_MASTER_DATA[df_MASTER_DATA.OUTTIME.isnull() == False]
    df_MASTER_DATA = df_MASTER_DATA[df_MASTER_DATA.INTIME.isnull() == False]
    df_MASTER_DATA['INTIME'] = df_MASTER_DATA['INTIME'].astype('datetime64[ns]')
    df_MASTER_DATA['OUTTIME'] = df_MASTER_DATA['OUTTIME'].astype('datetime64[ns]')
    df_MASTER_DATA.reset_index(drop=True, inplace=True)
    # vectorised LOS in days (replaces the original per-row Python loop;
    # total_seconds() is also correct for sub-day and negative deltas)
    los_days = (
        df_MASTER_DATA['OUTTIME'] - df_MASTER_DATA['INTIME']
    ).dt.total_seconds() / (3600 * 24)
    df_MASTER_DATA['LOS'] = np.log(los_days)
    df_MASTER_DATA['Is_Short_LOS'] = (los_days < median_los).astype(int)
    return df_MASTER_DATA
#
# Adds a bunch of columns that make up the possible prediction target classes
# The columns are binary flags
#
def addTargetFeatures(df_MASTER_DATA):
    """Attach binary readmission target columns to the admissions frame.

    Rows are sorted per subject by admission time; for every ICU stay the
    subject's next stay (if any) determines whether a readmission happened
    within 24/48/72 hours, 7 days or 30 days, whether it was a bounceback
    (same hospital admission), and the time until readmission in days.

    :param df_MASTER_DATA: admissions frame with SUBJECT_ID, HADM_ID,
        INTIME and OUTTIME columns
    :return: frame with the IsReadmitted_* flags and Time_To_readmission
    """
    print('\n Adding target variables...')
    # keep only rows with valid timestamps and sort chronologically per subject
    df_MASTER_DATA = df_MASTER_DATA[df_MASTER_DATA.OUTTIME.isnull() == False]
    df_MASTER_DATA = df_MASTER_DATA[df_MASTER_DATA.INTIME.isnull() == False]
    df_MASTER_DATA['INTIME'] = df_MASTER_DATA['INTIME'].astype('datetime64[ns]')
    df_MASTER_DATA['OUTTIME'] = df_MASTER_DATA['OUTTIME'].astype('datetime64[ns]')
    df_MASTER_DATA = df_MASTER_DATA.sort_values(
        ['SUBJECT_ID', 'INTIME', 'OUTTIME'], ascending=[True, True, True])
    df_MASTER_DATA.reset_index(inplace=True, drop=True)
    # initialise every target flag to "not readmitted"
    target_columns = [
        'IsReadmitted_24hrs', 'IsReadmitted_48hrs', 'IsReadmitted_72hrs',
        'IsReadmitted_7days', 'IsReadmitted_30days', 'IsReadmitted_Bounceback',
    ]
    for column in target_columns:
        df_MASTER_DATA[column] = 0
    df_MASTER_DATA['Time_To_readmission'] = np.nan
    # (day limit, flag column) pairs checked against the readmission gap
    thresholds = [
        (1, 'IsReadmitted_24hrs'),
        (2, 'IsReadmitted_48hrs'),
        (3, 'IsReadmitted_72hrs'),
        (7, 'IsReadmitted_7days'),
        (30, 'IsReadmitted_30days'),
    ]
    for row in range(1, df_MASTER_DATA.shape[0]):
        prev = row - 1
        # flags are only set between consecutive stays of the same subject
        if df_MASTER_DATA.SUBJECT_ID[row] != df_MASTER_DATA.SUBJECT_ID[prev]:
            continue
        # gap between the previous ICU discharge and this ICU admission
        gap = df_MASTER_DATA.INTIME[row] - df_MASTER_DATA.OUTTIME[prev]
        df_MASTER_DATA.loc[prev, 'Time_To_readmission'] = gap.seconds / (
            3600 * 24) + gap.days
        for limit_days, column in thresholds:
            if gap.days <= limit_days:
                df_MASTER_DATA.loc[prev, column] = 1
        # same hospital admission -> the stay bounced back to the ICU
        if df_MASTER_DATA.HADM_ID[row] == df_MASTER_DATA.HADM_ID[prev]:
            df_MASTER_DATA.loc[prev, 'IsReadmitted_Bounceback'] = 1
    return df_MASTER_DATA
if __name__ == "__main__":
#
# Load | |
# Extra flags Andrew does not explicitly use but which are in the documentation
'-ignore_unrecognized_res',
'-in:file:fullatom',
'-fa_max_dis', '9.0',
'-ddg::dump_pdbs', 'true',
'-ddg::suppress_checkpointing', 'true',
]
softrep = []#['-ddg:weight_file', 'soft_rep_design']
# use -ddg:weight_file? hardrep = ['-score:weights standard', '-score:patch score12']
# use -ddg:weight_file? minnohardrep = ['-ddg::minimization_scorefunction', 'standard', '-ddg::minimization_patch', 'score12']
protocols1617 = [
'-ddg:weight_file', 'soft_rep_design',
'-ddg::iterations', '50',
'-ddg::local_opt_only', 'false',
'-ddg::min_cst', 'true',
'-ddg::mean', 'false',
'-ddg::min', 'true',
'-ddg::sc_min_only', 'false', # Backbone and sidechain minimization
'-ddg::ramp_repulsive', 'true',
]
ddGCmd = {
FieldNames_.Type : "CommandLine",
FieldNames_.Command : " ".join(['%(BIN_DIR)s/ddg_monomer.static.linuxgccrelease'] + commonstr + softrep + protocols1617 + extra_flags[1]),
FieldNames_.Description : "ddG for Protocol 16 from Kellogg et al.: %s" % score_function,
}
alreadyExists = self.ddGdb.locked_execute("SELECT ID FROM Command WHERE Type=%s AND Command=%s", parameters = (preminCmd[FieldNames_.Type], preminCmd[FieldNames_.Command]))
if not alreadyExists:
self.ddGdb.insertDict('Command', preminCmd)
preminCmdID = self.ddGdb.getLastRowID()
else:
preminCmdID = alreadyExists[0]["ID"]
alreadyExists = self.ddGdb.locked_execute("SELECT ID FROM Command WHERE Type=%s AND Command=%s", parameters = (ddGCmd[FieldNames_.Type], ddGCmd[FieldNames_.Command]))
if not alreadyExists:
self.ddGdb.insertDict('Command', ddGCmd)
ddGCmdID = self.ddGdb.getLastRowID()
else:
ddGCmdID = alreadyExists[0]["ID"]
# Protocol 16
protocol_name = "Protocol16 3.5.0 (%s)" % score_function
protocol_name = "Protocol16 3.5.1 (%s)" % score_function
alreadyExists = self.ddGdb.locked_execute("SELECT ID FROM Protocol WHERE ID=%s", parameters = (protocol_name,))
ddGTool = None
ddGDatabaseToolID = None
PreMinTool = self.ddGdb.locked_execute("SELECT ID FROM Tool WHERE Name=%s and GitHash=%s", parameters = ("Rosetta", git_hash))
if not PreMinTool:
d = {
'Name' : 'Rosetta',
'Version' : 'r%d' % fake_svn_revision,
'GitHash' : git_hash,
'SVNRevision' : fake_svn_revision,
'SVNRevisionInfo' : None,
}
self.ddGdb.insertDictIfNew('Tool', d, ['Name', 'Version', 'GitHash'])
results = self.ddGdb.locked_execute("SELECT ID FROM Tool WHERE Name=%s and GitHash=%s", parameters = ("Rosetta", git_hash))
if results:
PreMinTool = results[0]["ID"]
ddGTool = PreMinTool
ddGDatabaseToolID = PreMinTool
else:
raise Exception("Cannot add protocol %s." % protocol_name)
print("Inserting %s." % protocol_name)
proto = {
FieldNames_.ID : protocol_name,
FieldNames_.Description : "Protocol 16 from Kellogg, Leaver-Fay, and Baker (%s)" % score_function,
FieldNames_.ClassName : "ddG.protocols.LizKellogg.Protocol16v2",
FieldNames_.Publication : "Protocol:LizKelloggProtocol16",
}
self.ddGdb.insertDictIfNew('Protocol', proto, ['ID'])
# Create protocol graph
pstep = {
FieldNames_.ProtocolID : protocol_name,
FieldNames_.StepID : "preminimization",
FieldNames_.ToolID : PreMinTool,
FieldNames_.CommandID : preminCmdID,
FieldNames_.DatabaseToolID : PreMinTool,
FieldNames_.DirectoryName : "",
FieldNames_.ClassName : None,
FieldNames_.Description : "Preminimization step",
}
self.ddGdb.insertDictIfNew('ProtocolStep', pstep, ['ProtocolID', 'StepID'])
self.ddGdb.execute("UPDATE ProtocolStep SET ClassName=%s WHERE ProtocolID=%s AND StepID=%s", parameters = ('ddG.protocols.LizKellogg.Protocol16v2Preminimization', protocol_name, 'preminimization'))
pstep = {
FieldNames_.ProtocolID : protocol_name,
FieldNames_.StepID : "ddG",
FieldNames_.ToolID : ddGTool,
FieldNames_.CommandID : ddGCmdID,
FieldNames_.DatabaseToolID : ddGDatabaseToolID,
FieldNames_.DirectoryName : "",
FieldNames_.ClassName : None,
FieldNames_.Description : "ddG step",
}
self.ddGdb.insertDictIfNew('ProtocolStep', pstep, ['ProtocolID', 'StepID'])
self.ddGdb.execute("UPDATE ProtocolStep SET ClassName=%s WHERE ProtocolID=%s AND StepID=%s", parameters = ('GenericDDGTask', protocol_name, 'ddG'))
pedge = {
FieldNames_.ProtocolID : protocol_name,
FieldNames_.FromStep : 'preminimization',
FieldNames_.ToStep : 'ddG',
}
self.ddGdb.insertDictIfNew('ProtocolGraphEdge', pedge, ['ProtocolID', 'FromStep', 'ToStep'])
# Create protocol cleaners
for fmask in ['*.cst', '*.out', '*.pdb', '*.mutfile', '*._traj*']:
pcleaner ={
FieldNames_.ProtocolID : protocol_name,
FieldNames_.StepID: 'ddG',
FieldNames_.FileMask: fmask,
FieldNames_.Operation: 'keep',
FieldNames_.Arguments: None,
}
self.ddGdb.insertDictIfNew('ProtocolCleaner', pcleaner, ['ProtocolID', 'StepID', 'FileMask'])
for fmask in ['*.lst', '*.pdb']:
pcleaner ={
FieldNames_.ProtocolID : protocol_name,
FieldNames_.StepID: 'preminimization',
FieldNames_.FileMask: fmask,
FieldNames_.Operation: 'keep',
FieldNames_.Arguments: None,
}
self.ddGdb.insertDictIfNew('ProtocolCleaner', pcleaner, ['ProtocolID', 'StepID', 'FileMask'])
# Create protocol parameters
pparam = {
FieldNames_.ProtocolID : protocol_name,
FieldNames_.FromStep : "",
FieldNames_.ToStep : 'ddG',
FieldNames_.ParameterID: 'mutfile',
FieldNames_.Value: None,
}
self.ddGdb.insertDictIfNew('ProtocolParameter', pparam, ['ProtocolID', 'FromStep', 'ToStep', 'ParameterID'])
pparam = {
FieldNames_.ProtocolID : protocol_name,
FieldNames_.FromStep : "",
FieldNames_.ToStep : 'preminimization',
FieldNames_.ParameterID: 'in:file:l',
FieldNames_.Value: None,
}
self.ddGdb.insertDictIfNew('ProtocolParameter', pparam, ['ProtocolID', 'FromStep', 'ToStep', 'ParameterID'])
pparam = {
FieldNames_.ProtocolID : protocol_name,
FieldNames_.FromStep : 'preminimization',
FieldNames_.ToStep : 'ddG',
FieldNames_.ParameterID: 'constraints::cst_file',
FieldNames_.Value: None,
}
self.ddGdb.insertDictIfNew('ProtocolParameter', pparam, ['ProtocolID', 'FromStep', 'ToStep', 'ParameterID'])
pparam = {
FieldNames_.ProtocolID : protocol_name,
FieldNames_.FromStep : 'preminimization',
FieldNames_.ToStep : 'ddG',
FieldNames_.ParameterID: 'in:file:s',
FieldNames_.Value: None,
}
self.ddGdb.insertDictIfNew('ProtocolParameter', pparam, ['ProtocolID', 'FromStep', 'ToStep', 'ParameterID'])
def protocol_16_complex(self):
    """Register the FPP-complex variant of Kellogg et al. "Protocol 16"
    (ddg_monomer) in the ddG database.

    Builds the preminimization and ddG command lines (hardcoded to include
    the FPP ligand params file), inserts them into the Command table
    (reusing existing rows when an identical command already exists), then
    inserts the Protocol row, its two ProtocolStep rows, the graph edge
    between them, the file-retention cleaners and the inter-step
    parameters.

    Side effects: writes to the Command, Protocol, ProtocolStep,
    ProtocolGraphEdge, ProtocolCleaner and ProtocolParameter tables.

    :raises Exception: if no Rosetta Tool row with the pinned git hash exists.
    """
    # NOTE(review): FieldNames_ is read from the module-level ddGdb while all
    # queries below go through self.ddGdb -- confirm both refer to the same DB.
    FieldNames_ = ddGdb.FlatFieldNames
    # pinned Rosetta build used for this protocol
    git_hash = '4858db45b295dcbb1202a6a91128919404c72569'
    fake_svn_revision = 55534
    score_function = 'talaris2013sc'
    # extra_flags[0] is appended to the preminimization command line,
    # extra_flags[1] to the ddG command line
    extra_flags = [
        [
            '@/netapp/home/shaneoconner/TalarisTestingFinal/talaris2013/score.flags',
            '-score:weights', '/netapp/home/shaneoconner/TalarisTestingFinal/talaris2013/sp2_paper_talaris2013_scaled.wts',
        ],
        [
            '@/netapp/home/shaneoconner/TalarisTestingFinal/talaris2013/score.flags',
            '-ddg:minimization_scorefunction', '/netapp/home/shaneoconner/TalarisTestingFinal/talaris2013/sp2_paper_talaris2013_scaled.wts',
        ],
    ]
    assert(len(extra_flags) == 2)
    assert(len(extra_flags[0]) == 3)
    assert(len(extra_flags[1]) == 3)
    # Command for protocol 16 preminimization
    preminCmd = {
        FieldNames_.Type : "CommandLine",
        FieldNames_.Command : " ".join([
            '%(BIN_DIR)s/minimize_with_cst.static.linuxgccrelease',
            '-in:file:l', '%(in:file:l)s',
            '-in:file:fullatom',
            '-ignore_unrecognized_res',
            '-fa_max_dis', '9.0',
            '-database', '%(DATABASE_DIR)s',
            '-ddg::harmonic_ca_tether', '0.5',
            '-ddg::constraint_weight','1.0',
            '-ddg::out_pdb_prefix', 'min_cst_0.5', # this plus the original pdb name will be used as a prefix for the output files
            '-ddg::sc_min_only', 'false',
            # extra flag for hard-coded params
            '-in::file::extra_res_fa', '/netapp/home/klabqb3backrub/params/FPP.fa.params',
        ] + extra_flags[0]),
        FieldNames_.Description : "Preminimization for Protocol 16 from Kellogg et al.: %s" % score_function,
    }
    # Command for protocol 16 DDG (high res protocol)
    commonstr = [
        '-in:file:s', '%(in:file:s)s',
        '-ddg::mut_file', '%(mutfile)s',
        '-constraints::cst_file', '%(constraints::cst_file)s',
        '-database', '%(DATABASE_DIR)s',
        # Extra flags Andrew does not explicitly use but which are in the documentation
        '-ignore_unrecognized_res',
        '-in:file:fullatom',
        '-fa_max_dis', '9.0',
        '-ddg::dump_pdbs', 'true',
        '-ddg::suppress_checkpointing', 'true',
    ]
    softrep = []#['-ddg:weight_file', 'soft_rep_design']
    # use -ddg:weight_file? hardrep = ['-score:weights standard', '-score:patch score12']
    # use -ddg:weight_file? minnohardrep = ['-ddg::minimization_scorefunction', 'standard', '-ddg::minimization_patch', 'score12']
    protocols1617 = [
        '-ddg:weight_file', 'soft_rep_design',
        '-ddg::iterations', '50',
        '-ddg::local_opt_only', 'false',
        '-ddg::min_cst', 'true',
        '-ddg::mean', 'false',
        '-ddg::min', 'true',
        '-ddg::sc_min_only', 'false', # Backbone and sidechain minimization
        '-ddg::ramp_repulsive', 'true',
        # extra flag for hard-coded params
        '-in::file::extra_res_fa', '/netapp/home/klabqb3backrub/params/FPP.fa.params',
    ]
    ddGCmd = {
        FieldNames_.Type : "CommandLine",
        FieldNames_.Command : " ".join(['%(BIN_DIR)s/ddg_monomer.static.linuxgccrelease'] + commonstr + softrep + protocols1617 + extra_flags[1]),
        FieldNames_.Description : "ddG for Protocol 16 from Kellogg et al.: %s" % score_function,
    }
    # reuse an existing Command row if this exact preminimization command exists
    alreadyExists = self.ddGdb.locked_execute("SELECT ID FROM Command WHERE Type=%s AND Command=%s", parameters = (preminCmd[FieldNames_.Type], preminCmd[FieldNames_.Command]))
    if not alreadyExists:
        self.ddGdb.insertDict('Command', preminCmd)
        preminCmdID = self.ddGdb.getLastRowID()
    else:
        preminCmdID = alreadyExists[0]["ID"]
    # reuse an existing Command row if this exact ddG command exists
    alreadyExists = self.ddGdb.locked_execute("SELECT ID FROM Command WHERE Type=%s AND Command=%s", parameters = (ddGCmd[FieldNames_.Type], ddGCmd[FieldNames_.Command]))
    if not alreadyExists:
        self.ddGdb.insertDict('Command', ddGCmd)
        ddGCmdID = self.ddGdb.getLastRowID()
    else:
        ddGCmdID = alreadyExists[0]["ID"]
    # Protocol 16
    protocol_name = "Protocol16 3.5.1 (FPP complex) (%s)" % score_function
    # NOTE(review): the result of this existence check is never used below --
    # confirm whether it was intended to short-circuit re-insertion.
    alreadyExists = self.ddGdb.locked_execute("SELECT ID FROM Protocol WHERE ID=%s", parameters = (protocol_name,))
    ddGTool = None
    ddGDatabaseToolID = None
    # look up the Rosetta tool matching the pinned build; both steps use it
    results = self.ddGdb.locked_execute("SELECT ID FROM Tool WHERE Name=%s and GitHash=%s", parameters = ("Rosetta", git_hash))
    if results:
        PreMinTool = results[0]["ID"]
        ddGTool = PreMinTool
        ddGDatabaseToolID = PreMinTool
    else:
        raise Exception("Cannot add protocol %s." % protocol_name)
    print("Inserting %s." % protocol_name)
    proto = {
        FieldNames_.ID : protocol_name,
        FieldNames_.Description : "Protocol 16 from Kellogg, Leaver-Fay, and Baker (%s). Note: This is a hardcoded version designed to run with FPP for Aditya's project." % score_function,
        FieldNames_.ClassName : "ddG.protocols.LizKellogg.Protocol16Complex",
        FieldNames_.Publication : "Protocol:LizKelloggProtocol16",
    }
    self.ddGdb.insertDictIfNew('Protocol', proto, ['ID'])
    # Create protocol graph: preminimization step, ddG step and the edge
    pstep = {
        FieldNames_.ProtocolID : protocol_name,
        FieldNames_.StepID : "preminimization",
        FieldNames_.ToolID : PreMinTool,
        FieldNames_.CommandID : preminCmdID,
        FieldNames_.DatabaseToolID : PreMinTool,
        FieldNames_.DirectoryName : "",
        FieldNames_.ClassName : 'ddG.protocols.LizKellogg.Protocol16v2Preminimization',
        FieldNames_.Description : "Preminimization step",
    }
    self.ddGdb.insertDictIfNew('ProtocolStep', pstep, ['ProtocolID', 'StepID'])
    pstep = {
        FieldNames_.ProtocolID : protocol_name,
        FieldNames_.StepID : "ddG",
        FieldNames_.ToolID : ddGTool,
        FieldNames_.CommandID : ddGCmdID,
        FieldNames_.DatabaseToolID : ddGDatabaseToolID,
        FieldNames_.DirectoryName : "",
        FieldNames_.ClassName : 'GenericDDGTask',
        FieldNames_.Description : "ddG step",
    }
    self.ddGdb.insertDictIfNew('ProtocolStep', pstep, ['ProtocolID', 'StepID'])
    pedge = {
        FieldNames_.ProtocolID : protocol_name,
        FieldNames_.FromStep : 'preminimization',
        FieldNames_.ToStep : 'ddG',
    }
    self.ddGdb.insertDictIfNew('ProtocolGraphEdge', pedge, ['ProtocolID', 'FromStep', 'ToStep'])
    # Create protocol cleaners: file masks to keep after each step
    for fmask in ['*.cst', '*.out', '*.pdb', '*.mutfile', '*._traj*']:
        pcleaner ={
            FieldNames_.ProtocolID : protocol_name,
            FieldNames_.StepID: 'ddG',
            FieldNames_.FileMask: fmask,
            FieldNames_.Operation: 'keep',
            FieldNames_.Arguments: None,
        }
        self.ddGdb.insertDictIfNew('ProtocolCleaner', pcleaner, ['ProtocolID', 'StepID', 'FileMask'])
    for fmask in ['*.lst', '*.pdb']:
        pcleaner ={
            FieldNames_.ProtocolID : protocol_name,
            FieldNames_.StepID: 'preminimization',
            FieldNames_.FileMask: fmask,
            FieldNames_.Operation: 'keep',
            FieldNames_.Arguments: None,
        }
        self.ddGdb.insertDictIfNew('ProtocolCleaner', pcleaner, ['ProtocolID', 'StepID', 'FileMask'])
    # Create protocol parameters: values passed into or between the steps
    # (an empty FromStep means the parameter comes from the job input)
    pparam = {
        FieldNames_.ProtocolID : protocol_name,
        FieldNames_.FromStep : "",
        FieldNames_.ToStep : 'ddG',
        FieldNames_.ParameterID: 'mutfile',
        FieldNames_.Value: None,
    }
    self.ddGdb.insertDictIfNew('ProtocolParameter', pparam, ['ProtocolID', 'FromStep', 'ToStep', 'ParameterID'])
    pparam = {
        FieldNames_.ProtocolID : protocol_name,
        FieldNames_.FromStep : "",
        FieldNames_.ToStep : 'preminimization',
        FieldNames_.ParameterID: 'in:file:l',
        FieldNames_.Value: None,
    }
    self.ddGdb.insertDictIfNew('ProtocolParameter', pparam, ['ProtocolID', 'FromStep', 'ToStep', 'ParameterID'])
    pparam = {
        FieldNames_.ProtocolID : protocol_name,
        FieldNames_.FromStep : 'preminimization',
        FieldNames_.ToStep : 'ddG',
        FieldNames_.ParameterID: 'constraints::cst_file',
        FieldNames_.Value: None,
    }
    self.ddGdb.insertDictIfNew('ProtocolParameter', pparam, ['ProtocolID', 'FromStep', 'ToStep', 'ParameterID'])
    pparam = {
        FieldNames_.ProtocolID : protocol_name,
        FieldNames_.FromStep : 'preminimization',
        FieldNames_.ToStep : 'ddG',
        FieldNames_.ParameterID: 'in:file:s',
        FieldNames_.Value: None,
    }
    self.ddGdb.insertDictIfNew('ProtocolParameter', pparam, ['ProtocolID', 'FromStep', 'ToStep', 'ParameterID'])
def insertKelloggLeaverFayBakerProtocols(self):
protocols = [{} for i in range(0,21)]
#
commonstr = [
'-in:file:s', '%(in:file:s)s',
'-resfile', '%(resfile)s',
'-database', '%(DATABASE_DIR)s',
'-ignore_unrecognized_res',
'-in:file:fullatom',
'-constraints::cst_file', '%(constraints::cst_file)s'
]
softrep = ['-score:weights', 'soft_rep_design']
hardrep = ['-score:weights standard', '-score:patch score12']
minnohardrep = ['-ddg::minimization_scorefunction', 'standard', '-ddg::minimization_patch', 'score12']
protocols1617 = [
'-ddg::weight_file', 'soft_rep_design',
'-ddg::iterations', '50',
'-ddg::local_opt_only', 'false',
'-ddg::min_cst', 'true',
'-ddg::mean', 'false',
'-ddg::min', 'true',
'-ddg::sc_min_only', 'false', # Backbone and sidechain minimization
'-ddg::ramp_repulsive', 'true',
'-ddg::minimization_scorefunction', 'standard',
'-ddg::minimization_patch', 'score12'
]
# Command for protocol 16 preminimization
preminCmd = {
FieldNames_.Type | |
# -*- coding: utf-8 -*-
import math
import matplotlib.pyplot as plt
# Python's float precision is 53 bits; for our case the resulting
# inaccuracies are negligible. Even for larger drops and larger spans the
# obtained values would be acceptable. Getting more accurate results would
# require a large implementation effort, which makes no sense from the
# point of view of building structures.
# Design data:
# drop between supports SPAD_B [m]
SPAD_B = 39 - 19
# horizontal span SPAN_A [m]
SPAN_A = 674
# STATE 0 - input data:
# self-weight load q [N/m]
q_0 = 6.09
# conductor installation temperature [deg C]
T_0 = 10
# assumed sag at mid-span [m]
f_0_m = 20
# STATE 1 - input data:
# wind load perpendicular to the cable [N/m]
qw_1 = 0
# ice load [N/m]:
q_1k = 3.59
# total load without wind [N/m]:
q_tw = q_1k + q_0
# total load including wind (vector sum of vertical and horizontal) [N/m]:
q_t = math.sqrt((q_1k+q_0)**2 + qw_1**2)
# temperature T1 [deg C]
T_1 = -5
# temperature difference between state 1 and state 0:
T_d = T_1 - T_0
# Conductor properties ALF-8 525mm^2 (520-AL1/67-ST1A)
# cross-section area [m^2]
# A = 5.869 * 10 ** (-4)  (alternative conductor, kept for reference)
A = 1.34303 * 10 ** (-4)
# mean modulus of elasticity [kPa]
# E = 7.6881581189 * 10 ** 10 # = 76.881 GPa  (alternative conductor)
E = 10.6579 * 10 ** 10
# mean coefficient of thermal expansion [1/K]
# e_T = 1.944 * 10 ** (-5)  (alternative conductor)
e_T = 1.525 * 10 ** (-5)
def calculateM(k):
    """Compute the eccentricity m [m] of the sag curve.

    :param k: sag-curve (catenary) parameter [m]
    :return: eccentricity m [m]
    """
    ratio = SPAD_B / (2 * k * math.sinh(SPAN_A / (2 * k)))
    return k * math.asinh(ratio)
def calculateMprime(k):
    """Distance m' from the cable's mid-point to the extremum of the sag
    function.

    :param k: sag-curve parameter [m]
    :return: offset m' [m]
    """
    mean_slope = SPAD_B / SPAN_A
    return k * math.asinh(mean_slope)
def calculateK(h, q):
    """Sag-curve (catenary) parameter k = H / q in [m].

    :param h: horizontal tension force [N]
    :param q: uniform load intensity [N/m]
    :return: sag-curve parameter k [m]
    """
    return h / q
def calculatefm(h, q):
    """Mid-span sag f_m for a given tension and load.

    :param h: horizontal tension force [N]
    :param q: uniform load [N/m]
    :return: sag f_m at the middle of the cable [m]
    """
    parameter = calculateK(h, q)
    eccentricity = calculateM(parameter)
    cosh_at_support = math.cosh((-0.5 * SPAN_A + eccentricity) / parameter)
    cosh_at_offset = math.cosh(eccentricity / parameter)
    return (SPAD_B / 2) + parameter * (cosh_at_support - cosh_at_offset)
def findh_s0(h_max, h_min, q):
    """Find the state-0 tension force numerically (bisection method);
    upper and lower bounds for the bisection must be provided.

    Bisection works here because the mid-span sag decreases monotonically
    as the tension grows.

    :param h_max: upper bound for the tension search [N]
    :param h_min: lower bound for the tension search [N]
    :param q: total cable load [N/m]
    :return: (h, i) - tension force and the number of iterations needed
    """
    i = 1
    h = (h_min + h_max) / 2
    print("Wstępne H = " + str(h))
    f_m = calculatefm(h, q)
    # iterate until the sag matches the target f_0_m to within 1e-8 m
    while (math.fabs(f_m - f_0_m) >= 1 * 10 ** -8):
        if f_m < f_0_m:
            # sag too small -> tension too high -> lower the upper bound
            h_max = h
        else:
            h_min = h
        h = (h_min + h_max) / 2
        f_m = calculatefm(h, q)
        i += 1
    return h, i
def testIfCorrect(h, q):
    """Check that tension h reproduces the target mid-span sag f_0_m.

    :param h: tension force [N]
    :param q: total cable load [N/m]
    :return: True when the recomputed sag is within 1e-4 m of f_0_m
    """
    # return the comparison directly instead of if/else True/False
    return math.fabs(calculatefm(h, q) - f_0_m) <= 1 * 10 ** -4
def calculateInclination(x, h_s0, q):
    """Slope of the sag curve at coordinate x for the state-0 tension.

    :param x: X coordinate measured from the curve origin [m]
    :param h_s0: tension force from state 0 [N]
    :param q: total cable load [N/m]
    :return: tangent of the curve's inclination at x
    """
    parameter = calculateK(h_s0, q)
    return math.sinh(x / parameter)
def calculateGeoLength(H, q):
    """Geometric cable length for tension H using the catenary geometry.

    :param H: cable tension [N]
    :param q: total cable load [N/m]
    :return: geometric length of the cable [m]
    """
    parameter = calculateK(H, q)
    arc_projection = 2 * parameter * math.sinh(SPAN_A / (2 * parameter))
    if SPAD_B == 0:
        # level supports: the arc length alone
        return arc_projection
    # inclined supports: combine the drop with the levelled arc length
    return math.sqrt(SPAD_B ** 2 + arc_projection ** 2)
def getBasePoints(m_0):
    """Return the X coordinates of the two supports.

    :param m_0: eccentricity m of the sag curve [m]
    :return: (x_a, x_b) support coordinates [m]
    """
    x_a = math.fabs(-0.5 * SPAN_A + m_0)
    x_b = 0.5 * SPAN_A + m_0
    return x_a, x_b
def calculateForces(x, H, L, q):
    """Support forces for a given state: vertical component V(x) and the
    horizontal wind force W(x).

    :param x: coordinate where the forces are evaluated [m]
    :param H: tension force [N]
    :param L: span length carrying the wind load [m]
    :param q: total load [N/m]
    :return: (V, W) vertical force and horizontal wind force, rounded to
        two decimals [N]
    """
    # dead locals from the original (tg_A, F_0, sV, sH) removed: they were
    # computed but never used in the returned values
    k_0 = calculateK(H, q)
    # vertical component of the tension at x (vertical load only)
    V_X = q_tw * k_0 * math.sinh(x / k_0)
    # half of the total wind load is carried by each support
    W_X = 0.5 * qw_1 * L
    return round(V_X, 2), round(W_X, 2)
def findH_S1(h_max, h_min, l_0, h_0):
    """Find the state-1 tension force by bisection.

    Converges on the tension H for which the geometric cable length
    (catenary formula) equals the physical cable length (thermal plus
    elastic elongation of the state-0 length l_0).

    :param h_max: upper bound for the tension search [N]
    :param h_min: lower bound for the tension search [N]
    :param l_0: geometric cable length from state 0 [m]
    :param h_0: tension force from state 0 [N]
    :return: (tension force, number of iterations, cable length), or
        False when no solution is bracketed by the given bounds
        (NOTE(review): mixed return types - callers must check for False)
    """
    # lengths at the lower bound
    k_min = calculateK(h_min, q_t)
    l_geo_min = math.sqrt(
        SPAD_B ** 2 + (2 * k_min * math.sinh(SPAN_A / (2 * k_min))) ** 2)
    # physical length: thermal expansion + elastic elongation of l_0
    l_phys_min = l_0 * (1 + e_T * T_d + (h_min - h_0) / (E * A))
    # lengths at the upper bound
    k_max = calculateK(h_max, q_t)
    l_geo_max = math.sqrt(
        SPAD_B ** 2 + (2 * k_max * math.sinh(SPAN_A / (2 * k_max))) ** 2)
    l_phys_max = l_0 * (1 + e_T * T_d + (h_max - h_0) / (E * A))
    # seed values only force at least one loop iteration
    l_phys = 1
    l_geo = 5
    i = 1
    # a root is bracketed only when the length curves cross between bounds
    if l_geo_min >= l_phys_min and l_phys_max >= l_geo_max:
        while (math.fabs(l_phys - l_geo) >= 1 * 10 ** (-8)):
            h = (h_max + h_min) / 2
            k = calculateK(h, q_t)
            l_geo = math.sqrt(
                SPAD_B ** 2 + (2 * k * math.sinh(SPAN_A / (2 * k))) ** 2)
            l_phys = l_0 * (1 + e_T * T_d + (h - h_0) / (E * A))
            i += 1
            if l_geo >= l_phys:
                h_min = h
            else:
                h_max = h
        return h, i, l_geo
    else:
        print("Dla podanych wartości granicznych rozwiązanie nie istnieje")
        return False
def calculateHorizontalForce(qwind, span):
    """Horizontal support reaction due to wind.

    :param qwind: wind load perpendicular to the cable line [N/m]
    :param span: horizontal span of the cable (plan-view length) [m]
    :return: horizontal force carried by one support [N]
    """
    total_wind_load = qwind * span
    # each of the two supports carries half of the wind load
    return total_wind_load / 2
def calculateTensileForce(Hten, H, V):
    """Resultant tensile force at a support.

    :param Hten: initial tension force [N]
    :param H: horizontal force at the support [N]
    :param V: vertical force at the support [N]
    :return: magnitude of the resultant force [N]
    """
    squared_components = Hten ** 2 + H ** 2 + V ** 2
    return math.sqrt(squared_components)
def drawCatenary(H, x_A, x_B):
"""
:param H: Siła naciągu [N]
:param x_A: Współrzędna X podpory A [m]
:param x_B: Współrzędna X podpory B [m]
:param q: Obciążenie całkowite [N/m]
:return: void
"""
k = calculateK(H, q_t)
m = int(round(calculateM(k), 0))
def catenary(x):
# Krzywa łańcuchowa:
f = -k * (math.cosh(x / k) - 1)
return f
def drange(start, stop, step):
r = start
while r < stop:
yield r
r += step
plotRangeGenerator = drange((-SPAN_A / 2) + m, (SPAN_A / 2) + m, 2)
plotRange = [i for i in plotRangeGenerator]
Yaxis = [-catenary(x) for x in plotRange]
| |
import datetime
import math
import os
import random
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from PIL import Image
from skimage.transform import rescale
from sklearn import preprocessing
from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder
class RawData:
""" Keeps imported data, their paths and visualization parameters together.
"""
def __init__(self, HYPER):
""" Initializes paths and miscellaneous other values """
# provide the path to where data is stored
path_to_data = '../data/'
# provide the path to where images are stored
self.path_to_images = '../images/'
if not os.path.exists(self.path_to_images):
os.mkdir(self.path_to_images)
# provide the saving path to where computational graph images are stored
self.path_to_computational_graphs = self.path_to_images + 'computational graphs/'
if not os.path.exists(self.path_to_computational_graphs):
os.mkdir(self.path_to_computational_graphs)
# determines how many exemplar subplots to show for load profiles
self.n_subplots = 10
# set the range of the histogram bins and the total number of bins.
self.histo_range = (0, 1)
# set the number of channels
if HYPER.GREY_SCALE:
self.n_channels = 1
else:
self.n_channels = 3
# set the path to electric load profile data
if HYPER.LABELS == 'feature_scaled' or HYPER.LABELS == 'random_scaled':
self.path_to_building_year_profile_folder = (
path_to_data
+ 'public/'
+ HYPER.PROFILE_SET
+ '/building-year profiles/'
+ HYPER.LABELS
+ '/'
)
else:
self.path_to_building_year_profile_folder = (
path_to_data
+ 'private/'
+ HYPER.PROFILE_SET
+ '/building-year profiles/'
+ HYPER.LABELS
+ '/'
)
# set the path to meteo data
self.path_to_meteo_data_folder = (
path_to_data
+ 'public/'
+ HYPER.PROFILE_SET
+ '/meteo data/'
)
# set the path to aerial imagery data
if HYPER.PRIVATE_DATA_ACCESS:
self.path_to_aerial_imagery_folder = (
path_to_data
+ 'private/'
+ HYPER.PROFILE_SET
+ '/building imagery/'
+ 'padded/'
)
else:
if HYPER.SPATIAL_FEATURES == 'histogram':
self.path_to_aerial_imagery_folder = (
path_to_data
+ 'public/'
+ HYPER.PROFILE_SET
+ '/building imagery/'
+ 'histogram/'
)
elif HYPER.SPATIAL_FEATURES == 'average':
self.path_to_aerial_imagery_folder = (
path_to_data
+ 'public/'
+ HYPER.PROFILE_SET
+ '/building imagery/'
+ 'average/'
)
if HYPER.GREY_SCALE:
self.path_to_aerial_imagery_folder = (
self.path_to_aerial_imagery_folder
+ 'greyscale/'
)
else:
self.path_to_aerial_imagery_folder = (
self.path_to_aerial_imagery_folder
+ 'rgb/'
)
# create the experiment name string for saving models and results
if HYPER.RED_CAND_DATA_ACT_LRN:
self.experiment_name = 'delta1'
else:
self.experiment_name = 'delta0'
if HYPER.UPD_VAL_DATA_ACT_LRN:
self.experiment_name += '_valup1'
else:
self.experiment_name += '_valup0'
# create a results folder if not existent
path_to_results = '../results/'
if not os.path.exists(path_to_results):
os.mkdir(path_to_results)
path_to_results += (
HYPER.PROFILE_SET
+ '/'
)
if not os.path.exists(path_to_results):
os.mkdir(path_to_results)
# set the path to the folder for saving trained encoders
self.path_to_encoder_weights = path_to_results + 'encoder weights/'
if not os.path.exists(self.path_to_encoder_weights):
os.mkdir(self.path_to_encoder_weights)
self.path_to_encoder_weights += self.experiment_name + '/'
if not os.path.exists(self.path_to_encoder_weights):
os.mkdir(self.path_to_encoder_weights)
# set the path to the folder for saving trained AL models
if HYPER.SAVE_ACT_LRN_MODELS:
self.path_to_AL_models = path_to_results +'models/'
if not os.path.exists(self.path_to_AL_models):
os.mkdir(self.path_to_AL_models)
self.path_to_AL_models += self.experiment_name + '/'
if not os.path.exists(self.path_to_AL_models):
os.mkdir(self.path_to_AL_models)
# set the path to the folder for saving AL test results or hyper params
if HYPER.SAVE_ACT_LRN_RESULTS or HYPER.SAVE_HYPER_PARAMS:
self.path_to_AL_results = path_to_results + 'values/'
if not os.path.exists(self.path_to_AL_results):
os.mkdir(self.path_to_AL_results)
self.path_to_AL_results += self.experiment_name + '/'
if not os.path.exists(self.path_to_AL_results):
os.mkdir(self.path_to_AL_results)
# set the path to the folder for saving AL test results or hyper params
if HYPER.SAVE_ACT_LRN_TEST_SAMPLE:
self.path_to_AL_test_samples = path_to_results + 'samples/'
if not os.path.exists(self.path_to_AL_test_samples):
os.mkdir(self.path_to_AL_test_samples)
self.path_to_AL_test_samples += self.experiment_name + '/'
if not os.path.exists(self.path_to_AL_test_samples):
os.mkdir(self.path_to_AL_test_samples)
def show_attributes(self):
    """Print the name of every instance attribute, one per line.

    Only the attribute names are printed, never their values.
    """
    # Iterate the keys directly; the values were never used in this loop.
    for attr in self.__dict__:
        print(attr)
class Dataset:
    """ Keeps a dataset together that contains multiple elements of X_t, X_s,
    X_s1, X_st and Y.
    """

    def __init__(self, X_t_ord_1D, X_t, X_s, X_s1, X_st, Y):
        """ Initializes a complete set of attributes for a new Dataset object.
        Note that missing values should conventionally be passed with a zero.
        """
        self.X_t_ord_1D = X_t_ord_1D
        self.X_t = X_t
        self.X_s = X_s
        self.X_s1 = X_s1
        self.X_st = X_st
        self.Y = Y
        # the number of datapoints is given by the leading axis of X_t
        self.n_datapoints = len(X_t)

    @staticmethod
    def _is_placeholder(value):
        """ Returns True if value is a scalar placeholder for missing data
        (conventionally zero) rather than an indexable array.
        """
        return isinstance(value, (int, float))

    def randomize(self):
        """ Randomizes all data entries.

        A single random permutation is shared by every array attribute so
        that rows stay aligned across X_t, X_s, ..., Y (and Y_copy when
        present). Scalar placeholders for missing data are left untouched.
        """
        # one shared permutation keeps all arrays row-aligned
        permutation = np.arange(self.n_datapoints)
        np.random.shuffle(permutation)
        for name in ('X_t_ord_1D', 'X_t', 'X_s', 'X_s1', 'X_st', 'Y', 'Y_copy'):
            if not hasattr(self, name):
                # only Y_copy is optional; skip it when absent
                continue
            value = getattr(self, name)
            if not self._is_placeholder(value):
                setattr(self, name, value[permutation])

    def show_attributes(self):
        """ Prints out the attribute names of this class when called.
        """
        for attr in self.__dict__:
            print(attr)
def import_consumption_profiles(HYPER, raw_data, silent=False, plot=True):
""" Imports consumption profiles and appends the following lists to the
raw_data object: building_year_profiles_list, building_id_list,
cluster_id_list, year_id_list, building_id_set, cluster_id_set, year_id_set,
cluster_year_set.
"""
if not silent:
# tell us what we are doing
print('Importing consumption profiles')
# create a progress bar
progbar = tf.keras.utils.Progbar(len(HYPER.PROFILE_YEARS))
# save dataframes here instead of under distinct names
building_year_profiles_list = []
memory_demand_GB = 0
# iterate over the list of years for which we want to import load profiles
for index_year, year in enumerate(HYPER.PROFILE_YEARS):
# get the path to currently iterated building-year profiles file
path_to_building_year_profile_files = (
raw_data.path_to_building_year_profile_folder
+ str(year)
+ ' building-year profiles.csv'
)
# load currently iterated file
df = pd.read_csv(path_to_building_year_profile_files)
# get the building IDs of profiles
building_ids = df.columns.values[1:]
# get the cluster IDs of profiles and drop the row
cluster_ids = df.iloc[0, 1:].values.astype(int)
# get the years of profiles and replace them with the year ID used here
years = df.iloc[1, 1:].values.astype(int)
year_ids = years
year_ids[:] = index_year
        # drop the cluster ID and year rows
df = df.drop([0, 1])
# rename the 'building ID' column name to 'local_time' so as to match
# the meteo files' column name for search later
df = df.rename(columns={'building ID': 'local_time'})
# get the time stamp of the imported meters
time_stamp_profiles = df.pop('local_time')
# set the new time stamp as index
df = df.set_index(time_stamp_profiles)
# create a random array
randomize = np.arange(len(building_ids))
np.random.shuffle(randomize)
# shuffle ID orders with same random array
building_ids = building_ids[randomize]
cluster_ids = cluster_ids[randomize]
year_ids = year_ids[randomize]
# shorten the considered ID lists according to your chosen number of
# considerable profiles per year
n_profiles = math.ceil(HYPER.PROFILES_PER_YEAR * len(building_ids))
building_ids = building_ids[: n_profiles]
cluster_ids = cluster_ids[: n_profiles]
year_ids = year_ids[: n_profiles]
# shorten dataframe accordingly
df = df[building_ids]
# check if first iteration
if year == HYPER.PROFILE_YEARS[0]:
# if yes, set the id lists equal to currently iterated lists
building_id_list = building_ids
cluster_id_list = cluster_ids
year_id_list = year_ids
else:
# if not, concatenate previous lists with currently iterated lists
building_id_list = np.concatenate((building_id_list, building_ids))
cluster_id_list = np.concatenate((cluster_id_list, cluster_ids))
year_id_list = np.concatenate((year_id_list, year_ids))
# append dataframe
building_year_profiles_list.append(df)
# accumulate the memory demand of building-year profiles we imported
memory_demand_GB = memory_demand_GB + df.memory_usage().sum() * 1e-9
if not silent:
# increment the progress bar
progbar.add(1)
# get the set of building IDs, i.e. drop the duplicate entries
building_id_set = set(building_id_list)
# get the set of building IDs, i.e. drop the duplicate entries
cluster_id_set = set(cluster_id_list)
# get the set of year IDs. Note: this should be equal to PROFILE_YEARS
year_id_set = set(year_id_list)
# get set of cluster-year ID combinations
cluster_year_set = set(list(zip(cluster_id_list, year_id_list)))
raw_data.building_year_profiles_list = building_year_profiles_list
raw_data.building_id_list = building_id_list
raw_data.cluster_id_list = cluster_id_list
raw_data.year_id_list = year_id_list
raw_data.building_id_set = building_id_set
raw_data.cluster_id_set = cluster_id_set
raw_data.year_id_set = year_id_set
raw_data.cluster_year_set = cluster_year_set
# Tell us how much RAM we are occupying with the just imported profiles
print(
'The',
len(building_id_list),
'imported electric load profiles demand a total amount of',
memory_demand_GB,
'GB of RAM',
)
if plot:
# set the number of subplots to the minimum of the desired value and the
# actually available profiles for plotting
n_subplots = min(raw_data.n_subplots, len(df.columns))
# Visualize some profiles
_ = df.iloc[:, :n_subplots].plot(
| |
<filename>pytorch_src/trainer.py
'''
file: trainer.py
date: 2018_05_07
author: zhangxiong(<EMAIL>)
'''
import sys
from model import HMRNetBase
from Discriminator import Discriminator
from config import args
import config
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader, ConcatDataset
from dataloader.AICH_dataloader import AICH_dataloader
from dataloader.COCO2017_dataloader import COCO2017_dataloader
from dataloader.hum36m_dataloader import hum36m_dataloader
from dataloader.lsp_dataloader import LspLoader
from dataloader.lsp_ext_dataloader import LspExtLoader
from dataloader.mosh_dataloader import mosh_dataloader
from dataloader.mpi_inf_3dhp_dataloader import mpi_inf_3dhp_dataloader
from dataloader.eval_dataloader import eval_dataloader
from util import align_by_pelvis, batch_rodrigues, copy_state_dict
from timer import Clock
import time
import datetime
from collections import OrderedDict
import os
class HMRTrainer(object):
def __init__(self):
    """ Builds the HMR generator/discriminator pair and the training data
    loaders. Requires a CUDA device to be available.
    """
    # image tensor layout and preprocessing options shared by all loaders
    self.pix_format = 'NCHW'
    self.normalize = True
    self.flip_prob = 0.5
    self.use_flip = False
    # per-sample SMPL loss weights for evaluation-sized batches (all ones)
    self.w_smpl = torch.ones((config.args.eval_batch_size)).float().cuda()
    # build networks and optimizers first, then the data loaders
    self._build_model()
    self._create_data_loader()
def _create_data_loader(self):
    """ Creates the 2D-keypoint, adversarial (mosh) and 3D data loaders
    from the dataset name lists declared in the config module.
    """
    self.loader_2d = self._create_2d_data_loader(config.train_2d_set)
    self.loader_mosh = self._create_adv_data_loader(config.train_adv_set)
    self.loader_3d = self._create_3d_data_loader(config.train_3d_set)
def _build_model(self):
    """ Creates the HMR generator and discriminator, optionally loads
    pre-trained weights for each, wraps both in DataParallel on CUDA and
    sets up their Adam optimizers and StepLR schedulers.
    """
    print('start building model.')

    def load_pretrained(model, key):
        # Load the pre-trained checkpoint registered under `key` when the
        # file exists; otherwise just report that it is missing.
        model_path = config.pre_trained_model[key]
        if os.path.exists(model_path):
            copy_state_dict(
                model.state_dict(),
                torch.load(model_path),
                prefix = 'module.'
            )
        else:
            print('model {} not exist!'.format(model_path))

    generator = HMRNetBase()
    load_pretrained(generator, 'generator')

    discriminator = Discriminator()
    load_pretrained(discriminator, 'discriminator')

    self.generator = nn.DataParallel(generator).cuda()
    self.discriminator = nn.DataParallel(discriminator).cuda()

    self.e_opt = torch.optim.Adam(
        self.generator.parameters(),
        lr = args.e_lr,
        weight_decay = args.e_wd
    )
    self.d_opt = torch.optim.Adam(
        self.discriminator.parameters(),
        lr = args.d_lr,
        weight_decay = args.d_wd
    )
    # decay both learning rates by 10% every 500 scheduler steps
    self.e_sche = torch.optim.lr_scheduler.StepLR(
        self.e_opt,
        step_size = 500,
        gamma = 0.9
    )
    self.d_sche = torch.optim.lr_scheduler.StepLR(
        self.d_opt,
        step_size = 500,
        gamma = 0.9
    )
    print('Finished building model.')
def _create_2d_data_loader(self, data_2d_set):
    """ Builds one shuffling DataLoader over the concatenation of the
    requested 2D-keypoint datasets.

    data_2d_set: iterable of dataset names; each must be one of 'coco',
    'lsp', 'lsp_ext' or 'ai-ch', otherwise the whole process exits.
    """
    data_set = []
    for data_set_name in data_2d_set:
        data_set_path = config.data_set_path[data_set_name]
        if data_set_name == 'coco':
            coco = COCO2017_dataloader(
                data_set_path = data_set_path,
                use_crop = True,
                scale_range = [1.05, 1.3],
                use_flip = self.use_flip,
                only_single_person = False,
                min_pts_required = 7,
                max_intersec_ratio = 0.5,
                pix_format = self.pix_format,
                normalize = self.normalize,
                flip_prob = self.flip_prob
            )
            data_set.append(coco)
        elif data_set_name == 'lsp':
            lsp = LspLoader(
                data_set_path = data_set_path,
                use_crop = True,
                scale_range = [1.05, 1.3],
                use_flip = self.use_flip,
                pix_format = self.pix_format,
                normalize = self.normalize,
                flip_prob = self.flip_prob
            )
            data_set.append(lsp)
        elif data_set_name == 'lsp_ext':
            lsp_ext = LspExtLoader(
                data_set_path = data_set_path,
                use_crop = True,
                scale_range = [1.1, 1.2],
                use_flip = self.use_flip,
                pix_format = self.pix_format,
                normalize = self.normalize,
                flip_prob = self.flip_prob
            )
            data_set.append(lsp_ext)
        elif data_set_name == 'ai-ch':
            ai_ch = AICH_dataloader(
                data_set_path = data_set_path,
                use_crop = True,
                scale_range = [1.1, 1.2],
                use_flip = self.use_flip,
                only_single_person = False,
                min_pts_required = 5,
                max_intersec_ratio = 0.1,
                pix_format = self.pix_format,
                normalize = self.normalize,
                flip_prob = self.flip_prob
            )
            data_set.append(ai_ch)
        else:
            # unknown dataset name: abort the whole run
            msg = 'invalid 2d dataset'
            sys.exit(msg)
    con_2d_dataset = ConcatDataset(data_set)
    return DataLoader(
        dataset = con_2d_dataset,
        batch_size = config.args.batch_size,
        shuffle = True,
        drop_last = True,
        pin_memory = True,
        num_workers = config.args.num_worker
    )
def _create_3d_data_loader(self, data_3d_set):
    """ Builds one shuffling DataLoader over the concatenation of the
    requested 3D datasets.

    data_3d_set: iterable of dataset names; each must be 'mpi-inf-3dhp'
    or 'hum3.6m', otherwise the whole process exits.
    """
    data_set = []
    for data_set_name in data_3d_set:
        data_set_path = config.data_set_path[data_set_name]
        if data_set_name == 'mpi-inf-3dhp':
            mpi_inf_3dhp = mpi_inf_3dhp_dataloader(
                data_set_path = data_set_path,
                use_crop = True,
                scale_range = [1.1, 1.2],
                use_flip = self.use_flip,
                min_pts_required = 5,
                pix_format = self.pix_format,
                normalize = self.normalize,
                flip_prob = self.flip_prob
            )
            data_set.append(mpi_inf_3dhp)
        elif data_set_name == 'hum3.6m':
            hum36m = hum36m_dataloader(
                data_set_path = data_set_path,
                use_crop = True,
                scale_range = [1.1, 1.2],
                use_flip = self.use_flip,
                min_pts_required = 5,
                pix_format = self.pix_format,
                normalize = self.normalize,
                flip_prob = self.flip_prob
            )
            data_set.append(hum36m)
        else:
            # unknown dataset name: abort the whole run
            msg = 'invalid 3d dataset'
            sys.exit(msg)
    con_3d_dataset = ConcatDataset(data_set)
    return DataLoader(
        dataset = con_3d_dataset,
        batch_size = config.args.batch_3d_size,
        shuffle = True,
        drop_last = True,
        pin_memory = True,
        num_workers = config.args.num_worker
    )
def _create_adv_data_loader(self, data_adv_set):
    """ Builds one shuffling DataLoader over the concatenated adversarial
    prior datasets named in data_adv_set (only 'mosh' is supported).
    """
    adv_sets = []
    for name in data_adv_set:
        path = config.data_set_path[name]
        # guard clause: any name other than 'mosh' aborts the whole run
        if name != 'mosh':
            msg = 'invalid adv dataset'
            sys.exit(msg)
        # mosh supplies real SMPL parameter samples for the discriminator
        adv_sets.append(
            mosh_dataloader(
                data_set_path = path,
                use_flip = self.use_flip,
                flip_prob = self.flip_prob
            )
        )
    return DataLoader(
        dataset = ConcatDataset(adv_sets),
        batch_size = config.args.adv_batch_size,
        shuffle = True,
        drop_last = True,
        pin_memory = True,
    )
def _create_eval_data_loader(self, data_eval_set):
    """ Builds one non-shuffling DataLoader over the concatenation of the
    requested evaluation datasets (only 'up3d' is supported).
    """
    data_set = []
    for data_set_name in data_eval_set:
        data_set_path = config.data_set_path[data_set_name]
        if data_set_name == 'up3d':
            up3d = eval_dataloader(
                data_set_path = data_set_path,
                use_flip = False,
                flip_prob = self.flip_prob,
                pix_format = self.pix_format,
                normalize = self.normalize
            )
            data_set.append(up3d)
        else:
            # unknown dataset name: abort the whole run
            msg = 'invalid eval dataset'
            sys.exit(msg)
    con_eval_dataset = ConcatDataset(data_set)
    # evaluation: keep order, keep every sample (no shuffle, no drop_last)
    return DataLoader(
        dataset = con_eval_dataset,
        batch_size = config.args.eval_batch_size,
        shuffle = False,
        drop_last = False,
        pin_memory = True,
        num_workers = config.args.num_worker
    )
def train(self):
    """ Runs the main adversarial training loop for config.args.iter_count
    iterations, alternating a generator update and a discriminator update,
    and snapshots both networks every 500 iterations.
    """
    def save_model(result):
        # The SMPL sub-module weights are fixed; strip them from checkpoints.
        exclude_key = 'module.smpl'
        def exclude_smpl(model_dict):
            result = OrderedDict()
            for (k, v) in model_dict.items():
                if exclude_key in k:
                    continue
                result[k] = v
            return result
        parent_folder = args.save_folder
        if not os.path.exists(parent_folder):
            os.makedirs(parent_folder)
        title = result['title']
        generator_save_path = os.path.join(parent_folder, title + 'generator.pkl')
        torch.save(exclude_smpl(self.generator.state_dict()), generator_save_path)
        disc_save_path = os.path.join(parent_folder, title + 'discriminator.pkl')
        torch.save(exclude_smpl(self.discriminator.state_dict()), disc_save_path)
        # also dump the logged loss summary next to the weights
        with open(os.path.join(parent_folder, title + '.txt'), 'w') as fp:
            fp.write(str(result))
    #pre_best_loss = None
    torch.backends.cudnn.benchmark = True
    # the loaders are manually re-wound on StopIteration below, so training
    # can run for more iterations than one epoch of any single loader
    loader_2d, loader_3d, loader_mosh = iter(self.loader_2d), iter(self.loader_3d), iter(self.loader_mosh)
    e_opt, d_opt = self.e_opt, self.d_opt
    self.generator.train()
    self.discriminator.train()
    for iter_index in range(config.args.iter_count):
        try:
            data_2d = next(loader_2d)
        except StopIteration:
            loader_2d = iter(self.loader_2d)
            data_2d = next(loader_2d)
        try:
            data_3d = next(loader_3d)
        except StopIteration:
            loader_3d = iter(self.loader_3d)
            data_3d = next(loader_3d)
        try:
            data_mosh = next(loader_mosh)
        except StopIteration:
            loader_mosh = iter(self.loader_mosh)
            data_mosh = next(loader_mosh)
        # one joint generator batch: 2d samples first, then 3d samples
        image_from_2d, image_from_3d = data_2d['image'], data_3d['image']
        sample_2d_count, sample_3d_count, sample_mosh_count = image_from_2d.shape[0], image_from_3d.shape[0], data_mosh['theta'].shape[0]
        images = torch.cat((image_from_2d, image_from_3d), dim = 0).cuda()
        generator_outputs = self.generator(images)
        loss_kp_2d, loss_kp_3d, loss_shape, loss_pose, e_disc_loss, d_disc_loss, d_disc_real, d_disc_predict = self._calc_loss(generator_outputs, data_2d, data_3d, data_mosh)
        e_loss = loss_kp_2d + loss_kp_3d + loss_shape + loss_pose + e_disc_loss
        d_loss = d_disc_loss
        # generator step first, then discriminator step; the two backward
        # graphs are independent because _calc_loss detaches the fake thetas
        e_opt.zero_grad()
        e_loss.backward()
        e_opt.step()
        d_opt.zero_grad()
        d_loss.backward()
        d_opt.step()
        # convert to plain floats and divide out the extra ratios -- these
        # values are used for logging (and the checkpoint title) only
        loss_kp_2d = float(loss_kp_2d)
        loss_shape = float(loss_shape / args.e_shape_ratio)
        loss_kp_3d = float(loss_kp_3d / args.e_3d_kp_ratio)
        loss_pose = float(loss_pose / args.e_pose_ratio)
        e_disc_loss = float(e_disc_loss / args.d_disc_ratio)
        d_disc_loss = float(d_disc_loss / args.d_disc_ratio)
        d_disc_real = float(d_disc_real / args.d_disc_ratio)
        d_disc_predict = float(d_disc_predict / args.d_disc_ratio)
        e_loss = loss_kp_2d + loss_kp_3d + loss_shape + loss_pose + e_disc_loss
        d_loss = d_disc_loss
        iter_msg = OrderedDict(
            [
                ('time',datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S')),
                ('iter',iter_index),
                ('e_loss', e_loss),
                ('2d_loss',loss_kp_2d),
                ('3d_loss',loss_kp_3d),
                ('shape_loss',loss_shape),
                ('pose_loss', loss_pose),
                ('e_disc_loss',float(e_disc_loss)),
                ('d_disc_loss',float(d_disc_loss)),
                ('d_disc_real', float(d_disc_real)),
                ('d_disc_predict', float(d_disc_predict))
            ]
        )
        print(iter_msg)
        # periodic checkpoint named by iteration and current generator loss
        if iter_index % 500 == 0:
            iter_msg['title'] = '{}_{}_'.format(iter_msg['iter'], iter_msg['e_loss'])
            save_model(iter_msg)
def _calc_loss(self, generator_outputs, data_2d, data_3d, data_mosh):
    """ Computes all generator and discriminator losses for one batch.

    Returns (loss_kp_2d, loss_kp_3d, loss_shape, loss_pose, e_disc_loss,
    d_disc_loss, d_disc_real, d_disc_fake), each already scaled by its
    configured loss weight/ratio.
    """
    def _accumulate_thetas(generator_outputs):
        # collect the theta prediction of every refinement stage
        thetas = []
        for (theta, verts, j2d, j3d, Rs) in generator_outputs:
            thetas.append(theta)
        return torch.cat(thetas, 0)

    # The generator batch is [2d samples | 3d samples]; 3d-only losses
    # slice off the leading 2d part below. (The unused 3d/mosh counts of
    # the original were dropped; sample_mosh_count was even computed as
    # the full .shape tuple instead of .shape[0].)
    sample_2d_count = data_2d['kp_2d'].shape[0]
    data_3d_theta, w_3d, w_smpl = data_3d['theta'].cuda(), data_3d['w_3d'].float().cuda(), data_3d['w_smpl'].float().cuda()

    total_predict_thetas = _accumulate_thetas(generator_outputs)
    (predict_theta, predict_verts, predict_j2d, predict_j3d, predict_Rs) = generator_outputs[-1]

    real_2d, real_3d = torch.cat((data_2d['kp_2d'], data_3d['kp_2d']), 0).cuda(), data_3d['kp_3d'].float().cuda()
    # 2d keypoints are supervised for the whole batch; 3d joints, pose and
    # shape only for the 3d part
    predict_j3d = predict_j3d[sample_2d_count:, :]
    predict_theta = predict_theta[sample_2d_count:, :]

    loss_kp_2d = self.batch_kp_2d_l1_loss(real_2d, predict_j2d[:,:14,:]) * args.e_loss_weight
    loss_kp_3d = self.batch_kp_3d_l2_loss(real_3d, predict_j3d[:,:14,:], w_3d) * args.e_3d_loss_weight * args.e_3d_kp_ratio

    # theta layout appears to be [0:3) camera, [3:75) pose, [75:) shape
    # -- NOTE(review): confirm against the SMPL parameterization used
    real_shape, predict_shape = data_3d_theta[:, 75:], predict_theta[:, 75:]
    loss_shape = self.batch_shape_l2_loss(real_shape, predict_shape, w_smpl) * args.e_3d_loss_weight * args.e_shape_ratio

    real_pose, predict_pose = data_3d_theta[:, 3:75], predict_theta[:, 3:75]
    loss_pose = self.batch_pose_l2_loss(real_pose.contiguous(), predict_pose.contiguous(), w_smpl) * args.e_3d_loss_weight * args.e_pose_ratio

    e_disc_loss = self.batch_encoder_disc_l2_loss(self.discriminator(total_predict_thetas)) * args.d_loss_weight * args.d_disc_ratio

    # detach so discriminator gradients do not flow into the generator
    mosh_real_thetas = data_mosh['theta'].cuda()
    fake_thetas = total_predict_thetas.detach()
    fake_disc_value, real_disc_value = self.discriminator(fake_thetas), self.discriminator(mosh_real_thetas)
    d_disc_real, d_disc_fake, d_disc_loss = self.batch_adv_disc_l2_loss(real_disc_value, fake_disc_value)
    d_disc_real, d_disc_fake, d_disc_loss = d_disc_real * args.d_loss_weight * args.d_disc_ratio, d_disc_fake * args.d_loss_weight * args.d_disc_ratio, d_disc_loss * args.d_loss_weight * args.d_disc_ratio

    return loss_kp_2d, loss_kp_3d, loss_shape, loss_pose, e_disc_loss, d_disc_loss, d_disc_real, d_disc_fake
"""
purpose:
calc L1 error
Inputs:
kp_gt : N x K x 3
kp_pred: N x K x 2
"""
def batch_kp_2d_l1_loss(self, real_2d_kp, predict_2d_kp):
    """ Visibility-weighted mean absolute (L1) 2D keypoint error.

    real_2d_kp:    N x K x 3 ground truth; the last channel is visibility.
    predict_2d_kp: N x K x 2 predicted keypoints.
    """
    gt_flat = real_2d_kp.view(-1, 3)
    pred_flat = predict_2d_kp.contiguous().view(-1, 2)
    visibility = gt_flat[:, 2]
    # normalizer: two coordinates per visible keypoint (eps avoids 0/0)
    normalizer = torch.sum(visibility) * 2.0 + 1e-8
    abs_err = torch.abs(gt_flat[:, :2] - pred_flat).sum(1)
    return torch.matmul(abs_err, visibility) / normalizer
'''
purpose:
calc mse * 0.5
Inputs:
real_3d_kp : N x k x 3
fake_3d_kp : N x k x 3
w_3d : N x 1
'''
def batch_kp_3d_l2_loss(self, real_3d_kp, fake_3d_kp, w_3d):
    """ Per-sample weighted half-MSE between pelvis-aligned 3D keypoints.

    real_3d_kp / fake_3d_kp: N x K x 3 joints, w_3d: N sample weights.
    """
    n_joints = real_3d_kp.shape[1]
    # 3 coordinates per joint, x2 for the 0.5 * MSE convention (eps avoids 0/0)
    normalizer = torch.sum(w_3d) * n_joints * 3.0 * 2.0 + 1e-8
    # remove global translation by aligning both joint sets at the pelvis
    aligned_gt = align_by_pelvis(real_3d_kp)
    aligned_pred = align_by_pelvis(fake_3d_kp)
    sq_err = (aligned_gt - aligned_pred) ** 2
    per_sample = sq_err.sum(1).sum(1)
    return torch.matmul(per_sample, w_3d) / normalizer
'''
purpose:
calc mse * 0.5
Inputs:
real_shape : N x 10
fake_shape : N x 10
w_shape : N x 1
'''
def batch_shape_l2_loss(self, real_shape, fake_shape, w_shape):
    """ Per-sample weighted half-MSE between SMPL shape parameters.

    real_shape / fake_shape: N x 10 coefficients, w_shape: N sample weights.
    """
    # 10 shape coefficients, x2 for the 0.5 * MSE convention (eps avoids 0/0)
    normalizer = torch.sum(w_shape) * 10.0 * 2.0 + 1e-8
    sq_dif = (real_shape - fake_shape) ** 2
    return torch.matmul(sq_dif.sum(1), w_shape) / normalizer
'''
Input:
real_pose : N x 72
fake_pose : N | |
#!/usr/bin/env python
# CREATED:2014-01-18 14:09:05 by <NAME> <<EMAIL>>
# unit tests for util routines

# Disable the librosa cache for the whole test run
import os

try:
    os.environ.pop("LIBROSA_CACHE_DIR")
except KeyError:
    # the variable was not set; nothing to disable
    pass

import platform
import numpy as np
import scipy.sparse
import pytest
import warnings

import librosa
from test_core import srand

np.set_printoptions(precision=3)
def test_example_audio_file():
    # the bundled example audio file must exist on disk
    assert os.path.exists(librosa.util.example_audio_file())
@pytest.mark.parametrize("frame_length", [4, 8])
@pytest.mark.parametrize("hop_length", [2, 4])
@pytest.mark.parametrize("y", [np.random.randn(32)])
@pytest.mark.parametrize("axis", [0, -1])
def test_frame1d(frame_length, hop_length, axis, y):
    """Each frame of a 1-D signal must equal the matching slice of it."""
    y_frame = librosa.util.frame(y, frame_length=frame_length, hop_length=hop_length, axis=axis)
    if axis == -1:
        # framing along the last axis stores frames as columns; transpose
        # so frames can be compared row by row
        y_frame = y_frame.T
    for i in range(y_frame.shape[0]):
        assert np.allclose(y_frame[i], y[i * hop_length : (i * hop_length + frame_length)])
@pytest.mark.parametrize("frame_length", [4, 8])
@pytest.mark.parametrize("hop_length", [2, 4])
@pytest.mark.parametrize(
    "y, axis", [(np.asfortranarray(np.random.randn(16, 32)), -1), (np.ascontiguousarray(np.random.randn(16, 32)), 0)]
)
def test_frame2d(frame_length, hop_length, axis, y):
    """Framing a 2-D array must slice correctly along either end axis."""
    y_frame = librosa.util.frame(y, frame_length=frame_length, hop_length=hop_length, axis=axis)
    if axis == -1:
        # transpose both so the framed axis leads in each array
        y_frame = y_frame.T
        y = y.T
    for i in range(y_frame.shape[0]):
        assert np.allclose(y_frame[i], y[i * hop_length : (i * hop_length + frame_length)])
def test_frame_0stride():
    """Framing a 1-D signal and its 2-D row-vector views must agree."""
    x = np.arange(10)
    # two equivalent 2-D views of x with a leading length-1 axis
    xpad = x[np.newaxis]
    xpad2 = np.atleast_2d(x)
    # use keyword arguments, consistent with every other frame() call in
    # this module (and required by newer librosa releases)
    xf = librosa.util.frame(x, frame_length=3, hop_length=1)
    xfpad = librosa.util.frame(xpad, frame_length=3, hop_length=1)
    xfpad2 = librosa.util.frame(xpad2, frame_length=3, hop_length=1)
    assert np.allclose(xf, xfpad)
    assert np.allclose(xf, xfpad2)
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_frame_badtype():
    # framing a plain list (not an ndarray) must raise ParameterError
    librosa.util.frame([1, 2, 3, 4], frame_length=2, hop_length=1)
@pytest.mark.xfail(raises=librosa.ParameterError)
@pytest.mark.parametrize("axis", [0, -1])
@pytest.mark.parametrize("x", [np.arange(16)])
def test_frame_too_short(x, axis):
    # a frame length longer than the input must raise ParameterError
    librosa.util.frame(x, frame_length=17, hop_length=1, axis=axis)
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_frame_bad_hop():
    # a zero hop length must raise ParameterError
    librosa.util.frame(np.arange(16), frame_length=4, hop_length=0)
@pytest.mark.xfail(raises=librosa.ParameterError)
@pytest.mark.parametrize("axis", [1, 2])
def test_frame_bad_axis(axis):
    # framing is only supported along the first or last axis
    librosa.util.frame(np.zeros((3, 3, 3)), frame_length=2, hop_length=1, axis=axis)
@pytest.mark.xfail(raises=librosa.ParameterError)
@pytest.mark.parametrize("x, axis", [(np.zeros((4, 4), order="C"), -1), (np.zeros((4, 4), order="F"), 0)])
def test_frame_bad_contiguity(x, axis):
    # memory layout must match the framing axis (C-order with axis=0,
    # F-order with axis=-1); the mismatched pairs above must fail
    librosa.util.frame(x, frame_length=2, hop_length=1, axis=axis)
@pytest.mark.parametrize("y", [np.ones((16,)), np.ones((16, 16))])
@pytest.mark.parametrize("m", [0, 10])
@pytest.mark.parametrize("axis", [0, -1])
@pytest.mark.parametrize("mode", ["constant", "edge", "reflect"])
def test_pad_center(y, m, axis, mode):
    """Centered padding must keep the original data in the middle."""
    n = m + y.shape[axis]
    y_out = librosa.util.pad_center(y, n, axis=axis, mode=mode)
    n_len = y.shape[axis]
    # offset at which the original data should start in the padded output
    n_pad = int((n - n_len) / 2)
    eq_slice = [slice(None)] * y.ndim
    eq_slice[axis] = slice(n_pad, n_pad + n_len)
    assert np.allclose(y, y_out[tuple(eq_slice)])
@pytest.mark.parametrize("y", [np.ones((16,)), np.ones((16, 16))])
@pytest.mark.parametrize("n", [0, 10])
@pytest.mark.parametrize("axis", [0, -1])
@pytest.mark.parametrize("mode", ["constant", "edge", "reflect"])
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_pad_center_fail(y, n, axis, mode):
    # here n is the absolute target size and is smaller than the input
    # (16), so pad_center must raise ParameterError
    librosa.util.pad_center(y, n, axis=axis, mode=mode)
@pytest.mark.parametrize("y", [np.ones((16,)), np.ones((16, 16))])
@pytest.mark.parametrize("m", [-5, 0, 5])
@pytest.mark.parametrize("axis", [0, -1])
def test_fix_length(y, m, axis):
    """fix_length must pad (preserving y) or truncate to the target size."""
    n = m + y.shape[axis]
    y_out = librosa.util.fix_length(y, n, axis=axis)
    # the output must have exactly the requested extent along `axis`
    assert y_out.shape[axis] == n
    eq_slice = [slice(None)] * y.ndim
    if n > y.shape[axis]:
        # padded: the original data must survive as a prefix of the output
        eq_slice[axis] = slice(y.shape[axis])
        assert np.allclose(y, y_out[tuple(eq_slice)])
    else:
        # truncated (or unchanged): the output must equal the prefix of y.
        # (The original test compared y against itself here and never
        # inspected y_out at all.)
        eq_slice[axis] = slice(n)
        assert np.allclose(y_out, y[tuple(eq_slice)])
@pytest.mark.parametrize("frames", [np.arange(20, 100, step=15)])
@pytest.mark.parametrize("x_min", [0, 20])
@pytest.mark.parametrize("x_max", [20, 70, 120])
@pytest.mark.parametrize("pad", [False, True])
def test_fix_frames(frames, x_min, x_max, pad):
    """fix_frames must clip to [x_min, x_max] and pad the endpoints."""
    f_fix = librosa.util.fix_frames(frames, x_min=x_min, x_max=x_max, pad=pad)
    if x_min is not None:
        if pad:
            # padding must insert the lower bound itself
            assert f_fix[0] == x_min
        assert np.all(f_fix >= x_min)
    if x_max is not None:
        if pad:
            # padding must insert the upper bound itself
            assert f_fix[-1] == x_max
        assert np.all(f_fix <= x_max)
@pytest.mark.xfail(raises=librosa.ParameterError)
@pytest.mark.parametrize("frames", [np.arange(-20, 100)])
@pytest.mark.parametrize("x_min", [None, 0, 20])
@pytest.mark.parametrize("x_max", [None, 0, 20])
@pytest.mark.parametrize("pad", [False, True])
def test_fix_frames_fail_negative(frames, x_min, x_max, pad):
    # negative frame indices must raise ParameterError
    librosa.util.fix_frames(frames, x_min, x_max, pad)
@pytest.mark.parametrize("norm", [np.inf, -np.inf, 0, 0.5, 1.0, 2.0, None])
@pytest.mark.parametrize("ndims,axis", [(1, 0), (1, -1), (2, 0), (2, 1), (2, -1), (3, 0), (3, 1), (3, 2), (3, -1)])
def test_normalize(ndims, norm, axis):
    """After normalize(), the chosen norm along `axis` must equal one."""
    srand()
    X = np.random.randn(*([4] * ndims))
    X_norm = librosa.util.normalize(X, norm=norm, axis=axis)
    # Shape and dtype checks
    assert X_norm.dtype == X.dtype
    assert X_norm.shape == X.shape
    if norm is None:
        # norm=None is a pass-through
        assert np.allclose(X, X_norm)
        return
    X_norm = np.abs(X_norm)
    # recompute the requested norm and verify it is 1 everywhere
    if norm == np.inf:
        values = np.max(X_norm, axis=axis)
    elif norm == -np.inf:
        values = np.min(X_norm, axis=axis)
    elif norm == 0:
        # XXX: normalization here isn't quite right
        values = np.ones(1)
    else:
        values = np.sum(X_norm ** norm, axis=axis) ** (1.0 / norm)
    assert np.allclose(values, np.ones_like(values))
@pytest.mark.parametrize("norm", ["inf", -0.5, -2])
@pytest.mark.parametrize("X", [np.ones((3, 3))])
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_normalize_badnorm(X, norm):
    # unsupported norm specifications must raise ParameterError
    librosa.util.normalize(X, norm=norm)
@pytest.mark.parametrize("badval", [np.nan, np.inf, -np.inf])
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_normalize_bad_input(badval):
    # non-finite input values must raise ParameterError
    X = np.ones((3, 3))
    X[0] = badval
    librosa.util.normalize(X, norm=np.inf, axis=0)
@pytest.mark.parametrize("fill", [7, "foo"])
@pytest.mark.parametrize("X", [np.ones((2, 2))])
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_normalize_badfill(X, fill):
    # fill must be boolean-like; other values must raise ParameterError
    librosa.util.normalize(X, fill=fill)
@pytest.mark.parametrize("x", [np.asarray([[0, 1, 2, 3]])])
@pytest.mark.parametrize(
    "threshold, result",
    [(None, [[0, 1, 1, 1]]), (1, [[0, 1, 1, 1]]), (2, [[0, 1, 1, 1]]), (3, [[0, 1, 2, 1]]), (4, [[0, 1, 2, 3]])],
)
def test_normalize_threshold(x, threshold, result):
    # columns whose norm falls below the threshold must pass through
    # unnormalized; the rest are scaled to unit norm
    assert np.allclose(librosa.util.normalize(x, threshold=threshold), result)
@pytest.mark.xfail(raises=librosa.ParameterError)
@pytest.mark.parametrize("x", [np.asarray([[0, 1, 2, 3]])])
@pytest.mark.parametrize("threshold", [0, -1])
def test_normalize_threshold_fail(x, threshold):
    # the threshold must be strictly positive
    librosa.util.normalize(x, threshold=threshold)
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_normalize_fill_l0():
    # fill=True is undefined for the l0 pseudo-norm and must fail
    X = np.ones((2, 2))
    librosa.util.normalize(X, fill=True, norm=0)
@pytest.mark.parametrize("norm", [1, 2, np.inf])
@pytest.mark.parametrize("X", [np.zeros((3, 3))])
def test_normalize_fill_allaxes(X, norm):
    """All-zero input with fill=True and axis=None must fill globally."""
    Xn = librosa.util.normalize(X, fill=True, axis=None, norm=norm)
    if norm is np.inf:
        # max-norm fill saturates every entry at 1
        assert np.allclose(Xn, 1)
    else:
        # otherwise the global p-norm of the filled array must be 1
        assert np.allclose(np.sum(Xn ** norm) ** (1.0 / norm), 1)
@pytest.mark.parametrize("norm", [1, 2, np.inf])
@pytest.mark.parametrize("X", [np.zeros((3, 3))])
def test_normalize_nofill(X, norm):
    # with fill=False, all-zero input must stay all-zero
    Xn = librosa.util.normalize(X, fill=False, norm=norm)
    assert np.allclose(Xn, 0)
@pytest.mark.parametrize("X", [np.asarray([[0.0, 1], [0, 1]])])
@pytest.mark.parametrize("norm,value", [(1, 0.5), (2, np.sqrt(2) / 2), (np.inf, 1)])
@pytest.mark.parametrize("threshold", [0.5, 2])
def test_normalize_fill(X, threshold, norm, value):
    # below-threshold columns must be filled with the uniform unit-norm
    # value for the given norm; above-threshold columns normalize to it too
    Xn = librosa.util.normalize(X, fill=True, norm=norm, threshold=threshold)
    assert np.allclose(Xn, value)
@pytest.mark.parametrize("ndim", [1, 3])
@pytest.mark.parametrize("axis", [0, 1, -1])
@pytest.mark.parametrize("index", [False, True])
@pytest.mark.parametrize("value", [None, np.min, np.mean, np.max])
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_axis_sort_badndim(ndim, axis, index, value):
    # axis_sort only supports 2-D input; other ranks must fail
    data = np.zeros([2] * ndim)
    librosa.util.axis_sort(data, axis=axis, index=index, value=value)
@pytest.mark.parametrize("ndim", [2])
@pytest.mark.parametrize("axis", [0, 1, -1])
@pytest.mark.parametrize("index", [False, True])
@pytest.mark.parametrize("value", [None, np.min, np.mean, np.max])
def test_axis_sort(ndim, axis, index, value):
    """axis_sort must return data sorted by the per-slice value function."""
    srand()
    data = np.random.randn(*([10] * ndim))
    if index:
        # the returned index must reproduce the sorted output from `data`
        Xsorted, idx = librosa.util.axis_sort(data, axis=axis, index=index, value=value)
        cmp_slice = [slice(None)] * ndim
        cmp_slice[axis] = idx
        assert np.allclose(data[tuple(cmp_slice)], Xsorted)
    else:
        Xsorted = librosa.util.axis_sort(data, axis=axis, index=index, value=value)
    # values along the complementary axis must come out non-decreasing
    compare_axis = np.mod(1 - axis, 2)
    if value is None:
        # default sort key is argmax -- matches axis_sort's default
        value = np.argmax
    sort_values = value(Xsorted, axis=compare_axis)
    assert np.allclose(sort_values, np.sort(sort_values))
@pytest.mark.parametrize(
    "int_from, int_to",
    [
        (np.asarray([[0, 2], [0, 4], [3, 6]]), np.empty((0, 2), dtype=int)),
        (np.empty((0, 2), dtype=int), np.asarray([[0, 2], [0, 4], [3, 6]])),
    ],
)
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_match_intervals_empty(int_from, int_to):
    # an empty interval set on either side must raise ParameterError
    librosa.util.match_intervals(int_from, int_to)
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_match_intervals_strict_fail():
    # [5, 7] overlaps nothing in int_to, so strict matching must fail
    int_from = np.asarray([[0, 3], [2, 4], [5, 7]])
    int_to = np.asarray([[0, 2], [0, 4]])
    librosa.util.match_intervals(int_from, int_to, strict=True)
@pytest.mark.parametrize("int_from", [np.asarray([[0, 3], [2, 4], [5, 7]])])
@pytest.mark.parametrize("int_to", [np.asarray([[0, 2], [0, 4], [3, 6]])])
@pytest.mark.parametrize("matches", [np.asarray([1, 1, 2])])
def test_match_intervals_strict(int_from, int_to, matches):
    # strict matching must yield exactly the expected index mapping
    test_matches = librosa.util.match_intervals(int_from, int_to, strict=True)
    assert np.array_equal(matches, test_matches)
@pytest.mark.parametrize("int_from", [np.asarray([[0, 3], [2, 4], [5, 7]])])
@pytest.mark.parametrize(
    "int_to,matches",
    [
        (np.asarray([[0, 2], [0, 4], [3, 6]]), np.asarray([1, 1, 2])),
        (np.asarray([[0, 2], [0, 4]]), np.asarray([1, 1, 1])),
    ],
)
def test_match_intervals_nonstrict(int_from, int_to, matches):
    # non-strict matching must fall back to the nearest interval when
    # there is no overlap (second parameter set)
    test_matches = librosa.util.match_intervals(int_from, int_to, strict=False)
    assert np.array_equal(matches, test_matches)
@pytest.mark.parametrize("n", [1, 5, 20, 100])
@pytest.mark.parametrize("m", [1, 5, 20, 100])
def test_match_events(n, m):
    """Every matched event must be the nearest candidate by distance."""
    srand()
    ev1 = np.abs(np.random.randn(n))
    ev2 = np.abs(np.random.randn(m))
    match = librosa.util.match_events(ev1, ev2)
    for i in range(len(match)):
        # no candidate may be strictly closer than the chosen match
        values = np.asarray([np.abs(ev1[i] - e2) for e2 in ev2])
        assert not np.any(values < values[match[i]])
@pytest.mark.parametrize("ev1,ev2", [(np.array([]), np.arange(5)), (np.arange(5), np.array([]))])
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_match_events_failempty(ev1, ev2):
    # an empty event list on either side must raise ParameterError
    librosa.util.match_events(ev1, ev2)
@pytest.mark.parametrize("events_from", [np.asarray([5, 15, 25])])
@pytest.mark.parametrize("events_to", [np.asarray([0, 10, 20, 30])])
@pytest.mark.parametrize("left,right,target", [(False, True, [10, 20, 30]), (True, False, [0, 10, 20])])
def test_match_events_onesided(events_from, events_to, left, right, target):
    """One-sided matching must snap each event to the allowed side only."""
    # parametrize already supplies ndarrays; the original re-converted
    # them with np.asarray for no effect
    match = librosa.util.match_events(events_from, events_to, left=left, right=right)
    assert np.allclose(target, events_to[match])
def test_match_events_twosided():
    """With both sides disallowed, only exact matches are permitted."""
    events_from = np.asarray([5, 15, 25])
    events_to = np.asarray([5, 15, 25, 30])
    match = librosa.util.match_events(events_from, events_to, left=False, right=False)
    assert np.allclose(match, [0, 1, 2])
@pytest.mark.xfail(raises=librosa.ParameterError)
@pytest.mark.parametrize(
    "events_from,events_to,left,right",
    [
        ([40, 15, 25], [0, 10, 20, 30], False, True),  # right-sided fail
        ([-1, 15, 25], [0, 10, 20, 30], True, False),  # left-sided fail
        ([-1, 15, 25], [0, 10, 20, 30], False, False),  # two-sided fail
    ],
)
def test_match_events_onesided_fail(events_from, events_to, left, right):
    # an event with no admissible candidate on the allowed side(s) must
    # raise ParameterError; lists are converted here since parametrize
    # supplies plain Python lists
    events_from = np.asarray(events_from)
    events_to = np.asarray(events_to)
    librosa.util.match_events(events_from, events_to, left=left, right=right)
@pytest.mark.parametrize("ndim, axis", [(n, m) for n in range(1, 5) for m in range(n)])
def test_localmax(ndim, axis):
    """Flagged positions must dominate both neighbors along `axis`."""
    srand()
    data = np.random.randn(*([7] * ndim))
    lm = librosa.util.localmax(data, axis=axis)
    for hits in np.argwhere(lm):
        for offset in [-1, 1]:
            compare_idx = hits.copy()
            compare_idx[axis] += offset
            # skip neighbors that fall outside the array bounds
            if compare_idx[axis] < 0:
                continue
            if compare_idx[axis] >= data.shape[axis]:
                continue
            if offset < 0:
                # strictly greater than the left neighbor ...
                assert data[tuple(hits)] > data[tuple(compare_idx)]
            else:
                # ... but only >= the right neighbor (plateau convention)
                assert data[tuple(hits)] >= data[tuple(compare_idx)]
@pytest.mark.parametrize("x", [np.random.randn(_) ** 2 for _ in [1, 5, 10, 100]])
@pytest.mark.parametrize("pre_max", [0, 1, 10])
@pytest.mark.parametrize("post_max", [1, 10])
@pytest.mark.parametrize("pre_avg", [0, 1, 10])
@pytest.mark.parametrize("post_avg", [1, 10])
@pytest.mark.parametrize("wait", [0, 1, 10])
@pytest.mark.parametrize("delta", [0.05, 100.0])
def test_peak_pick(x, pre_max, post_max, pre_avg, post_avg, delta, wait):
    """Every picked index must satisfy all three peak-picking criteria."""
    peaks = librosa.util.peak_pick(x, pre_max, post_max, pre_avg, post_avg, delta, wait)
    for i in peaks:
        # Test 1: is it a peak in this window?
        s = i - pre_max
        if s < 0:
            s = 0
        t = i + post_max
        diff = x[i] - np.max(x[s:t])
        assert diff > 0 or np.isclose(diff, 0, rtol=1e-3, atol=1e-4)
        # Test 2: is it a big enough peak to count?
        s = i - pre_avg
        if s < 0:
            s = 0
        t = i + post_avg
        diff = x[i] - (delta + np.mean(x[s:t]))
        assert diff > 0 or np.isclose(diff, 0, rtol=1e-3, atol=1e-4)
    # Test 3: peak separation
    assert not np.any(np.diff(peaks) <= wait)
@pytest.mark.xfail(raises=librosa.ParameterError)
@pytest.mark.parametrize("x", [np.random.randn(_) ** 2 for _ in [1, 5, 10, 100]])
@pytest.mark.parametrize(
"pre_max,post_max,pre_avg,post_avg,delta,wait",
[
(-1, 1, 1, 1, 0.05, 1), # negative pre-max
(1, -1, 1, 1, 0.05, 1), # negative post-max
(1, 0, 1, 1, 0.05, 1), # 0 post-max
(1, 1, -1, 1, 0.05, 1), # negative pre-avg
(1, 1, 1, -1, 0.05, 1), # negative post-avg
(1, 1, 1, 0, 0.05, 1), # zero post-avg
(1, 1, 1, 1, -0.05, 1), # negative delta
(1, 1, 1, 1, 0.05, -1), # negative wait
],
)
def test_peak_pick_fail(x, pre_max, post_max, | |
mainleft = [sourcelabelDict[x.getName()] for x in threads if x.is_alive() is True and x.getName() in mainsourceDict]
info = mainleft
if debrid_status:
if len(info) > 6:
line3 = 'Waiting for: %s' % (str(len(info)))
elif len(info) > 0:
line3 = 'Waiting for: %s' % (', '.join(info))
else:
break
percent = int(100 * float(i) / (2 * timeout) + 0.5) % 100
if not progressDialog == control.progressDialogBG:
progressDialog.update(max(1, percent), line1, line2, line3)
else:
progressDialog.update(max(1, percent), line1, line3)
else:
if len(info) > 6:
line2 = 'Waiting for: %s' % (str(len(info)))
elif len(info) > 0:
line2 = 'Waiting for: %s' % (', '.join(info))
else:
break
percent = int(100 * float(i) / (2 * timeout) + 0.5) % 100
progressDialog.update(max(1, percent), line1, line2)
except:
break
time.sleep(0.5)
except:
import traceback
traceback.print_exc()
pass
progressDialog.close()
self.sourcesFilter()
return self.sources
def prepareSources(self):
    """Create the provider-cache database and its two tables if missing."""
    try:
        control.makeFile(control.dataPath)
        self.sourceFile = control.providercacheFile
        dbcon = database.connect(self.sourceFile)
        dbcur = dbcon.cursor()
        # rel_url caches resolved provider URLs per (source, imdb, season, episode).
        dbcur.execute(
            "CREATE TABLE IF NOT EXISTS rel_url ("
            "source TEXT, imdb_id TEXT, season TEXT, episode TEXT, rel_url TEXT, "
            "UNIQUE(source, imdb_id, season, episode));"
        )
        # rel_src caches scraped source lists plus a timestamp for expiry checks.
        dbcur.execute(
            "CREATE TABLE IF NOT EXISTS rel_src ("
            "source TEXT, imdb_id TEXT, season TEXT, episode TEXT, hosts TEXT, "
            "added TEXT, UNIQUE(source, imdb_id, season, episode));"
        )
        dbcur.connection.commit()
        dbcon.close()
    except:
        import traceback
        traceback.print_exc()
def getMovieSource(self, title, localtitle, aliases, year, imdb, source, call):
    """Return cached sources for one movie provider, or scrape and cache them.

    Results are appended to ``self.sources``.  Fix: every SELECT/DELETE now
    uses qmark SQL parameters (as the INSERTs already did) instead of ``%``
    string interpolation, so ids containing quotes can neither break the
    statement nor inject SQL.  The connection is also always closed on the
    final step.
    """
    try:
        dbcon = database.connect(self.sourceFile)
        dbcur = dbcon.cursor()
    except:
        pass
    # Fix to stop items passed with a 0 IMDB id pulling old unrelated
    # sources from the database.
    if imdb == '0':
        try:
            dbcur.execute("DELETE FROM rel_src WHERE source = ? AND imdb_id = ? AND season = ? AND episode = ?", (source, imdb, '', ''))
            dbcur.execute("DELETE FROM rel_url WHERE source = ? AND imdb_id = ? AND season = ? AND episode = ?", (source, imdb, '', ''))
            dbcur.connection.commit()
        except:
            import traceback
            traceback.print_exc()
    try:
        # Serve from cache when the entry is younger than 60 minutes.
        sources = []
        dbcur.execute("SELECT * FROM rel_src WHERE source = ? AND imdb_id = ? AND season = ? AND episode = ?", (source, imdb, '', ''))
        match = dbcur.fetchone()
        if not match is None:
            t1 = int(re.sub('[^0-9]', '', str(match[5])))
            t2 = int(datetime.datetime.now().strftime("%Y%m%d%H%M"))
            update = abs(t2 - t1) > 60
            if update is False:
                # NOTE: rows are written by this class via repr(); eval() here
                # only ever sees the local cache file's own output.
                sources = eval(match[4].encode('utf-8'))
                return self.sources.extend(sources)
    except:
        import traceback
        traceback.print_exc()
    try:
        url = None
        dbcur.execute("SELECT * FROM rel_url WHERE source = ? AND imdb_id = ? AND season = ? AND episode = ?", (source, imdb, '', ''))
        url = dbcur.fetchone()
        if not url is None:
            url = eval(url[4].encode('utf-8'))
    except:
        import traceback
        traceback.print_exc()
    try:
        # Resolve the provider URL on a cache miss and persist it.
        if url is None:
            url = call.movie(imdb, title, localtitle, aliases, year)
        if not url is None:
            dbcur.execute("DELETE FROM rel_url WHERE source = ? AND imdb_id = ? AND season = ? AND episode = ?", (source, imdb, '', ''))
            dbcur.execute("INSERT INTO rel_url Values (?, ?, ?, ?, ?)", (source, imdb, '', '', repr(url)))
            dbcur.connection.commit()
    except:
        import traceback
        traceback.print_exc()
    try:
        sources = call.sources(url, self.hostDict, self.hostprDict)
        if not sources is None and not sources == []:
            # De-duplicate source dicts via their canonical JSON form.
            sources = [json.loads(t) for t in set(json.dumps(d, sort_keys=True) for d in sources)]
            for i in sources:
                i.update({'provider': source})
            self.sources.extend(sources)
            dbcur.execute("DELETE FROM rel_src WHERE source = ? AND imdb_id = ? AND season = ? AND episode = ?", (source, imdb, '', ''))
            dbcur.execute("INSERT INTO rel_src Values (?, ?, ?, ?, ?, ?)", (source, imdb, '', '', repr(sources), datetime.datetime.now().strftime("%Y-%m-%d %H:%M")))
            dbcur.connection.commit()
        dbcon.close()
    except:
        import traceback
        traceback.print_exc()
        dbcon.close()
def getEpisodeSource(self, title, year, imdb, tvdb, season, episode, tvshowtitle, localtvshowtitle, aliases, premiered, source, call):
    """Return cached sources for one episode provider, or scrape and cache them.

    Results are appended to ``self.sources``.  Fix: every SELECT/DELETE now
    uses qmark SQL parameters (as the INSERTs already did) instead of ``%``
    string interpolation, so ids containing quotes can neither break the
    statement nor inject SQL.
    """
    try:
        dbcon = database.connect(self.sourceFile)
        dbcur = dbcon.cursor()
    except:
        pass
    try:
        # Serve from cache when the entry is younger than 60 minutes.
        sources = []
        dbcur.execute("SELECT * FROM rel_src WHERE source = ? AND imdb_id = ? AND season = ? AND episode = ?", (source, imdb, season, episode))
        match = dbcur.fetchone()
        if not match is None:
            t1 = int(re.sub('[^0-9]', '', str(match[5])))
            t2 = int(datetime.datetime.now().strftime("%Y%m%d%H%M"))
            update = abs(t2 - t1) > 60
            if update is False:
                # NOTE: rows are written by this class via repr(); eval() here
                # only ever sees the local cache file's own output.
                sources = eval(match[4].encode('utf-8'))
                return self.sources.extend(sources)
    except:
        import traceback
        traceback.print_exc()
    try:
        # Show-level URL is cached with empty season/episode columns.
        url = None
        dbcur.execute("SELECT * FROM rel_url WHERE source = ? AND imdb_id = ? AND season = ? AND episode = ?", (source, imdb, '', ''))
        url = dbcur.fetchone()
        if not url is None:
            url = eval(url[4].encode('utf-8'))
    except:
        import traceback
        traceback.print_exc()
    try:
        if url is None:
            url = call.tvshow(imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year)
        if not url is None:
            dbcur.execute("DELETE FROM rel_url WHERE source = ? AND imdb_id = ? AND season = ? AND episode = ?", (source, imdb, '', ''))
            dbcur.execute("INSERT INTO rel_url Values (?, ?, ?, ?, ?)", (source, imdb, '', '', repr(url)))
            dbcur.connection.commit()
    except:
        import traceback
        traceback.print_exc()
    try:
        # Episode-level URL is cached with the concrete season/episode keys.
        ep_url = None
        dbcur.execute("SELECT * FROM rel_url WHERE source = ? AND imdb_id = ? AND season = ? AND episode = ?", (source, imdb, season, episode))
        ep_url = dbcur.fetchone()
        if not ep_url is None:
            ep_url = eval(ep_url[4].encode('utf-8'))
    except:
        import traceback
        traceback.print_exc()
    try:
        if not url is None:
            if ep_url is None:
                ep_url = call.episode(url, imdb, tvdb, title, premiered, season, episode)
        if not ep_url is None:
            dbcur.execute("DELETE FROM rel_url WHERE source = ? AND imdb_id = ? AND season = ? AND episode = ?", (source, imdb, season, episode))
            dbcur.execute("INSERT INTO rel_url Values (?, ?, ?, ?, ?)", (source, imdb, season, episode, repr(ep_url)))
            dbcur.connection.commit()
    except:
        import traceback
        traceback.print_exc()
    try:
        sources = call.sources(ep_url, self.hostDict, self.hostprDict)
        if not sources is None and not sources == []:
            # De-duplicate source dicts via their canonical JSON form.
            sources = [json.loads(t) for t in set(json.dumps(d, sort_keys=True) for d in sources)]
            for i in sources:
                i.update({'provider': source})
            self.sources.extend(sources)
            dbcur.execute("DELETE FROM rel_src WHERE source = ? AND imdb_id = ? AND season = ? AND episode = ?", (source, imdb, season, episode))
            dbcur.execute("INSERT INTO rel_src Values (?, ?, ?, ?, ?, ?)", (source, imdb, season, episode, repr(sources), datetime.datetime.now().strftime("%Y-%m-%d %H:%M")))
            dbcur.connection.commit()
        dbcon.close()
    except:
        import traceback
        traceback.print_exc()
        dbcon.close()
def alterSources(self, url, meta):
    """Re-launch the sources plugin with a select-mode flag appended to url."""
    try:
        if control.setting('hosts.mode') == '2':
            suffix = '&select=1'
        else:
            # Episodes use select mode 1, movies use mode 2.
            suffix = '&select=1' if 'episode' in meta else '&select=2'
        control.execute('RunPlugin(%s)' % (url + suffix))
    except:
        pass
def sourcesFilter(self):
provider = control.setting('hosts.sort.provider')
if provider == '':
provider = 'false'
debrid_only = control.setting('debrid.only')
if debrid_only == '':
debrid_only = 'false'
sortthemup = control.setting('torrent.sort.them.up')
if sortthemup == '':
sortthemup = 'false'
quality = control.setting('hosts.quality')
if quality == '':
quality = '0'
captcha = control.setting('hosts.captcha')
if captcha == '':
captcha = 'true'
HEVC = control.setting('HEVC')
###---Filter out duplicates
filter = []
for i in self.sources:
a = i['url'].lower()
for sublist in filter:
b = sublist['url'].lower()
if 'magnet:' in a and debrid.status() is True:
info_hash = re.search('magnet:.+?urn:\w+:([a-z0-9]+)', a)
if info_hash:
if info_hash.group(1) in b:
filter.remove(sublist)
log_utils.log('Removing %s - %s (DUPLICATE TORRENT) ALREADY IN :: %s' % (i['provider'], info_hash.group(1), sublist['provider']), log_utils.LOGDEBUG)
break
elif a == b:
filter.remove(sublist)
log_utils.log('Removing %s - %s (DUPLICATE LINK) ALREADY IN :: %s' % (i['provider'], i['url'], sublist['source']), log_utils.LOGDEBUG)
break
filter.append(i)
log_utils.log('Removed %s duplicate sources from list' % (len(self.sources) - len(filter)), log_utils.LOGDEBUG)
self.sources = filter
###---
##---Filter out uncached torrents
# filter = []
# for i in self.sources:
# a = i['url'].lower()
# from resources.lib.modules import premiumize
# cached = premiumize.PremiumizeMe.check_cache(a)
# log_utils.log('cached = %s' % cached, log_utils.LOGDEBUG)
# for sublist2 in filter:
# b = sublist2['url'].lower()
# # if 'magnet:' in a and debrid.status() is True:
# if (a.endswith('.torrent') or 'magnet:' in a) and debrid.status() is True:
# log_utils.log('line 942 from sources.py', log_utils.LOGDEBUG)
# from resources.lib.modules import premiumize
# cached = premiumize.PremiumizeMe.check_cache(a)
# log_utils.log('cached = %s' % cached, log_utils.LOGDEBUG)
# if cached is False:
# # log_utils.log('hello from filter', log_utils.LOGDEBUG)
# filter.remove(sublist2)
# log_utils.log('Removing %s - %s (DUPLICATE uncached TORRENT) :: %s' % (i['provider'], info_hash.group(1), sublist['provider']), log_utils.LOGDEBUG)
# break
# elif a == b:
# filter.remove(sublist2)
# log_utils.log('Removing %s - %s (DUPLICATE LINK) ALREADY IN :: %s' % (i['provider'], i['url'], sublist['source']), log_utils.LOGDEBUG)
# break
# filter.append(i)
# log_utils.log('Removed %s duplicate sources from list' % (len(self.sources) - len(filter)), log_utils.LOGDEBUG)
# self.sources | |
# f8a_report/report_helper.py
"""Various utility functions used across the repo."""
import os
import json
import logging
from datetime import datetime as dt
import psycopg2
import psycopg2.extras
import itertools
import boto3
from psycopg2 import sql
from collections import Counter
from botocore.exceptions import ClientError
# Module-level logger named after the module's dotted path, per logging
# convention; getLogger(__file__) would key the logger on a filesystem path.
logger = logging.getLogger(__name__)
class Postgres:
    """Postgres connection session handler."""

    def __init__(self):
        """Open a connection and cursor using environment-driven settings."""
        settings = {
            'host': os.getenv('PGBOUNCER_SERVICE_HOST', 'bayesian-pgbouncer'),
            'dbname': os.getenv('POSTGRESQL_DATABASE', 'coreapi'),
            'user': os.getenv('POSTGRESQL_USER', 'coreapi'),
            'password': os.getenv('POSTGRESQL_PASSWORD', '<PASSWORD>'),
        }
        conn_string = ("host='{host}' dbname='{dbname}' "
                       "user='{user}' password='{password}'").format(**settings)
        self.conn = psycopg2.connect(conn_string)
        self.cursor = self.conn.cursor()
class S3Helper:
    """Helper class for storing reports to S3."""

    def __init__(self):
        """Read AWS/S3 settings from the environment and build the S3 resource.

        Raises
        ------
        ValueError
            If any required piece of AWS/S3 configuration is missing.
        """
        self.region_name = os.environ.get('AWS_S3_REGION') or 'us-east-1'
        self.aws_s3_access_key = os.environ.get('AWS_S3_ACCESS_KEY_ID')
        self.aws_s3_secret_access_key = os.environ.get('AWS_S3_SECRET_ACCESS_KEY')
        self.deployment_prefix = os.environ.get('DEPLOYMENT_PREFIX') or 'dev'
        self.report_bucket_name = os.environ.get('REPORT_BUCKET_NAME')
        # Fix: the error message always demanded REPORT_BUCKET_NAME, but the
        # original check never validated it; it is now checked as well.
        if self.aws_s3_secret_access_key is None or self.aws_s3_access_key is None or\
                self.region_name is None or self.deployment_prefix is None or\
                self.report_bucket_name is None:
            raise ValueError("AWS credentials or S3 configuration was "
                             "not provided correctly. Please set the AWS_S3_REGION, "
                             "AWS_S3_ACCESS_KEY_ID, AWS_S3_SECRET_ACCESS_KEY, REPORT_BUCKET_NAME "
                             "and DEPLOYMENT_PREFIX correctly.")
        # S3 endpoint URL is required only for local deployments
        # NOTE(review): s3_endpoint_url is stored but not passed to
        # boto3.resource below — confirm whether local deployments need it wired in.
        self.s3_endpoint_url = os.environ.get('S3_ENDPOINT_URL') or 'http://localhost'
        self.s3 = boto3.resource('s3', region_name=self.region_name,
                                 aws_access_key_id=self.aws_s3_access_key,
                                 aws_secret_access_key=self.aws_s3_secret_access_key)

    def store_json_content(self, content, bucket_name, obj_key):
        """Store the report content to the S3 storage."""
        try:
            logger.info('Storing the report into the S3 file %s' % obj_key)
            self.s3.Object(bucket_name, obj_key).put(
                Body=json.dumps(content, indent=2).encode('utf-8'))
        except Exception as e:
            logger.exception('%r' % e)

    def read_json_object(self, bucket_name, obj_key):
        """Get the report json object found on the S3 bucket.

        Returns the decoded JSON content, or None when the key/bucket is
        missing or any other client error occurs (errors are logged).
        """
        try:
            obj = self.s3.Object(bucket_name, obj_key)
            result = json.loads(obj.get()['Body'].read().decode('utf-8'))
            return result
        except ClientError as e:
            if e.response['Error']['Code'] == 'NoSuchKey':
                logger.exception('No Such Key %s exists' % obj_key)
            elif e.response['Error']['Code'] == 'NoSuchBucket':
                logger.exception('ERROR - No Such Bucket %s exists' % bucket_name)
            else:
                logger.exception('%r' % e)
            return None

    def list_objects(self, bucket_name, frequency):
        """Fetch the list of objects found on the S3 bucket.

        Only keys under '<deployment_prefix>/<frequency>' that name a file
        (non-empty basename) are returned, as {'objects': [key, ...]}.
        """
        prefix = '{dp}/{freq}'.format(dp=self.deployment_prefix, freq=frequency)
        res = {'objects': []}
        try:
            for obj in self.s3.Bucket(bucket_name).objects.filter(Prefix=prefix):
                if os.path.basename(obj.key) != '':
                    res['objects'].append(obj.key)
        except ClientError as e:
            if e.response['Error']['Code'] == 'NoSuchKey':
                logger.exception('ERROR - No Such Key %s exists' % prefix)
            elif e.response['Error']['Code'] == 'NoSuchBucket':
                logger.exception('ERROR - No Such Bucket %s exists' % bucket_name)
            else:
                logger.exception('%r' % e)
        return res
class ReportHelper:
"""Stack Analyses report helper functions."""
def __init__(self):
    """Init method for the Report helper class."""
    self.s3 = S3Helper()
    self.pg = Postgres()
    # Share the Postgres session's connection and cursor directly.
    self.conn = self.pg.conn
    self.cursor = self.pg.cursor
    # Per-ecosystem S3 buckets used to stash model-retraining data.
    self.npm_model_bucket = os.getenv('NPM_MODEL_BUCKET', 'cvae-insights')
    self.maven_model_bucket = os.getenv('MAVEN_MODEL_BUCKET', 'hpf-insights')
    self.pypi_model_bucket = os.getenv('PYPI_MODEL_BUCKET', 'hpf-insights')
    self.golang_model_bucket = os.getenv('GOLANG_MODEL_BUCKET', 'golang-insights')
def validate_and_process_date(self, some_date):
    """Validate that *some_date* is a YYYY-MM-DD string and return it unchanged.

    Raises ValueError when the string does not parse as that format.
    """
    try:
        dt.strptime(some_date, '%Y-%m-%d')
    except ValueError:
        raise ValueError("Incorrect data format, should be YYYY-MM-DD")
    else:
        return some_date
def retrieve_stack_analyses_ids(self, start_date, end_date):
    """Fetch ids of stack-analysis requests submitted within the date range."""
    try:
        start_date = self.validate_and_process_date(start_date)
        end_date = self.validate_and_process_date(end_date)
    except ValueError:
        raise ValueError("Invalid date format")
    # Identifiers are composed via psycopg2.sql to avoid SQL injection;
    # the dates were validated as YYYY-MM-DD above.
    query = sql.SQL('SELECT {} FROM {} WHERE {} BETWEEN \'%s\' AND \'%s\'').format(
        sql.Identifier('id'), sql.Identifier('stack_analyses_request'),
        sql.Identifier('submitTime')
    )
    self.cursor.execute(query.as_string(self.conn) % (start_date, end_date))
    # Each row holds a single id column; flatten the row tuples into one list.
    return [column for row in self.cursor.fetchall() for column in row]
def flatten_list(self, alist):
    """Convert a list of lists to a single list."""
    return [element for sublist in alist for element in sublist]
def datediff_in_millisecs(self, start_date, end_date):
    """Return the difference of two datetime strings in milliseconds.

    Fix: the original read ``timedelta.microseconds``, which only carries the
    sub-second component, so any difference of one second or more was silently
    wrong; ``total_seconds()`` covers the whole span.
    """
    fmt = '%Y-%m-%dT%H:%M:%S.%f'
    delta = dt.strptime(end_date, fmt) - dt.strptime(start_date, fmt)
    return delta.total_seconds() * 1000
def populate_key_count(self, in_list=None):
    """Generate a dict mapping each list element to its frequency.

    Fixes: the mutable default argument ``[]`` is replaced with ``None``
    (backward compatible — calling with no argument still yields ``{}``),
    and the dict check uses ``isinstance`` instead of ``type(...) ==``.
    Dict elements are unhashable keys and are logged and skipped; on any
    other iteration failure an empty dict is returned.
    """
    out_dict = {}
    try:
        for item in in_list or []:
            if isinstance(item, dict):
                logger.error('Unexpected key encountered %r' % item)
                continue
            out_dict[item] = out_dict.get(item, 0) + 1
    except (IndexError, KeyError, TypeError) as e:
        logger.exception('Error: %r' % e)
        return {}
    return out_dict
def set_unique_stack_deps_count(self, unique_stacks_with_recurrence_count):
    """Map each unique stack string to its dependency count, per ecosystem.

    A stack is a comma-separated "package version" string; its dependency
    count is simply the number of comma-separated entries.
    """
    return {
        eco: {stack: len(stack.split(',')) for stack in stacks}
        for eco, stacks in unique_stacks_with_recurrence_count.items()
    }
def normalize_deps_list(self, deps):
    """Flatten dependency dicts into a sorted list of "package version" strings."""
    return sorted('{package} {version}'.format(package=dep['package'],
                                               version=dep['version'])
                  for dep in deps)
def collate_raw_data(self, unique_stacks_with_recurrence_count, frequency):
    """Collate previous raw data with this week/month data."""
    result = {}
    # Get collated user input data
    collated_user_input_obj_key = '{depl_prefix}/user-input-data/collated-{freq}.json'.format(
        depl_prefix=self.s3.deployment_prefix, freq=frequency)
    collated_user_input = self.s3.read_json_object(bucket_name=self.s3.report_bucket_name,
                                                   obj_key=collated_user_input_obj_key) or {}
    # Merge per ecosystem; Counter addition sums the frequencies of stacks
    # present in both the fresh and the previously collated data.
    for eco in unique_stacks_with_recurrence_count.keys() | collated_user_input.keys():
        result.update({eco: dict(
            Counter(unique_stacks_with_recurrence_count.get(eco)) +
            Counter(collated_user_input.get(eco)))
        })
    # Store user input collated data back to S3
    self.s3.store_json_content(content=result, bucket_name=self.s3.report_bucket_name,
                               obj_key=collated_user_input_obj_key)
    # Get collated big query data
    collated_big_query_obj_key = '{depl_prefix}/big-query-data/collated.json'.format(
        depl_prefix=self.s3.deployment_prefix)
    collated_big_query_data = self.s3.read_json_object(bucket_name=self.s3.report_bucket_name,
                                                       obj_key=collated_big_query_obj_key) or {}
    # NOTE(review): big-query counts are merged into the returned result only
    # AFTER the user-input collation was persisted above — presumably so the
    # stored user-input file stays big-query free; confirm this is intended.
    for eco in result.keys() | collated_big_query_data.keys():
        result.update({
            eco: dict(Counter(result.get(eco)) +
                      Counter(collated_big_query_data.get(eco)))
        })
    return result
def store_training_data(self, result):
    """Store Training Data for each ecosystem in their respective buckets."""
    model_version = dt.now().strftime('%Y-%m-%d')
    # Ecosystem -> retraining bucket lookup replaces the if/elif chain.
    bucket_by_eco = {
        'maven': self.maven_model_bucket,
        'pypi': self.pypi_model_bucket,
        'go': self.golang_model_bucket,
        'npm': self.npm_model_bucket,
    }
    for eco, stacks in result.items():
        obj_key = '{eco}/{depl_prefix}/{model_version}/data/manifest.json'.format(
            eco=eco, depl_prefix=self.s3.deployment_prefix, model_version=model_version)
        # Collect each distinct stack (identified by its version-stripped
        # package names) exactly once.
        seen_signatures = set()
        package_list_for_eco = []
        for packages in stacks:
            package_list = [x.strip().split(' ')[0] for x in packages.split(',')]
            signature = "".join(package_list)
            if signature not in seen_signatures:
                seen_signatures.add(signature)
                package_list_for_eco.append(package_list)
        training_data = {
            'ecosystem': eco,
            'package_list': package_list_for_eco
        }
        # Unknown ecosystems have no retraining bucket and are skipped.
        bucket_name = bucket_by_eco.get(eco)
        if bucket_name is None:
            continue
        if bucket_name:
            logger.info('Storing user-input stacks for ecosystem {eco} at {dir}'.format(
                eco=eco, dir=bucket_name + obj_key))
            self.s3.store_json_content(content=training_data, bucket_name=bucket_name,
                                       obj_key=obj_key)
def normalize_worker_data(self, start_date, end_date, stack_data, worker, frequency='weekly'):
    """Normalize worker data for reporting.

    Parses ``stack_data`` (a JSON string of worker-result rows), aggregates
    per-ecosystem metrics into a report template, persists training data and
    the report on S3, and returns the report dict.  Only the
    'stack_aggregator_v2' worker is handled; any other worker returns None.

    Fix: the total average response time previously divided by
    ``len(template['stacks_details'])`` unconditionally, raising
    ZeroDivisionError when no stack survived filtering; it is now guarded.
    """
    total_stack_requests = {'all': 0, 'npm': 0, 'maven': 0}
    # Monthly reports are named YYYY-MM, weekly ones YYYY-MM-DD.
    if frequency == 'monthly':
        report_name = dt.strptime(end_date, '%Y-%m-%d').strftime('%Y-%m')
    else:
        report_name = dt.strptime(end_date, '%Y-%m-%d').strftime('%Y-%m-%d')
    stack_data = json.loads(stack_data)
    template = {
        'report': {
            'from': start_date,
            'to': end_date,
            'generated_on': dt.now().isoformat('T')
        },
        'stacks_summary': {},
        'stacks_details': []
    }
    all_deps = {'npm': [], 'maven': []}
    all_unknown_deps = {'npm': [], 'maven': []}
    all_unknown_lic = []
    all_cve_list = []
    total_response_time = {'all': 0.0, 'npm': 0.0, 'maven': 0.0}
    if worker == 'stack_aggregator_v2':
        stacks_list = {'npm': [], 'maven': []}
        for data in stack_data:
            stack_info_template = {
                'ecosystem': '',
                'stack': [],
                'unknown_dependencies': [],
                'license': {
                    'conflict': False,
                    'unknown': []
                },
                'security': {
                    'cve_list': [],
                },
                'response_time': ''
            }
            try:
                user_stack_info = data[0]['stack_data'][0]['user_stack_info']
                # Requests without any dependencies carry no signal; skip them.
                if len(user_stack_info['dependencies']) == 0:
                    continue
                stack_info_template['ecosystem'] = user_stack_info['ecosystem']
                total_stack_requests['all'] += 1
                total_stack_requests[stack_info_template['ecosystem']] += 1
                stack_info_template['stack'] = self.normalize_deps_list(
                    user_stack_info['dependencies'])
                all_deps[user_stack_info['ecosystem']].append(stack_info_template['stack'])
                stack_str = ','.join(stack_info_template['stack'])
                stacks_list[user_stack_info['ecosystem']].append(stack_str)
                # Harmonize key name ('name' -> 'package') before normalizing.
                unknown_dependencies = []
                for dep in user_stack_info['unknown_dependencies']:
                    dep['package'] = dep.pop('name')
                    unknown_dependencies.append(dep)
                stack_info_template['unknown_dependencies'] = self.normalize_deps_list(
                    unknown_dependencies)
                all_unknown_deps[user_stack_info['ecosystem']].\
                    append(stack_info_template['unknown_dependencies'])
                stack_info_template['license']['unknown'] = \
                    user_stack_info['license_analysis']['unknown_licenses']['really_unknown']
                all_unknown_lic.append(stack_info_template['license']['unknown'])
                for pkg in user_stack_info['analyzed_dependencies']:
                    for cve in pkg['security']:
                        stack_info_template['security']['cve_list'].append(cve)
                        all_cve_list.append('{cve}:{cvss}'.
                                            format(cve=cve['CVE'], cvss=cve['CVSS']))
                ended_at, started_at = \
                    data[0]['_audit']['ended_at'], data[0]['_audit']['started_at']
                response_time = self.datediff_in_millisecs(started_at, ended_at)
                stack_info_template['response_time'] = '%f ms' % response_time
                total_response_time['all'] += response_time
                total_response_time[stack_info_template['ecosystem']] += response_time
                template['stacks_details'].append(stack_info_template)
            except (IndexError, KeyError, TypeError) as e:
                # Malformed rows are logged and skipped; the report still builds.
                logger.exception('Error: %r' % e)
                continue
        unique_stacks_with_recurrence_count = {
            'npm': self.populate_key_count(stacks_list['npm']),
            'maven': self.populate_key_count(stacks_list['maven'])
        }
        # Collate Data from Previous Month for Model Retraining
        collated_data = self.collate_raw_data(unique_stacks_with_recurrence_count, frequency)
        # Store ecosystem specific data to their respective Training Buckets
        if frequency == 'weekly':
            self.store_training_data(collated_data)
        unique_stacks_with_deps_count =\
            self.set_unique_stack_deps_count(unique_stacks_with_recurrence_count)
        avg_response_time = {}
        if total_stack_requests['npm'] > 0:
            avg_response_time['npm'] = total_response_time['npm'] / total_stack_requests['npm']
        else:
            avg_response_time['npm'] = 0
        if total_stack_requests['maven'] > 0:
            avg_response_time['maven'] = \
                total_response_time['maven'] / total_stack_requests['maven']
        else:
            avg_response_time['maven'] = 0
        # Guard against an empty details list (previously ZeroDivisionError).
        stacks_count = len(template['stacks_details'])
        overall_avg_response_time = \
            total_response_time['all'] / stacks_count if stacks_count else 0
        # generate aggregated data section
        template['stacks_summary'] = {
            'total_stack_requests_count': total_stack_requests['all'],
            'npm': {
                'stack_requests_count': total_stack_requests['npm'],
                'unique_dependencies_with_frequency':
                    self.populate_key_count(self.flatten_list(all_deps['npm'])),
                'unique_unknown_dependencies_with_frequency':
                    self.populate_key_count(self.flatten_list(all_unknown_deps['npm'])),
                'unique_stacks_with_frequency': unique_stacks_with_recurrence_count['npm'],
                'unique_stacks_with_deps_count': unique_stacks_with_deps_count['npm'],
                'average_response_time': '{} ms'.format(avg_response_time['npm'])
            },
            'maven': {
                'stack_requests_count': total_stack_requests['maven'],
                'total_stack_requests_count': total_stack_requests['maven'],
                'unique_dependencies_with_frequency':
                    self.populate_key_count(self.flatten_list(all_deps['maven'])),
                'unique_unknown_dependencies_with_frequency':
                    self.populate_key_count(self.flatten_list(all_unknown_deps['maven'])),
                'unique_stacks_with_frequency': unique_stacks_with_recurrence_count['maven'],
                'unique_stacks_with_deps_count': unique_stacks_with_deps_count['maven'],
                'average_response_time': '{} ms'.format(avg_response_time['maven'])
            },
            'unique_unknown_licenses_with_frequency':
                self.populate_key_count(self.flatten_list(all_unknown_lic)),
            'unique_cves':
                self.populate_key_count(all_cve_list),
            'total_average_response_time':
                '{} ms'.format(overall_avg_response_time),
        }
        try:
            obj_key = '{depl_prefix}/{freq}/{report_name}.json'.format(
                depl_prefix=self.s3.deployment_prefix, freq=frequency, report_name=report_name
            )
            self.s3.store_json_content(content=template, obj_key=obj_key,
                                       bucket_name=self.s3.report_bucket_name)
        except Exception as e:
            logger.exception('Unable to store the report on S3. Reason: %r' % e)
        return template
    else:
        # todo: user feedback aggregation based on the recommendation task results
        return None
def retrieve_worker_results(self, start_date, end_date, id_list=[], worker_list=[],
frequency='weekly'):
"""Retrieve results for selected worker from RDB."""
result = {}
# convert the elements of the id_list to sql.Literal
# so that the SQL query statement contains the IDs within quotes
id_list = list(map(sql.Literal, id_list))
ids = sql.SQL(', ').join(id_list).as_string(self.conn)
for worker in worker_list:
query = sql.SQL('SELECT {} FROM {} WHERE {} IN (%s) AND {} = \'%s\'').format(
sql.Identifier('task_result'), sql.Identifier('worker_results'),
sql.Identifier('external_request_id'), sql.Identifier('worker')
)
self.cursor.execute(query.as_string(self.conn) % (ids, worker))
data = json.dumps(self.cursor.fetchall())
# associate the retrieved data to the worker name
result[worker] = self.normalize_worker_data(start_date, | |
# Source: TanayGahlot/mne-python
"""Conversion tool from Brain Vision EEG to FIF"""
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import os
import time
import re
import warnings
import numpy as np
from ...utils import verbose, logger
from ..constants import FIFF
from ..meas_info import _empty_info
from ..base import _BaseRaw, _check_update_montage
from ...externals.six import StringIO, u
from ...externals.six.moves import configparser
class RawBrainVision(_BaseRaw):
"""Raw object from Brain Vision EEG file
Parameters
----------
vhdr_fname : str
Path to the EEG header file.
montage : str | None | instance of Montage
Path or instance of montage containing electrode positions.
If None, sensor locations are (0,0,0).
eog : list or tuple
Names of channels or list of indices that should be designated
EOG channels. Values should correspond to the vhdr file.
Default is ('HEOGL', 'HEOGR', 'VEOGb').
misc : list or tuple
Names of channels or list of indices that should be designated
MISC channels. Values should correspond to the electrodes
in the vhdr file. Default is None.
reference : None | str
Name of the electrode which served as the reference in the recording.
If a name is provided, a corresponding channel is added and its data
is set to 0. This is useful for later re-referencing. The name should
correspond to a name in elp_names.
scale : float
The scaling factor for EEG data. Units are in volts. Default scale
factor is 1. For microvolts, the scale factor would be 1e-6. This is
used when the header file does not specify the scale factor.
preload : bool
If True, all data are loaded at initialization.
If False, data are not read until save.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
"""
@verbose
def __init__(self, vhdr_fname, montage=None,
eog=('HEOGL', 'HEOGR', 'VEOGb'), misc=None, reference=None,
scale=1., preload=False, verbose=None):
# Preliminary Raw attributes
self._events = np.empty((0, 3))
self.preload = False
# Channel info and events
logger.info('Extracting eeg Parameters from %s...' % vhdr_fname)
vhdr_fname = os.path.abspath(vhdr_fname)
if not isinstance(scale, (int, float)):
raise TypeError('Scale factor must be an int or float. '
'%s provided' % type(scale))
self.info, self._eeg_info, events = _get_eeg_info(vhdr_fname,
reference, eog,
misc)
self._eeg_info['scale'] = float(scale)
logger.info('Creating Raw.info structure...')
_check_update_montage(self.info, montage)
self.set_brainvision_events(events)
# Raw attributes
self.verbose = verbose
self._filenames = list()
self.rawdirs = list()
self.cals = np.ones(len(self.info['chs']))
self.orig_format = 'double'
self._projector = None
self.comp = None # no compensation for EEG
self.proj = False
self.first_samp = 0
with open(self.info['filename'], 'rb') as f:
f.seek(0, os.SEEK_END)
n_samples = f.tell()
dtype = int(self._eeg_info['dtype'][-1])
n_chan = self.info['nchan']
self.last_samp = (n_samples // (dtype * self._eeg_info['n_data_chan'])) - 1
self._reference = reference
self._raw_lengths = np.array([self.n_times])
self._first_samps = np.array([self.first_samp])
self._last_samps = np.array([self.last_samp])
if preload:
self.preload = preload
logger.info('Reading raw data from %s...' % vhdr_fname)
self._data, _ = self._read_segment()
assert len(self._data) == self.info['nchan']
# Add time info
self._times = np.arange(self.first_samp, self.last_samp + 1,
dtype=np.float64)
self._times /= self.info['sfreq']
logger.info(' Range : %d ... %d = %9.3f ... %9.3f secs'
% (self.first_samp, self.last_samp,
float(self.first_samp) / self.info['sfreq'],
float(self.last_samp) / self.info['sfreq']))
logger.info('Ready.')
def __repr__(self):
n_chan = self.info['nchan']
data_range = self.last_samp - self.first_samp + 1
s = ('%r' % os.path.basename(self.info['filename']),
"n_channels x n_times : %s x %s" % (n_chan, data_range))
return "<RawEEG | %s>" % ', '.join(s)
def _read_segment(self, start=0, stop=None, sel=None, verbose=None,
projector=None):
"""Read a chunk of raw data
Parameters
----------
start : int, (optional)
first sample to include (first is 0). If omitted, defaults to the
first sample in data.
stop : int, (optional)
First sample to not include.
If omitted, data is included to the end.
sel : array, optional
Indices of channels to select.
projector : array
SSP operator to apply to the data.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
data : array, shape (n_channels, n_samples)
The data.
times : array, shape (n_samples,)
returns the time values corresponding to the samples.
"""
if sel is not None:
if len(sel) == 1 and sel[0] == 0 and start == 0 and stop == 1:
return (666, 666)
if projector is not None:
raise NotImplementedError('Currently does not handle projections.')
if stop is None:
stop = self.last_samp + 1
elif stop > self.last_samp + 1:
stop = self.last_samp + 1
# Initial checks
start = int(start)
stop = int(stop)
if start >= stop:
raise ValueError('No data in this range')
# assemble channel information
eeg_info = self._eeg_info
sfreq = self.info['sfreq']
chs = self.info['chs']
units = eeg_info['units']
if self._reference:
chs = chs[:-1]
units = units[:-1]
if len(self._events):
chs = chs[:-1]
n_eeg = len(chs)
cals = np.atleast_2d([chan_info['cal'] for chan_info in chs])
cals *= eeg_info['scale'] * units
logger.info('Reading %d ... %d = %9.3f ... %9.3f secs...' %
(start, stop - 1, start / float(sfreq),
(stop - 1) / float(sfreq)))
# read data
dtype = np.dtype(eeg_info['dtype'])
buffer_size = (stop - start)
pointer = start * n_eeg * dtype.itemsize
with open(self.info['filename'], 'rb') as f:
f.seek(pointer)
# extract data
data_buffer = np.fromfile(f, dtype=dtype,
count=buffer_size * n_eeg)
if eeg_info['data_orientation'] == 'MULTIPLEXED':
data_buffer = data_buffer.reshape((n_eeg, -1), order='F')
elif eeg_info['data_orientation'] == 'VECTORIZED':
data_buffer = data_buffer.reshape((n_eeg, -1), order='C')
n_channels, n_times = data_buffer.shape
# Total number of channels
n_channels += int(self._reference is not None)
n_channels += int(len(self._events) > 0)
# Preallocate data array
data = np.empty((n_channels, n_times), dtype=np.float64)
data[:len(data_buffer)] = data_buffer # cast to float64
data[:len(data_buffer)] *= cals.T
ch_idx = len(data_buffer)
del data_buffer
# add reference channel and stim channel (if applicable)
if self._reference:
data[ch_idx] = 0.
ch_idx += 1
if len(self._events):
data[ch_idx] = _synthesize_stim_channel(self._events, start, stop)
ch_idx += 1
if sel is not None:
data = data.take(sel, axis=0)
logger.info('[done]')
times = np.arange(start, stop, dtype=float) / sfreq
return data, times
def get_brainvision_events(self):
    """Retrieve the events associated with the Brain Vision Raw object

    Returns
    -------
    events : array, shape (n_events, 3)
        Events, each row consisting of an (onset, duration, trigger)
        sequence.
    """
    # Hand back a copy so callers cannot mutate the internal event store.
    events = self._events
    return events.copy()
def set_brainvision_events(self, events):
    """Set the events (automatically updates the synthesized stim channel)

    Parameters
    ----------
    events : array, shape (n_events, 3)
        Events, each row consisting of an (onset, duration, trigger)
        sequence. Pass an empty array to remove all events (the
        synthesized stim channel is removed along with them).
    """
    events = np.copy(events)
    # BUG FIX: the original check ``not events.ndim == 2 and
    # events.shape[1] == 3`` parsed as ``(not ndim == 2) and
    # (shape[1] == 3)`` due to operator precedence, so malformed 2-D
    # input slipped through and 1-D input (including an empty array)
    # crashed with IndexError instead of raising ValueError.
    # An empty array is explicitly allowed: it clears the events.
    if len(events) > 0 and (events.ndim != 2 or events.shape[1] != 3):
        raise ValueError("[n_events x 3] shaped array required")
    # update info based on presence of stim channel
    had_events = bool(len(self._events))
    has_events = bool(len(events))
    if had_events and not has_events:  # remove stim channel
        # The stim channel is always appended last; refuse to guess if
        # the channel list was modified behind our back.
        if self.info['ch_names'][-1] != 'STI 014':
            err = "Last channel is not stim channel; info was modified"
            raise RuntimeError(err)
        self.info['nchan'] -= 1
        del self.info['ch_names'][-1]
        del self.info['chs'][-1]
        if self.preload:
            self._data = self._data[:-1]
    elif has_events and not had_events:  # add stim channel
        idx = len(self.info['chs']) + 1
        chan_info = {'ch_name': 'STI 014',
                     'kind': FIFF.FIFFV_STIM_CH,
                     'coil_type': FIFF.FIFFV_COIL_NONE,
                     'logno': idx,
                     'scanno': idx,
                     'cal': 1,
                     'range': 1,
                     'unit_mul': 0,
                     'unit': FIFF.FIFF_UNIT_NONE,
                     'eeg_loc': np.zeros(3),
                     'loc': np.zeros(12)}
        self.info['nchan'] += 1
        self.info['ch_names'].append(chan_info['ch_name'])
        self.info['chs'].append(chan_info)
        if self.preload:
            shape = (1, self._data.shape[1])
            self._data = np.vstack((self._data, np.empty(shape)))
    # update events and re-synthesize the stim channel if data is loaded
    self._events = events
    if has_events and self.preload:
        start = self.first_samp
        stop = self.last_samp + 1
        self._data[-1] = _synthesize_stim_channel(events, start, stop)
def _read_vmrk_events(fname):
"""Read events from a vmrk file
Parameters
----------
fname : str
vmrk file to be read.
Returns
-------
events : array, shape (n_events, 3)
An array containing the whole recording's events, each row representing
an event as (onset, duration, trigger) sequence.
"""
# read vmrk file
with open(fname) as fid:
txt = fid.read()
header = txt.split('\n')[0].strip()
start_tag = 'Brain Vision Data Exchange Marker File'
if not header.startswith(start_tag):
raise ValueError("vmrk file should start with %r" % start_tag)
end_tag = 'Version 1.0'
if not header.endswith(end_tag):
raise ValueError("vmrk file should be %r" % end_tag)
# extract Marker Infos block
m = re.search("\[Marker Infos\]", txt)
if not m:
return np.zeros(0)
mk_txt = txt[m.end():]
m = re.search("\[.*\]", mk_txt)
if m:
mk_txt = mk_txt[:m.start()]
# extract event information
items = re.findall("^Mk\d+=(.*)", mk_txt, re.MULTILINE)
events = []
for info in items:
mtype, mdesc, onset, duration = info.split(',')[:4]
try:
trigger = int(re.findall('[A-Za-z]*\s*?(\d+)', mdesc)[0])
onset = int(onset)
duration = int(duration)
events.append((onset, duration, trigger))
| |
escape_((service), True), '.com/users/', escape_((user_id), True), '/', escape_((username), True), '">', escape_((username), True), '</a></p>\n')
yield '', join_(' <table cellpadding="2" cellspacing="0">\n')
for question in loop.setup(result):
yield '', join_(' ', '<tr class="', escape_(loop.parity, True), '">\n')
yield '', join_(' ', ' <td class="printer">\n')
yield '', join_(' ', ' <a target="_blank" href="/export?question=', escape_((question.question_id), True), '&format=HTML&service=', escape_((question.service), True), '&linktohome=false"/>\n')
yield '', join_(' ', ' <img title="Printer-Friendly" src="images/printer_black.png"/>\n')
yield '', join_(' ', ' </a>\n')
yield '', join_(' ', ' </td>\n')
yield '', join_(' ', ' <td class="quicklook">\n')
yield '', join_(' ', ' <a onclick="javascript:quicklook(', escape_((question.question_id), True), ",'", escape_((question.service), True), '\');return false;" href="#"/>\n')
yield '', join_(' ', ' <img title="Quicklook" src="images/quicklook.png"/>\n')
yield '', join_(' ', ' </a>\n')
yield '', join_(' ', ' </td>\n')
yield '', join_(' ', ' <td class="counters">\n')
yield '', join_(' ', ' [', escape_(question.get_votes(), True), ']<br>[', escape_(question.answer_count, True), ']\n')
yield '', join_(' ', ' </td> \n')
yield '', join_(' ', ' <td class="title">\n')
yield '', join_(' ', ' <a target="_blank" href="', escape_(question.url, True), '"/>', escape_(htmlquote(question.title), True), '</a><br>\n')
yield '', join_(' ', ' <span class="tag">\n')
yield '', join_(' ', ' [', escape_((", ".join([tag for tag in question.tags_list])), True), ']\n')
yield '', join_(' ', ' </span>\n')
yield '', join_(' ', ' </td>\n')
yield '', join_(' ', ' <td class="date">\n')
yield '', join_(' ', ' [', escape_((question.creation_date.strftime('%Y-%m-%d')), True), ']\n')
yield '', join_(' ', ' </td>\n')
yield '', join_(' ', '</tr>\n')
if loop.last:
yield '', join_(' ', '</table>\n')
yield '', join_(' ', '<table id="pagination">\n')
yield '', join_(' ', ' <tr>\n')
yield '', join_(' ', ' <td class="pagination_found">Found: ', escape_(commify(pagination.total), True), '</td>\n')
yield '', join_(' ', ' <td class="pagination_page">\n')
if pagination.has_previous_entries():
yield '', join_(' ', ' <a href="/favorites?service=', escape_((service), True), '&userid=', escape_((user_id), True), '&page=', escape_((pagination.page-1), True), '&pagesize=', escape_((pagination.pagesize), True), '">« prev </a>\n')
for page in loop.setup(pagination.get_pretty_pagination()):
if page != -1:
yield '', join_(' ', '<a href="/favorites?service=', escape_((service), True), '&userid=', escape_((user_id), True), '&page=', escape_((page), True), '&pagesize=', escape_((pagination.pagesize), True), '">\n')
if page == pagination.page:
yield '', join_(' ', '|', escape_((page), True), '| \n')
else:
yield '', join_(' ', escape_(page, True), ' \n')
yield '', join_(' ', '</a>\n')
else:
yield '', join_(' ', escape_(pagination.separator, True), '\n')
if pagination.has_more_entries():
yield '', join_(' ', ' <a href="/favorites?service=', escape_((service), True), '&userid=', escape_((user_id), True), '&page=', escape_((pagination.page+1), True), '&pagesize=', escape_((pagination.pagesize), True), '"> next »</a>\n')
yield '', join_(' ', ' </td>\n')
yield '', join_(' ', ' <td class="pagination_pagesize">Pagesize: ', escape_(pagination.pagesize, True), '</td>\n')
yield '', join_(' ', ' </tr>\n')
yield '', join_(' ', '</table>\n')
else:
if len(result) == 0:
yield '', join_(' ', '<p id="not_found">\n')
yield '', join_(' ', ' No questions found\n')
yield '', join_(' ', '</p>\n')
yield '', join_(' </body>\n')
yield '', join_('</html>\n')
return __template__
# Rebind the module-level name from the generator function to its compiled
# wrapper; the function call returns the inner __template__ generator.
favorites_stackexchange = CompiledTemplate(favorites_stackexchange(), 'apps/app/views/favorites_stackexchange.html')
def export():
loop = ForLoop()
_dummy = CompiledTemplate(lambda: None, "dummy")
join_ = _dummy._join
escape_ = _dummy._escape
def __template__ (service, post, pretty_links, printer, link_to_home, pretty_print, comments, answer_id, hide_question, font_family):
yield '', join_('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n')
yield '', join_('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en-US">\n')
yield '', join_(' <head>\n')
yield '', join_(' <meta http-equiv="content-type" content="text/html; charset=UTF-8"/>\n')
yield '', join_(' <style>\n')
yield '', join_(' body {\n')
yield '', join_(' background: white;\n')
yield '', join_(' font-size: 10pt;\n')
yield '', join_(' font-family: "', escape_((font_family), True), '";\n')
yield '', join_(' text-align:justify;\n')
yield '', join_(' }\n')
yield '', join_(' </style>\n')
yield '', join_(' <link rel="stylesheet" href="/stylesheets/export.css?v=1"/>\n')
yield '', join_(' <link rel="shortcut icon" type="image/x-icon" href="/favicon.ico"/>\n')
yield '', join_(' <link rel="canonical" href="http://www.stackprinter.com/export?service=', escape_(service, True), '&question=', escape_((post.question['question_id']), True), '&printer=false&linktohome=true"/>\n')
yield '', join_(' <title>', escape_(post.question['title'], True), '</title>\n')
yield '', join_(' <script type="text/javascript" src="/javascripts/jquery-1.4.2.min.js"></script>\n')
yield '', join_(' <script type="text/javascript" src="/javascripts/main.js"></script>\n')
if pretty_print and 'Prettify' in supported_services.info[service].get('markdown_extensions',''):
yield '', join_(' ', ' <script type="text/javascript" src="/javascripts/prettify/prettify.js"></script>\n')
yield '', join_(' ', ' <link rel="stylesheet" href="/stylesheets/prettify.css"> \n')
yield '', join_(' ', ' <script type="text/javascript">jQuery(document).ready(function(){StyleCode();});</script>\n')
if 'MathJax' in supported_services.info[service].get('markdown_extensions',''):
yield '', join_(' ', '<script type="text/javascript" src="http://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS_HTML">\n')
yield '', join_(' ', '\n')
yield '', join_(' ', ' MathJax.Hub.Config({"HTML-CSS": { preferredFont: "TeX", availableFonts: ["STIX","TeX"] },\n')
yield '', join_(' ', ' tex2jax: { inlineMath: [ ["', '$', '", "', '$', '"], ["\\\\\\\\(","\\\\\\\\)"] ], displayMath: [ ["', '$', '$', '","', '$', '$', '"], ["\\\\[", "\\\\]"] ], processEscapes: true, ignoreClass: "tex2jax_ignore|dno" },\n')
yield '', join_(' ', ' TeX: {extensions: ["mhchem.js"], noUndefined: { attributes: { mathcolor: "red", mathbackground: "#FFEEEE", mathsize: "90%" } } },\n')
yield '', join_(' ', ' messageStyle: "none"\n')
yield '', join_(' ', ' });\n')
yield '', join_(' ', 'MathJax.Hub.Startup.onload();\n')
yield '', join_(' ', '</script>\n')
if printer:
yield '', join_(' ', '<script type="text/javascript">\n')
yield '', join_(' ', ' MathJax.Hub.Queue(["Delay",MathJax.Callback,700],Print)\n')
yield '', join_(' ', '</script>\n')
else:
if printer:
yield '', join_(' ', '<script type="text/javascript">\n')
yield '', join_(' ', ' jQuery(document).ready(function(){Print();});\n')
yield '', join_(' </script>\n')
yield '', join_(' </head>\n')
yield '', join_(' <body>\n')
yield '', join_(' <div id="home">\n')
if link_to_home:
yield '', join_(' ', '<a href="/"><img title="Back to home" width="20px" height="20px" src="/images/icon_home.png" style="border:0"/></a>\n')
yield '', join_(' <a href="http://www.stackprinter.com/export?format=HTML&service=', escape_(service, True), '&printer=false&question=', escape_((post.question['question_id']), True), '"><img title="Link to this printed question" width="20px" height="20px" alt="share" src="/images/Share.gif" style="border:0"/></a>\n')
yield '', join_(' </div>\n')
yield '', join_(' <div id="question-block"> \n')
yield '', join_(' <div id="question-title"> \n')
yield '', join_(' <img alt="', escape_((supported_services.info[service]['name']), True), '" src="', escape_((supported_services.info[service]['icon_url']), True), '"/>', escape_(htmlquote(post.question['title']), True), '<br/>\n')
yield '', join_(' </div>\n')
if not hide_question:
yield '', join_(' ', '<div id="donate">\n')
yield '', join_(' ', '[Please support Stackprinter with a donation]\n')
yield '', join_(' ', ' <form action="https://www.paypal.com/cgi-bin/webscr" method="post" target="_top">\n')
yield '', join_(' ', ' <input type="hidden" name="cmd" value="_donations" />\n')
yield '', join_(' ', ' <input type="hidden" name="business" value="YWAN3PD3PQE2L" />\n')
yield '', join_(' ', ' <input type="hidden" name="item_name" value="thank you for supporting Stackprinter" />\n')
yield '', join_(' ', ' <input type="hidden" name="currency_code" value="USD" />\n')
yield '', join_(' ', ' <input type="hidden" name="country" value="USA" /> \n')
yield '', join_(' ', ' <input type="image" src="https://www.paypalobjects.com/en_US/i/btn/btn_donateCC_LG.gif" border="0" name="submit" title="Please support Stackprinter making a PayPal donation!" alt="Donate with PayPal button" />\n')
yield '', join_(' ', ' <img alt="" border="0" src="https://www.paypal.com/en_US/i/scr/pixel.gif" width="1" height="1" />\n')
yield '', join_(' ', ' </form>\n')
yield '', join_(' ', '</div>\n')
yield '', join_(' ', '<div class="question-details">\n')
yield '', join_(' ', ' [', escape_((['','+'][(int(post.question['up_vote_count'])-int(post.question['down_vote_count']))>0]), True), escape_((int(post.question['up_vote_count'])-int(post.question['down_vote_count'])), True), '] [', escape_(post.question['answer_count'], True), ']\n')
yield '', join_(' ', ' ', escape_(post.question.get('owner', {'display_name':'community_owned'}).get('display_name','community_owned'), True), ' \n')
yield '', join_(' ', '</div>\n')
yield '', join_(' ', '<div class="question-details">\n')
yield '', join_(' ', ' [', escape_(date_from(float(post.question['creation_date'])), True), ']\n')
yield '', join_(' ', '</div>\n')
yield '', join_(' ', '<div class="question-details">\n')
yield '', join_(' ', ' [\n')
for tag in loop.setup(post.question['tags']):
yield '', join_(' ', escape_(tag, True), '\n')
yield '', join_(' ', ' ]\n')
yield '', join_(' ', '</div>\n')
yield '', join_(' ', '<div class="question-details">\n')
if post.question.has_key('link'):
yield '', join_(' ', '[ ', escape_(post.question['link'], True), ' ]\n')
else:
yield '', join_(' ', '[ http://', escape_((service), True), '.com/questions/', escape_((post.question['question_id']), True), ']\n')
if post.is_deleted():
yield '', join_(' ', '[DELETED]\n')
yield '', join_(' ', '</div> \n')
yield '', join_(' ', '<div id="question">\n')
if pretty_links:
(sup_question_body,sup_question_links) = suppify_body(post.question['body'])
yield '', join_(' ', escape_(sup_question_body, True), '\n')
yield '', join_(' ', '<div id="question-links">\n')
for key in loop.setup(sup_question_links.keys()):
yield '', join_(' ', '[', escape_((key), True), '] ', escape_((sup_question_links[key].replace("&","&")), True), '<br/>\n')
yield '', join_(' ', '</div>\n')
else:
yield '', join_(' ', escape_(post.question['body'], True), '\n')
yield '', join_(' ', '</div>\n')
if post.question.get('comments') and comments:
yield '', join_(' ', '<div class="question-comments"> \n')
for comment in loop.setup(post.question['comments']):
yield '', join_(' ', '<div class="comment">\n')
if int(comment['score']) > 0:
yield '', join_(' ', '(', escape_(comment['score'], True), ') \n')
yield '', join_(' ', ' ', escape_(comment['body'], True), ' - <b> ', escape_(comment.get('owner', {'display_name':'community_owned'}).get('display_name','community_owned'), True), '</b> \n')
yield '', join_(' ', '</div>\n')
yield '', join_(' ', '</div>\n')
yield '', join_(' <div class="answers">\n')
for answer_number, answer in loop.setup(enumerate(post.answers)):
if (not answer_id or answer['answer_id'] == int(answer_id)):
yield '', join_(' ', '<div class="answer-details">\n')
yield '', join_(' ', ' [', escape_((['','+'][(int(answer['up_vote_count'])-int(answer['down_vote_count']))>0]), True), escape_((int(answer['up_vote_count'])-int(answer['down_vote_count'])), True), ']\n')
yield '', join_(' ', ' [', escape_(date_from(float(answer['creation_date'])), True), '] \n')
yield '', join_(' ', ' ', escape_(answer.get('owner', {'display_name':'community_owned'}).get('display_name','community_owned'), True), '\n')
if bool(answer.get('accepted') or answer.get('is_accepted')):
yield '', join_(' ', '[<img height="17px" width="17px" src="/images/blackflag.png"/>ACCEPTED]\n')
yield '', join_(' ', '</div>\n')
yield '', join_(' ', '<div | |
none of above applied (should not happen)
return None
def processByInequality(self, reduced_reaction):
    """
    Cross-examine inequality and add arcs if possible.
    One example is, A + B -> C + D where A < C is given
    by existing arc.
    We can conclude B > D and add an arc.
    Return True if processed, False otherwise
    :param Reaction reduced_reaction:
    :return bool:
    """
    # Pair each reactant/product with its SOM and stoichiometry.
    reactants = collections.deque([
        SOMMoleculeStoichiometry(som=self.getNode(moles.molecule),
                                 molecule=moles.molecule,
                                 stoichiometry=moles.stoichiometry)
        for moles in reduced_reaction.reactants])
    products = collections.deque([
        SOMMoleculeStoichiometry(som=self.getNode(moles.molecule),
                                 molecule=moles.molecule,
                                 stoichiometry=moles.stoichiometry)
        for moles in reduced_reaction.products])
    reactant_soms = list({reactant.som for reactant in reactants})
    product_soms = list({product.som for product in products})
    # Classify every (reactant SOM, product SOM) pair by any arc that is
    # already present in the graph.
    reactant_lessthan_product = []
    product_lessthan_reactant = []
    no_relationship = []
    for pair in itertools.product(reactant_soms, product_soms):
        if self.has_edge(pair[0], pair[1]):
            reactant_lessthan_product.append(pair)
        elif self.has_edge(pair[1], pair[0]):
            product_lessthan_reactant.append(pair)
        else:
            no_relationship.append(pair)
    # Nothing to infer when every pair already has a known relationship.
    if not no_relationship:
        return False
    # If arcs point both ways we cannot draw a consistent conclusion.
    if reactant_lessthan_product and product_lessthan_reactant:
        return False

    def processPairs(pairs, small, big, idx_small, idx_big):
        # Aggregate stoichiometries of the SOMs related by existing arcs;
        # SOMs never matched by a pair remain in the "buffer"/"remaining"
        # lists and may receive a newly inferred arc.
        # Under product_lessthan_reactant: idx_small = 1, idx_big = 0,
        # small = products, big = reactants. soms_buffer is on the same
        # side as small, remaining_soms on the same side as big.
        big_som_stoichiometry = 0
        small_som_stoichiometry = 0
        soms_buffer = [pair[idx_small] for pair in no_relationship]
        remaining_soms = [pair[idx_big] for pair in no_relationship]
        for pair in pairs:
            big_som_stoichiometry += sum([
                sms.stoichiometry for sms in big if sms.som == pair[idx_big]])
            small_som_stoichiometry += sum([
                sms.stoichiometry for sms in small
                if sms.som == pair[idx_small]])
            if pair[idx_small] in soms_buffer:
                soms_buffer.remove(pair[idx_small])
            if pair[idx_big] in remaining_soms:
                remaining_soms.remove(pair[idx_big])
        # If the "bigger" side carries less total stoichiometry, the
        # existing relationships cannot be extended consistently.
        if big_som_stoichiometry < small_som_stoichiometry:
            return False
        elif soms_buffer:
            if len(soms_buffer) == 1:
                # Exactly one unmatched SOM on the small side: every
                # remaining big-side SOM must dominate it, so add arcs.
                for arc_source in remaining_soms:
                    # The SOMs cannot be the same because they were
                    # already reduced.
                    # BUG FIX: addArc() requires the justifying reaction
                    # as its third argument; the original call omitted it
                    # and raised TypeError whenever this branch ran.
                    self.addArc(arc_source, soms_buffer[0], reduced_reaction)
                return True
            # More than one buffer SOM: undecidable for now.
            else:
                return False
        # No buffer left: record a type-four error (once per reaction).
        else:
            if reduced_reaction in self.type_four_errors:
                return False
            else:
                self.type_four_errors.append(reduced_reaction)
                return True

    if product_lessthan_reactant:
        return processPairs(
            pairs=product_lessthan_reactant,
            big=reactants,
            small=products,
            idx_big=0, idx_small=1)
    elif reactant_lessthan_product:
        return processPairs(
            pairs=reactant_lessthan_product,
            big=products,
            small=reactants,
            idx_big=1, idx_small=0)
    return False
def addArc(self, arc_source, arc_destination, reaction):
    """
    Add a single arc (edge) using two SOMs and reaction.
    :param SOM arc_source:
    :param SOM arc_destination:
    :param Reaction reaction:
    """
    # Start from a fresh single-label list; if the edge already exists,
    # extend its stored label list instead (avoiding duplicates).
    labels = [reaction.label]
    if self.has_edge(arc_source, arc_destination):
        labels = self.get_edge_data(arc_source, arc_destination)[cn.REACTION]
        if reaction.label not in labels:
            labels = labels + [reaction.label]
    # (Re)write the edge with the updated reaction-label list.
    self.add_edge(arc_source, arc_destination, reaction=labels)
def getSOMPath(self, som, mole1, mole2):
    """
    Create an undirected graph between
    two molecules within a SOM
    and find the shortest path
    :param SOM som:
    :param Molecule mole1:
    :param Molecule mole2:
    :return PathComponents som_path:
    """
    source_name = mole1.name
    target_name = mole2.name
    # Build an undirected graph out of the SOM's reactions (each one is a
    # 1-1 reaction), labeling every edge with the reaction labels that
    # connect the molecule pair.
    graph = nx.Graph()
    for reaction in list(som.reactions):
        left = reaction.reactants[0].molecule.name
        right = reaction.products[0].molecule.name
        if graph.has_edge(left, right):
            labels = graph.get_edge_data(left, right)[cn.REACTION]
            # only append the label when it is not already recorded
            if reaction.label not in set(labels):
                labels = labels + [reaction.label]
        else:
            labels = [reaction.label]
        graph.add_edge(left, right, reaction=labels)
    # Walk consecutive nodes along the shortest path and collect the
    # reactions attached to each traversed edge.
    path = list(nx.shortest_path(graph, source=source_name,
                                 target=target_name))
    som_path = []
    for node_a, node_b in zip(path[:-1], path[1:]):
        edge_reactions = graph.get_edge_data(node_a, node_b)[cn.REACTION]
        som_path.append(cn.PathComponents(node1=node_a,
                                          node2=node_b,
                                          reactions=edge_reactions))
    return som_path
def printSOMPath(self, molecule_name1, molecule_name2):
    """
    Print out shortest SOM path between two molecules.
    Arguments are str and both molecules should be in the
    same SOM.
    :param str molecule_name1:
    :param str molecule_name2:
    :return bool/str:
    """
    som1 = self.getNode(self.simple.getMolecule(molecule_name1))
    som2 = self.getNode(self.simple.getMolecule(molecule_name2))
    # Molecules living in different SOMs have no equality path at all.
    if som1 != som2:
        return False
    report = NULL_STR
    if molecule_name1 == molecule_name2:
        # Trivial case: a molecule is always equal to itself.
        report = report + "Clearly, %s %s %s\n" % (
            molecule_name1, cn.EQUAL, molecule_name2)
        return report
    som_path = self.getSOMPath(som1,
                               self.simple.getMolecule(molecule_name1),
                               self.simple.getMolecule(molecule_name2))
    # Render each hop on the path together with its justifying reactions.
    for component in som_path:
        report = report + "\n%s %s %s by reaction(s):\n" % (
            component.node1, cn.EQUAL, component.node2)
        for label in component.reactions:
            som_reaction = self.simple.getReaction(label)
            report = report + "%s\n" % (
                som_reaction.makeIdentifier(is_include_kinetics=False))
    return report
def addTypeOneError(self, mole1, mole2, reaction):
    """
    Add Type I Error components to self.type_one_errors
    All components of resulting PathComponents are str
    :param Molecule mole1:
    :param Molecule mole2:
    :param Reaction reaction:
    :return bool flag:
    """
    # When an error for this exact molecule pair is already recorded,
    # merge the new reaction label into it instead of adding a duplicate.
    for existing in self.type_one_errors:
        if existing.node1 == mole1.name and existing.node2 == mole2.name:
            merged = cn.PathComponents(
                node1=mole1.name,
                node2=mole2.name,
                reactions=existing.reactions + [reaction.label])
            self.type_one_errors.remove(existing)
            self.type_one_errors.append(merged)
            return True
    # First error seen for this pair: record a fresh component.
    self.type_one_errors.append(
        cn.PathComponents(node1=mole1.name,
                          node2=mole2.name,
                          reactions=[reaction.label]))
    return True
def checkTypeOneError(self, arc, inequality_reaction=None):
    """
    Check Type I Error of an arc.
    If both source and destination are found
    in the same SOM, send error message and return True.
    If not, return False.
    :param tuple-Molecule arc:
    :param Reaction inequality_reaction:
    :return bool:
    """
    source, destination = arc
    # An arc whose endpoints resolve to different SOMs is consistent.
    if self.getNode(source) != self.getNode(destination):
        return False
    # Same SOM on both ends contradicts the equality it encodes.
    self.addTypeOneError(source, destination, inequality_reaction)
    return True
def addTypeTwoError(self, cycle):
    """
    Add Type II Error components to self.type_two_errors
    which is a list of lists
    All components of resulting PathComponents are str
    :param list-SOM cycle:
    """
    # exceptionally, here PathComponents are
    # node1=[], node2=[], reactions=[] and their indices
    # match component-wise. All elements within nodes
    # are in the same SOM.
    # REFACTOR: the original duplicated the whole per-edge body once for
    # the consecutive pairs and once more for the closing (last -> first)
    # edge; the modulo walk below covers both with one shared helper.
    error_cycle = []
    num_soms = len(cycle)
    for node_idx in range(num_soms):
        som1 = cycle[node_idx]
        som2 = cycle[(node_idx + 1) % num_soms]
        error_cycle.append(self._buildCycleComponent(som1, som2))
    self.type_two_errors.append(error_cycle)

def _buildCycleComponent(self, som1, som2):
    """
    Build one PathComponents entry describing the edge som1 -> som2
    of a type-II error cycle.
    :param SOM som1:
    :param SOM som2:
    :return PathComponents:
    """
    som1_moles = {mole.name for mole in list(som1.molecules)}
    som2_moles = {mole.name for mole in list(som2.molecules)}
    reactions = self.get_edge_data(som1, som2)[cn.REACTION]
    # all reactions (in an edge) contribute to a single PathComponent
    nodes1 = []
    nodes2 = []
    reaction_labels = []
    for r in reactions:
        reaction = self.simple.getReaction(r)
        if reaction.category == cn.REACTION_n_1:
            sources = {r.molecule.name for r in reaction.reactants}
            destinations = {p.molecule.name for p in reaction.products}
        elif reaction.category == cn.REACTION_1_n:
            sources = {p.molecule.name for p in reaction.products}
            destinations = {r.molecule.name for r in reaction.reactants}
        # NOTE(review): arc-adding reactions are presumably always n-1 or
        # 1-n; any other category would leave sources/destinations stale
        # here (same as the original) — confirm with callers.
        # for any reaction that adds arcs, len(nodes2) == 1
        node2 = list(destinations.intersection(som2_moles))[0]
        for node1 in list(sources.intersection(som1_moles)):
            nodes1.append(node1)
            nodes2.append(node2)
            reaction_labels.append(reaction.label)
    return cn.PathComponents(node1=nodes1,
                             node2=nodes2,
                             reactions=reaction_labels)
def checkTypeTwoError(self):
    """
    Check Type II Error (cycles) of a MESGraph.
    If there is at least one cycle,
    report an error message, related reactions
    and return True.
    If there is no cycle, return False.
    :return bool:
    """
    # Copy the edges into a plain DiGraph so cycle detection is
    # independent of this class's own node/edge bookkeeping.
    digraph = nx.DiGraph()
    digraph.add_edges_from(self.edges)
    cycles = list(nx.simple_cycles(digraph))
    if not cycles:
        return False
    for cycle in cycles:
        self.addTypeTwoError(cycle)
    # Latch the flag recording that at least one cycle was found.
    if not self.type_two_error:
        self.type_two_error = True
    return True
def checkTypeFiveError(self):
"""
Check Type V Error (cycles) of a MESGraph.
If there is at least one cycle,
add cycle to self.type_five_errors.
The biggest difference between type | |
(KHTML, like Gecko) Version/9.0 Mobile/13D15 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_0_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/47.0.2526.107 Mobile/13A452 Safari/601.1.46 (000412)",
"Mozilla/5.0 (iPad; CPU OS 9_2_1 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/47.0.2526.107 Mobile/13D15 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_2_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/12.0.68608 Mobile/13D15 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_0_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/48.0.2564.87 Mobile/13A452 Safari/601.1.46 (000715)",
"Mozilla/5.0 (iPad; CPU OS 9_2_1 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/48.0.2564.87 Mobile/13D15 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) CriOS/45.0.2454.89 Mobile/13B143 Safari/600.1.4 (000381)",
"Mozilla/5.0 (iPad; CPU OS 9_3 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13E5200d Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_3 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13E5200d Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_2_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/11.1.66360 Mobile/13D15 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/48.0.2564.104 Mobile/13B143 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_2_1 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/48.0.2564.104 Mobile/13D15 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13E5200d Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/48.0.2564.104 Mobile/13E5200d Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/47.0.2526.83 Mobile/13C75 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/47.0.2526.83 Mobile/13C75 Safari/601.1.46 (000381)",
"Mozilla/5.0 (iPad; CPU OS 9_0 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13A344 Shelter/1.0.0 (YmqLQeAh3Z-nBdz2i87Rf) ",
"Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) CriOS/46.0.2490.73 Mobile/13C143 Safari/600.1.4 (000718)",
"Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13A143 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_3 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) FxiOS/1.4 Mobile/13E5181f Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_2_1 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/49.0.2623.73 Mobile/13D15 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_2_1 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/49.0.2623.73 Mobile/13A15 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13E233 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_3 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/13.1.72140 Mobile/13E233 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_3 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/49.0.2623.73 Mobile/13E233 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13E238 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_3_1 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/49.0.2623.109 Mobile/13E238 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_0_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) FxiOS/1.4 Mobile/13A452 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) CriOS/44.0.2403.67 Mobile/13B143 Safari/600.1.4 (000073)",
"Mozilla/5.0 (iPad; CPU OS 9_3_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) FxiOS/3.0 Mobile/13E238 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/14.1.119979954 Mobile/13E238 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_3_1 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/50.0.2661.95 Mobile/13E238 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13E234 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13F69 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_3 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13E237 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/50.0.2661.95 Mobile/13F69 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/15.1.122860578 Mobile/13F69 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/51.0.2704.64 Mobile/13F69 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13F72 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_3_1 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/51.0.2704.104 Mobile/13E238 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_2_1 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/50.0.2661.77 Mobile/13D15 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) FxiOS/4.0 Mobile/13F69 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/51.0.2704.104 Mobile/13F69 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/16.0.124986583 Mobile/13F69 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_3 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) FxiOS/2.0 Mobile/13E5200d Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_3 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13G34 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/52.0.2743.84 Mobile/13F69 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13E188a Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_3_4 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/17.0.128207670 Mobile/13G35 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_3_3 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/50.0.2661.95 Mobile/13G34 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_4 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13G35 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_3_4 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13G35",
"Mozilla/5.0 (iPad; CPU OS 9_3_4 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/52.0.2743.84 Mobile/13G35 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_4 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) FxiOS/5.0 Mobile/13G35 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13F69 iPadApp",
"Mozilla/5.0 (iPad; CPU OS 9_3_4 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13G35 Safari/601.1 MXiOS/4.9.0.60",
"Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13F69",
"Mozilla/5.0 (iPad; CPU OS 9_3_4 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/18.0.130791545 Mobile/13G35 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13G36 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/18.0.130791545 Mobile/13G36 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 7_1 like Mac OS X) AppleWebKit/537.51.3 (KHTML, like Gecko) Version/7.0 Mobile/11A4149 Safari/9537.72",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36",
"Mozilla/5.0 (iPad; CPU OS 9_0 like Mac OS X) AppleWebKit/601.1.17 (KHTML, like Gecko) Version/8.0 Mobile/13A175 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/18.1.132077863 Mobile/13G36 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/53.0.2785.86 Mobile/13G36 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/53.0.2785.109 Mobile/13F69 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/53.0.2785.109 Mobile/13G36 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_2 like Mac OSX) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13A452 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_2_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13D11",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13G36 Safari/601.1.46 Sleipnir/4.3.0m",
"Mozilla/5.0 (iPad; CPU OS 9_0_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/53.0.2785.86 Mobile/13A452 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1.46.140 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/54.0.2840.66 Mobile/13G36 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/54.0.2840.91 Mobile/13G36 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13F69 Safari/601.1.46 Sleipnir/4.3.2m",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13G36",
"Mozilla/5.0 (iPad; CPU OS 9_2_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/5.3.48993 Mobile/13D15 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_3_1 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/54.0.2840.66 Mobile/13E238 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_1 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/50.0.2661.77 Mobile/13E238 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/55.0.2883.79 Mobile/13G36 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/55.0.2883.79 Mobile/13F69 Safari/601.1.46",
| |
= str_value[:-1] # trim off '%' character
return float(float_part) / 100.0
class ST_PlaceholderSize(XsdTokenEnumeration):
    """
    Valid values for the <p:ph> sz (size) attribute.
    """
    FULL = "full"
    HALF = "half"
    QUARTER = "quarter"
    # membership tuple consulted by the token-enumeration validator
    _members = (FULL, HALF, QUARTER)
class ST_PositiveCoordinate(XsdLong):
    """Non-negative coordinate value, returned from XML as |Emu|.

    Valid range is 0 to 27273042316900 EMU.
    """

    @classmethod
    def convert_from_xml(cls, str_value):
        """Parse *str_value* as a long and wrap the result in |Emu|."""
        emu = super(ST_PositiveCoordinate, cls).convert_from_xml(str_value)
        return Emu(emu)

    @classmethod
    def validate(cls, value):
        """Raise unless *value* is an int in the allowed coordinate range."""
        cls.validate_int_in_range(value, 0, 27273042316900)
class ST_PositiveFixedAngle(ST_Angle):
    """Valid values for `a:lin@ang`.

    60000ths of a degree rotation, constrained to non-negative angles less
    than 360 degrees.
    """

    @classmethod
    def convert_to_xml(cls, degrees):
        """Convert signed angle float like -427.42 to int 60000 per degree.

        Value is normalized to a non-negative value less than 360 degrees.
        """
        # Python's % with a positive modulus yields a result in [0, 360)
        # for either sign of *degrees*, including exact multiples of 360.
        # The previous two-step form (`degrees %= -360` then `+= 360`)
        # mapped exact negative multiples of 360 (e.g. -360.0, -720.0) to
        # 360.0, emitting the out-of-range XML value "21600000" instead of
        # "0".
        degrees %= 360
        return str(int(round(degrees * cls.DEGREE_INCREMENTS)))
class ST_PositiveFixedPercentage(ST_Percentage):
    """Percentage value between 0 and 100% like 42000 or '42.0%'.

    Either an integer literal representing 1000ths of a percent
    (e.g. "42000"), or a floating point literal with a '%' suffix
    (e.g. "42.0%"). Value is constrained to range of 0% to 100%. The
    source value is a float between 0.0 and 1.0.
    """
    @classmethod
    def validate(cls, value):
        # enforces the 0%..100% constraint on the float form
        cls.validate_float_in_range(value, 0.0, 1.0)
class ST_PositivePercentage(ST_Percentage):
    """Percentage value that is zero or positive.

    Either an integer literal representing 1000ths of a percent
    (e.g. "42000"), or a floating point literal with a '%' suffix
    (e.g. "42.0%"). Unlike ST_PositiveFixedPercentage, only the lower
    bound (>= 0) is enforced here; no upper bound is checked.
    """
    @classmethod
    def validate(cls, value):
        cls.validate_float(value)
        if value < 0:
            # NOTE(review): ValueError would be conventional for a range
            # problem; TypeError is kept in case callers catch it.
            raise TypeError("value must be 0 or positive, got %s" % value)
class ST_RelationshipId(XsdString):
    """Relationship identifier; validated only as an XML string."""
    pass
class ST_SlideId(XsdUnsignedInt):
    """Slide identifier, an unsigned int in range 256..2147483647."""
    @classmethod
    def validate(cls, value):
        cls.validate_int_in_range(value, 256, 2147483647)
class ST_SlideSizeCoordinate(BaseIntType):
    """Slide-size coordinate in EMU, restricted to 1-56 inches."""

    @classmethod
    def convert_from_xml(cls, str_value):
        """Return *str_value* wrapped as an |Emu| length."""
        return Emu(str_value)

    @classmethod
    def validate(cls, value):
        """Raise |ValueError| unless 914400 <= *value* <= 51206400."""
        cls.validate_int(value)
        if 914400 <= value <= 51206400:
            return
        raise ValueError(
            "value must be in range(914400, 51206400) (1-56 inches), got"
            " %d" % value
        )
class ST_Style(XsdUnsignedByte):
    """Style matrix index, an unsigned byte in range 1..48."""
    @classmethod
    def validate(cls, value):
        cls.validate_int_in_range(value, 1, 48)
class ST_StyleMatrixColumnIndex(XsdUnsignedInt):
    """Style matrix column index; validated only as an unsigned int."""
    pass
class ST_TargetMode(XsdString):
    """
    The valid values for the ``TargetMode`` attribute in a Relationship
    element, either 'External' or 'Internal'.
    """

    @classmethod
    def validate(cls, value):
        """Raise unless *value* is the string 'External' or 'Internal'."""
        cls.validate_string(value)
        if value in ("External", "Internal"):
            return
        raise ValueError(
            "must be one of 'Internal' or 'External', got '%s'" % value
        )
class ST_TextFontScalePercentOrPercentString(BaseFloatType):
    """
    Valid values for the `fontScale` attribute of ``<a:normAutofit>``.
    Translates to a float value.
    """

    @classmethod
    def convert_from_xml(cls, str_value):
        """Parse either a percent literal ("42.5%") or 1000ths-int form."""
        if str_value.endswith("%"):
            # percent literal carries the value directly, minus the '%'
            return float(str_value[:-1])
        # bare integer string is expressed in 1000ths of a percent
        return int(str_value) / 1000.0

    @classmethod
    def convert_to_xml(cls, value):
        """Emit *value* as an integer string in 1000ths of a percent."""
        return str(int(value * 1000.0))

    @classmethod
    def validate(cls, value):
        """Raise unless *value* is a float in 1.0..100.0 (percent)."""
        BaseFloatType.validate(value)
        if 1.0 <= value <= 100.0:
            return
        raise ValueError(
            "value must be in range 1.0..100.0 (percent), got %s" % value
        )
class ST_TextFontSize(BaseIntType):
    """Font size in centipoints, an int in range 100..400000 (1-4000 pt)."""
    @classmethod
    def validate(cls, value):
        cls.validate_int_in_range(value, 100, 400000)
class ST_TextIndentLevelType(BaseIntType):
    """Paragraph indent level, an int in range 0..8."""
    @classmethod
    def validate(cls, value):
        cls.validate_int_in_range(value, 0, 8)
class ST_TextSpacingPercentOrPercentString(BaseFloatType):
    """Line-spacing value expressed as a float number of lines.

    XML form is either an int literal in 100000ths (e.g. "175000") or a
    percent string (e.g. "175%"); both parse to e.g. 1.75.
    """

    @classmethod
    def convert_from_xml(cls, str_value):
        """Parse percent-literal or 100000ths-int form to a float."""
        if str_value.endswith("%"):
            return cls._convert_from_percent_literal(str_value)
        return int(str_value) / 100000.0

    @classmethod
    def _convert_from_percent_literal(cls, str_value):
        """'175%' -> 1.75 (lines)."""
        return float(str_value[:-1]) / 100.0

    @classmethod
    def convert_to_xml(cls, value):
        """
        1.75 -> '175000'
        """
        return str(int(round(value * 100000.0)))

    @classmethod
    def validate(cls, value):
        """Raise unless *value* is a float in 0.0..132.0."""
        cls.validate_float_in_range(value, 0.0, 132.0)
class ST_TextSpacingPoint(BaseIntType):
    """Text spacing expressed in centipoints, an int in 0..20116800."""
    @classmethod
    def convert_from_xml(cls, str_value):
        """
        Reads string integer centipoints, returns |Length| value.
        """
        return Centipoints(int(str_value))
    @classmethod
    def convert_to_xml(cls, value):
        # normalize to a |Length| subtype so .centipoints is available
        length = Emu(value)  # just to make sure
        return str(length.centipoints)
    @classmethod
    def validate(cls, value):
        cls.validate_int_in_range(value, 0, 20116800)
class ST_TextTypeface(XsdString):
    """Typeface name; validated only as an XML string."""
    pass
class ST_TextWrappingType(XsdTokenEnumeration):
    """
    Valid values for the <a:bodyPr wrap=""> attribute.
    """
    NONE = "none"
    SQUARE = "square"
    # membership tuple consulted by the token-enumeration validator
    _members = (NONE, SQUARE)
class ST_UniversalMeasure(BaseSimpleType):
    """Length with a two-character unit suffix, e.g. '2.5in', as |Emu|.

    Supported suffixes: mm, cm, in, pt, pc, pi.
    """

    @classmethod
    def convert_from_xml(cls, str_value):
        """Split number and unit suffix, scale to EMU, return |Emu|."""
        quantity = float(str_value[:-2])
        emu_per_unit = {
            "mm": 36000,
            "cm": 360000,
            "in": 914400,
            "pt": 12700,
            "pc": 152400,
            "pi": 152400,
        }[str_value[-2:]]
        return Emu(int(round(quantity * emu_per_unit)))
class ST_FontCollectionIndex(XsdTokenEnumeration):
    """Valid values for font collections."""
    MAJOR = "major"
    MINOR = "minor"
    NONE = "none"
    # membership tuple consulted by the token-enumeration validator
    _members = (MAJOR, MINOR, NONE)
class ST_TextPanose(XsdHexBinary):
    """Panose classification, an alphanumeric hexadecimal string.

    Length of 10 bytes (20 characters).
    """
    pass
class ST_TextPitchFamily(XsdInt):
    """Pitch family; verification only as an integer."""
    pass
class ST_TextCharset(XsdInt):
    """Character set; verification only as an integer."""
    pass
class ST_TextAutoNumType(XsdString):
    """Auto-numbering scheme; verification only as a string."""
    pass
class ST_TextBulletStartAtNum(XsdInt):
    """StartAt value for auto-numbered bullets, an int in 1..32767."""
    @classmethod
    def validate(cls, value):
        # explicit integral check first so a non-int raises TypeError
        # (with the offending type) rather than the range error below
        if not isinstance(value, numbers.Integral):
            raise TypeError("value must be an integral type, got %s" % type(value))
        cls.validate_int_in_range(value, 1, 32767)
class ST_TextBulletSizePercent(ST_Percentage):
    """Bullet size percentage, a float in 0.25..4 (25% to 400%)."""
    @classmethod
    def validate(cls, value):
        cls.validate_float_in_range(value, .25, 4)
class ST_SystemColorVal(XsdTokenEnumeration):
    """
    Valid values for the system color attribute.
    """
    color_values = (
        "scrollBar",
        "background",
        "activeCaption",
        "inactiveCaption",
        "menu",
        "window",
        "windowFrame",
        "menuText",
        "windowText",
        "captionText",
        "activeBorder",
        "inactiveBorder",
        "appWorkspace",
        "highlight",
        "highlightText",
        "btnFace",
        "btnShadow",
        "grayText",
        "btnText",
        "inactiveCaptionText",
        "btnHighlight",
        "3dDkShadow",
        "3dLight",
        "infoText",
        "infoBk",
        "hotLight",
        "gradientActiveCaption",
        "gradientInactiveCaption",
        "menuHighlight",
        "menuBar"
    )
    # membership tuple consulted by the token-enumeration validator
    _members = color_values
class ST_TextStrikeType(XsdTokenEnumeration):
    """
    Valid values for the text strikethrough attribute.
    """
    # NOTE(review): attribute name `color_values` looks copy-pasted from the
    # color enumerations; these are strike types, not colors. Kept as-is in
    # case external code references it.
    color_values = (
        "noStrike",
        "sngStrike",
        "dblStrike"
    )
    _members = color_values
class ST_PresetColorVal(XsdTokenEnumeration):
    """
    Valid values for the preset color attribute.
    """
    color_values = (
        "aliceBlue",
        "antiqueWhite",
        "aqua",
        "aquamarine",
        "azure",
        "beige",
        "bisque",
        "black",
        "blanchedAlmond",
        "blue",
        "blueViolet",
        "brown",
        "burlyWood",
        "cadetBlue",
        "chartreuse",
        "chocolate",
        "coral",
        "cornflowerBlue",
        "cornsilk",
        "crimson",
        "cyan",
        "dkBlue",
        "dkCyan",
        "dkGoldenrod",
        "dkGray",
        "dkGreen",
        "dkKhaki",
        "dkMagenta",
        "dkOliveGreen",
        "dkOrange",
        "dkOrchid",
        "dkRed",
        "dkSalmon",
        "dkSeaGreen",
        "dkSlateBlue",
        "dkSlateGray",
        "dkTurquoise",
        "dkViolet",
        "deepPink",
        "deepSkyBlue",
        "dimGray",
        "dodgerBlue",
        "firebrick",
        "floralWhite",
        "forestGreen",
        "fuchsia",
        "gainsboro",
        "ghostWhite",
        "gold",
        "goldenrod",
        "gray",
        "green",
        "greenYellow",
        "honeydew",
        "hotPink",
        "indianRed",
        "indigo",
        "ivory",
        "khaki",
        "lavender",
        "lavenderBlush",
        "lawnGreen",
        "lemonChiffon",
        "ltBlue",
        "ltCoral",
        "ltCyan",
        "ltGoldenrodYellow",
        "ltGray",
        "ltGreen",
        "ltPink",
        "ltSalmon",
        "ltSeaGreen",
        "ltSkyBlue",
        "ltSlateGray",
        "ltSteelBlue",
        "ltYellow",
        "lime",
        "limeGreen",
        "linen",
        "magenta",
        "maroon",
        "medAquamarine",
        "medBlue",
        "medOrchid",
        "medPurple",
        "medSeaGreen",
        "medSlateBlue",
        "medSpringGreen",
        "medTurquoise",
        "medVioletRed",
        "midnightBlue",
        "mintCream",
        "mistyRose",
        "moccasin",
        "navajoWhite",
        "navy",
        "oldLace",
        "olive",
        "oliveDrab",
        "orange",
        "orangeRed",
        "orchid",
        "paleGoldenrod",
        "paleGreen",
        "paleTurquoise",
        "paleVioletRed",
        "papayaWhip",
        "peachPuff",
        "peru",
        "pink",
        "plum",
        "powderBlue",
        "purple",
        "red",
        "rosyBrown",
        "royalBlue",
        "saddleBrown",
        "salmon",
        "sandyBrown",
        "seaGreen",
        "seaShell",
        "sienna",
        "silver",
        "skyBlue",
        "slateBlue",
        "slateGray",
        "snow",
        "springGreen",
        "steelBlue",
        "tan",
        "teal",
        "thistle",
        "tomato",
        "turquoise",
        "violet",
        "wheat",
        "white",
        "whiteSmoke",
        "yellow",
        "yellowGreen"
    )
    # membership tuple consulted by the token-enumeration validator
    _members = color_values
class ST_ColorSchemeIndex(XsdTokenEnumeration):
    """
    Valid values for the scheme color attribute.
    """
    color_values = (
        "dk1",
        "lt1",
        "dk2",
        "lt2",
        "accent1",
        "accent2",
        "accent3",
        "accent4",
        "accent5",
        "accent6",
        "hlink",
        "folHlink",
    )
    # membership tuple consulted by the token-enumeration validator
    _members = color_values
class ST_AdjCoordinate(BaseSimpleType):
    """
    This technically is a Union of ST_Coordinate and ST_GeomGuideName,
    but I haven't figured out how to do that so I'm creating this for now
    and we can add verification here if we need to. Values pass through
    unparsed (as strings) in both directions.
    """
    @classmethod
    def convert_from_xml(cls, str_value):
        return str_value
    @classmethod
    def convert_to_xml(cls, value):
        return str(value)
    @classmethod
    def validate(cls, value):
        # intentionally a no-op pending a proper union type
        pass
        # cls.validate_string(value)
class ST_AdjAngle(BaseSimpleType):
    """
    This technically is a Union of ST_Angle and ST_GeomGuideName,
    but I haven't figured out how to do that so I'm creating this for now
    and we can add verification here if we need to. Values pass through
    unparsed (as strings) in both directions.
    """
    @classmethod
    def convert_from_xml(cls, str_value):
        return str_value
    @classmethod
    def convert_to_xml(cls, value):
        return str(value)
    @classmethod
    def validate(cls, value):
        # intentionally a no-op pending a proper union type
        pass
        # cls.validate_string(value)
class ST_PathFillMode(XsdTokenEnumeration):
    """
    Valid values for path fill modes.
    """
    fill_modes = (
        "none",
        "norm",
        "lighten",
        "lightenLess",
        "darken",
        "darkenLess",
    )
    # membership tuple consulted by the token-enumeration validator
    _members = fill_modes
class ST_LineEndType(XsdTokenEnumeration):
    """
    Valid values for line end types.
    """
    line_end_types = (
        "none",
        "triangle",
        "stealth",
        "diamond",
        "oval",
        "arrow",
    )
    # membership tuple consulted by the token-enumeration validator
    _members = line_end_types
class ST_LineEndWidth(XsdTokenEnumeration):
    """
    Valid values for line end widths (small/medium/large).
    """
    line_end_widths = (
        "sm",
        "med",
        "lg"
    )
    # membership tuple consulted by the token-enumeration validator
    _members = line_end_widths
class ST_LineEndLength(XsdTokenEnumeration):
    """
    Valid values for line end lengths (small/medium/large).
    """
    line_end_lengths = (
        "sm",
        "med",
        "lg"
    )
    # membership tuple consulted by the token-enumeration validator
    _members = line_end_lengths
class ST_LineCap(XsdTokenEnumeration):
    """
    Valid values for line cap (round/square/flat).
    """
    line_caps = (
        "rnd",
        "sq",
        "flat"
    )
    # membership tuple consulted by the token-enumeration validator
    _members = line_caps
class ST_CompoundLine(XsdTokenEnumeration):
    """
    Valid values for compound line types.
    """
    compound_types = (
        "sng",
        "dbl",
        "thickThin",
        "thinThick",
        "tri"
    )
    # membership tuple consulted by the token-enumeration validator
    _members = compound_types
class ST_PenAlignment(XsdTokenEnumeration):
    """
    Valid values for stroke pen alignment (center/inset).
    """
    alignments = (
        "ctr",
        "in",
    )
    # membership tuple consulted by the token-enumeration validator
    _members = alignments
class ST_ShapeType(XsdTokenEnumeration):
"""
List of Valid Values for Shape types.
This includes both connectors and Autoshapes
"""
shape_types = (
"line",
"lineInv",
"triangle",
"rtTriangle",
"rect",
"diamond",
"parallelogram",
"trapezoid",
"nonIsoscelesTrapezoid",
"pentagon",
"hexagon",
"heptagon",
"octagon",
"decagon",
"dodecagon",
"star4",
"star5",
"star6",
"star7",
"star8",
"star10",
"star12",
"star16",
"star24",
"star32",
"roundRect",
"round1Rect",
"round2SameRect",
"round2DiagRect",
"snipRoundRect",
"snip1Rect",
"snip2SameRect",
"snip2DiagRect",
"plaque",
"ellipse",
"teardrop",
"homePlate",
"chevron",
"pieWedge",
"pie",
"blockArc",
"donut",
"noSmoking",
"rightArrow",
"leftArrow",
"upArrow",
"downArrow",
"stripedRightArrow",
"notchedRightArrow",
"bentUpArrow",
"leftRightArrow",
"upDownArrow",
"leftUpArrow",
"leftRightUpArrow",
"quadArrow",
"leftArrowCallout",
| |
<reponame>bkilian15/cmssw<gh_stars>1-10
from __future__ import print_function
import sys
import os
import logging
import random
import subprocess
import re
import struct
import FWCore.ParameterSet.SequenceTypes as sqt
import FWCore.ParameterSet.Config as cms
import FWCore.ParameterSet.Modules as mod
import FWCore.ParameterSet.Types as typ
import FWCore.ParameterSet.Mixins as mix
from Vispa.Plugins.ConfigEditor.ConfigDataAccessor import ConfigDataAccessor
from FWCore.GuiBrowsers.FileExportPlugin import FileExportPlugin
class DotProducer(object):
  """Builds DOT (graphviz) markup describing a CMSSW configuration.

  Walks the ConfigDataAccessor *data* and, when called, emits paths,
  sequences, modules, source, services/eventsetup nodes and their
  connections as a single digraph string. *options* is the DotExport
  option dict; *shapes* maps module type name to graphviz shape.
  """
  def __init__(self,data,options,shapes):
    self.data = data
    self.options = options
    self.shapes = shapes
    self.nodes={}
    #lists of starts, ends of paths for path-endpath and source-path connections
    self.pathstarts=[]
    self.pathends=[]
    self.endstarts=[]
    self.toplevel = self.getTopLevel()
  def getTopLevel(self):
    """Return a dict of top-level config objects keyed by category label.

    Also registers module/service/ES nodes found at top level of an
    anonymous (non-process) file.
    """
    #build a dictionary of available top-level objects
    all_toplevel={}
    if self.data.process():
      for tlo in self.data.children(self.data.topLevelObjects()[0]):
        children = self.data.children(tlo)
        if children:
          all_toplevel[tlo._label]=children
    else:
      #case if we have only an anonymous (non-process) file
      #pick up (modules, sequences, paths)
      for tlo in self.data.topLevelObjects():
        if self.data.type(tlo)=='Sequence':
          if 'sequences' in all_toplevel:
            all_toplevel['sequences']+=[tlo]
          else:
            all_toplevel['sequences']=[tlo]
        if self.data.type(tlo)=='Path':
          if 'paths' in all_toplevel:
            all_toplevel['paths']+=[tlo]
          else:
            all_toplevel['paths']=[tlo]
        if self.data.type(tlo) in ('EDAnalyzer','EDFilter','EDProducer','OutputModule'):
          self.nodes[self.data.label(tlo)]={'obj':tlo,'n_label':self.nodeLabel(tlo),'n_shape':self.shapes.get(self.data.type(tlo),'plaintext'),'inpath':True}
        if self.options['services'] and self.data.type(tlo)=='Service':
          self.nodes[self.data.label(tlo)]={'obj':tlo,'n_label':self.nodeLabel(tlo),'n_shape':self.shapes.get(self.data.type(tlo),'plaintext'),'inpath':False}
        if self.options['es'] and self.data.type(tlo) in ('ESSource','ESProducer'):
          self.nodes[self.data.label(tlo)]={'obj':tlo,'n_label':self.nodeLabel(tlo),'n_shape':self.shapes.get(self.data.type(tlo),'plaintext'),'inpath':False}
    return all_toplevel
  def seqRecurseChildren(self,obj):
    """Return DOT markup for *obj*: a nested cluster per sequence, or the
    bare node label for a leaf module (which is also registered)."""
    children = self.data.children(obj)
    if children:
      seqlabel = self.data.label(obj)
      if self.options['file']:
        seqlabel += '\\n%s:%s' % (self.data.pypackage(obj),self.data.lineNumber(obj))
      result='subgraph clusterSeq%s {\nlabel="Sequence %s"\ncolor="%s"\nfontcolor="%s"\nfontname="%s"\nfontsize=%s\n' % (self.data.label(obj),seqlabel,self.options['color_sequence'],self.options['color_sequence'],self.options['font_name'],self.options['font_size'])
      for c in children:
        result += self.seqRecurseChildren(c)
      result+='}\n'
      return result
    else:
      self.nodes[self.data.label(obj)]={'obj':obj,'n_label':self.nodeLabel(obj),'n_shape':self.shapes.get(self.data.type(obj),'plaintext'),'inpath':True}
      return '%s\n'%self.data.label(obj)
  def recurseChildren(self,obj):
    """Return the flat list of leaf descendants of *obj* (obj itself if it
    has no children)."""
    result=[]
    children=self.data.children(obj)
    if children:
      for c in children:
        result += self.recurseChildren(c)
    else:
      result.append(obj)
    return result
  #write out an appropriate node label
  def nodeLabel(self,obj):
    """Return the display label for *obj*, optionally with class and
    file:line suffixes per options."""
    result = self.data.label(obj)
    if self.options['class']:
      result += '\\n%s'%self.data.classname(obj)
    if self.options['file']:
      result += '\\n%s:%s'%(self.data.pypackage(obj),self.data.lineNumber(obj))
    return result
  #generate an appropriate URL by replacing placeholders in baseurl
  def nodeURL(self,obj):
    """Return options['urlbase'] with $classname/$pypath/$pyline expanded."""
    classname = self.data.classname(obj)
    pypath = self.data.pypath(obj)
    # assumes pypath and pyline are strings — str.replace requires str args
    pyline = self.data.lineNumber(obj)
    url = self.options['urlbase'].replace('$classname',classname).replace('$pypath',pypath).replace('$pyline',pyline)
    return url
  def makePath(self,path,endpath=False):
    """Return DOT cluster markup for one path (or endpath), registering
    its member nodes and synthetic start/end nodes as required."""
    children = self.recurseChildren(path)
    pathlabel = self.data.label(path)
    if self.options['file']:
      pathlabel += '\\n%s:%s'%(self.data.pypackage(path),self.data.lineNumber(path))
    if endpath:
      pathresult = 'subgraph cluster%s {\nlabel="%s"\ncolor="%s"\nfontcolor="%s"\nfontname="%s"\nfontsize=%s\n' % (self.data.label(path),pathlabel,self.options['color_endpath'],self.options['color_endpath'],self.options['font_name'],self.options['font_size'])
    else:
      pathresult = 'subgraph cluster%s {\nlabel="%s"\ncolor="%s"\nfontcolor="%s"\nfontname="%s"\nfontsize=%s\n' % (self.data.label(path),pathlabel,self.options['color_path'],self.options['color_path'],self.options['font_name'],self.options['font_size'])
    if self.options['seqconnect']:
      if endpath:
        self.endstarts.append('endstart_%s'%self.data.label(path))
        self.nodes['endstart_%s'%self.data.label(path)]={'obj':path,'n_label':'Start %s'%self.data.label(path),'n_color':'grey','n_shape':'plaintext','inpath':False}
      else:
        self.pathstarts.append('start_%s'%self.data.label(path))
        self.pathends.append('end_%s'%self.data.label(path))
        self.nodes['start_%s'%self.data.label(path)]={'obj':path,'n_label':'Start %s'%self.data.label(path),'n_color':'grey','n_shape':'plaintext','inpath':False}
        self.nodes['end_%s'%self.data.label(path)]={'obj':path,'n_label':'End %s'%self.data.label(path),'n_color':'grey','n_shape':'plaintext','inpath':False}
    labels=[]
    for c in children:
      #this is also done in seqRecurseChildren, so will be duplicated
      #unncessary, but relatively cheap and saves more cff/cfg conditionals
      self.nodes[self.data.label(c)]={'obj':c,'n_label':self.nodeLabel(c),'n_shape':self.shapes.get(self.data.type(c),'plaintext'),'inpath':True}
      labels.append(self.data.label(c))
    if self.options['seqconnect']:
      pathresult += '->'.join(labels)+'\n'
    else:
      if not self.options['seq']:
        pathresult += '\n'.join(labels)+'\n'
    if self.options['seq']:
      if self.data.children(path):
        for path_child in self.data.children(path):
          pathresult += self.seqRecurseChildren(path_child)
    pathresult += '}\n'
    if len(labels)>0 and self.options['seqconnect']:
      if endpath:
        pathresult += 'endstart_%s->%s\n' % (self.data.label(path),labels[0])
      else:
        pathresult += 'start_%s->%s\n%s->end_%s\n' % (self.data.label(path),labels[0],labels[-1],self.data.label(path))
    return pathresult
  def producePaths(self):
    """Return DOT markup for all paths, endpaths and free sequences."""
    result=''
    if 'paths' in self.toplevel:
      for path in self.toplevel['paths']:
        result += self.makePath(path)
    if self.options['endpath']:
      if 'endpaths' in self.toplevel:
        for path in self.toplevel['endpaths']:
          result += self.makePath(path,True)
    if 'sequences' in self.toplevel:
      for seq in self.toplevel['sequences']:
        result += self.seqRecurseChildren(seq)
    return result
  def connectPaths(self):
    """Return edges joining every path end to every endpath start."""
    result=''
    for p in self.pathends:
      for p2 in self.endstarts:
        result+="%s->%s\n" % (p,p2)
    return result
  def connectTags(self):
    """Return labelled edges for InputTag connections between in-path nodes."""
    #if we are connecting by tag, add labelled tag joining lines
    #this doesn't have to be exclusive with sequence connection, by stylistically probably should be
    result=''
    allobjects = [self.nodes[n]['obj'] for n in self.nodes if self.nodes[n]['inpath']]
    self.data.readConnections(allobjects)
    connections = self.data.connections()
    for objects,names in connections.items():
      if self.options['taglabel']:
        result += '%s->%s[label="%s",color="%s",fontcolor="%s",fontsize=%s,fontname="%s"]\n' % (objects[0],objects[1],names[1],self.options['color_inputtag'],self.options['color_inputtag'],self.options['font_size'],self.options['font_name'])
      else:
        result += '%s->%s[color="%s"]\n' % (objects[0],objects[1],self.options['color_inputtag'])
    return result
  def produceSource(self):
    """Register the source node and return its edges to path starts."""
    #add the source
    #if we are connecting sequences, connect it to all the path starts
    #if we are connecting sequences and have a schedule, connect it to path #0
    result=''
    if 'source' in self.toplevel:
      for s in self.toplevel['source']:
        # 'inpath':False added so this entry has the same schema as every
        # other node (connectTags and friends index nodes by 'inpath')
        self.nodes['source']={'obj':s,'n_label':self.data.classname(s),'n_shape':self.shapes['Source'],'inpath':False}
      if self.options['seqconnect']:
        for p in self.pathstarts:
          result += 'source->%s\n' % (p)
    return result
  def produceServices(self):
    """Register service and eventsetup nodes; return invisible layout edges
    limiting the service column height to the longest path+endpath."""
    # add service, eventsetup nodes
    # this will usually result in thousands and isn't that interesting
    servicenodes=[]
    result=''
    if self.options['es']:
      if 'essources' in self.toplevel:
        for e in self.toplevel['essources']:
          servicenodes.append(self.data.label(e))
          self.nodes[self.data.label(e)]={'obj':e,'n_label':self.nodeLabel(e), 'n_shape':self.shapes['ESSource'],'inpath':False}
      if 'esproducers' in self.toplevel:
        for e in self.toplevel['esproducers']:
          servicenodes.append(self.data.label(e))
          self.nodes[self.data.label(e)]={'obj':e,'n_label':self.nodeLabel(e), 'n_shape':self.shapes['ESProducer'],'inpath':False}
    if self.options['services']:
      if 'services' in self.toplevel:
        for s in self.toplevel['services']:
          # bugfix: was `self.servicenodes.append(...)` (no such attribute,
          # AttributeError) and `self.nodeLabel(e)` (labelled the Service
          # node with an unrelated ES object, NameError if the ES loops
          # never ran)
          servicenodes.append(self.data.label(s))
          self.nodes[self.data.label(s)]={'obj':s,'n_label':self.nodeLabel(s), 'n_shape':self.shapes['Service'],'inpath':False}
    #find the maximum path and endpath lengths for servicenode layout
    # bugfix: len() was applied to a generator expression (TypeError) and
    # recurseChildren was called unqualified (NameError); measure each
    # path's flattened children, then take the max
    maxpath=max([len(self.recurseChildren(path)) for path in self.toplevel.get('paths',(0,))])
    maxendpath=max([len(self.recurseChildren(path)) for path in self.toplevel.get('endpaths',(0,))])
    #add invisible links between service nodes where necessary to ensure they only fill to the same height as the longest path+endpath
    #this constraint should only apply for link view
    for i,s in enumerate(servicenodes[:-1]):
      if not i%(maxpath+maxendpath)==(maxpath+maxendpath)-1:
        result+='%s->%s[style=invis]\n' % (s,servicenodes[i+1])
    return result
  def produceNodes(self):
    """Return one DOT node statement per registered node; 'n_'-prefixed
    dict keys become graphviz attributes."""
    result=''
    for n in self.nodes:
      self.nodes[n]['n_fontname']=self.options['font_name']
      self.nodes[n]['n_fontsize']=self.options['font_size']
      if self.options['url']:
        self.nodes[n]['n_URL']=self.nodeURL(self.nodes[n]['obj'])
      result += "%s[%s]\n" % (n,','.join(['%s="%s"' % (k[2:],v) for k,v in self.nodes[n].items() if k[0:2]=='n_']))
    return result
  def produceLegend(self):
    """
    Return a legend subgraph using current shape and colour preferences.
    """
    return 'subgraph clusterLegend {\nlabel="legend"\ncolor=red\nSource->Producer->Filter->Analyzer\nService->ESSource[style=invis]\nESSource->ESProducer[style=invis]\nProducer->Filter[color="%s",label="InputTag",fontcolor="%s"]\nProducer[shape=%s]\nFilter[shape=%s]\nAnalyzer[shape=%s]\nESSource[shape=%s]\nESProducer[shape=%s]\nSource[shape=%s]\nService[shape=%s]\nsubgraph clusterLegendSequence {\nlabel="Sequence"\ncolor="%s"\nfontcolor="%s"\nProducer\nFilter\n}\n}\n' % (self.options['color_inputtag'],self.options['color_inputtag'],self.shapes['EDProducer'],self.shapes['EDFilter'],self.shapes['EDAnalyzer'],self.shapes['ESSource'],self.shapes['ESProducer'],self.shapes['Source'],self.shapes['Service'],self.options['color_sequence'],self.options['color_sequence'])
  def __call__(self):
    """Assemble and return the complete digraph string per options."""
    blocks=[]
    if self.options['legend']:
      blocks += [self.produceLegend()]
    blocks += [self.producePaths()]
    if self.options['seqconnect']:
      blocks += [self.connectPaths()]
    if self.options['tagconnect']:
      blocks += [self.connectTags()]
    if self.options['source']:
      blocks += [self.produceSource()]
    if self.options['es'] or self.options['services']:
      blocks += [self.produceServices()]
    blocks += [self.produceNodes()]
    if self.data.process():
      return 'digraph configbrowse {\nsubgraph clusterProcess {\nlabel="%s\\n%s"\nfontsize=%s\nfontname="%s"\n%s\n}\n}\n' % (self.data.process().name_(),self.data._filename,self.options['font_size'],self.options['font_name'],'\n'.join(blocks))
    else:
      return 'digraph configbrowse {\nsubgraph clusterCFF {\nlabel="%s"\nfontsize=%s\nfontname="%s"\n%s\n}\n}\n' % (self.data._filename,self.options['font_size'],self.options['font_name'],'\n'.join(blocks))
class DotExport(FileExportPlugin):
"""
Export a CMSSW config file to DOT (http://www.graphviz.org) markup, either as raw markup or by invoking the dot program, as an image.
"""
option_types={
'legend':('Show Legend','boolean',True),
'source':('Show Source','boolean',True),
'es':('Show Event Setup','boolean',False),
'tagconnect':('Connect InputTags','boolean',True),
'seqconnect':('Connect Module Sequence','boolean',False),
'services':('Show Services','boolean',False),
'endpath':('Show EndPaths','boolean',True),
'seq':('Group Sequences','boolean',True),
'class':('Show Class','boolean',True),
'file':('Show File','boolean',True),
'schedule':('Show Schedule','boolean',False),
'taglabel':('Show Tag Labels','boolean',True),
'color_path':('Path Color','color','#ff00ff'),
'color_endpath':('EndPath Color','color','#ff0000'),
'color_sequence':('Sequence Color','color','#00ff00'),
'color_inputtag':('InputTag Color','color','#0000ff'),
'color_schedule':('Schedule Color','color','#00ffff'),
'url':('Include URLs','boolean',False), #this is only purposeful for png+map mode
'urlprocess':('Postprocess URL (for client-side imagemaps)','boolean',False), #see processMap documentation; determines whether to treat 'urlbase' as a dictionary for building a more complex imagemap or a simple URL
'urlbase':('URL to generate','string',"{'split_x':1,'split_y':2,'scale_x':1.,'scale_y':1.,'cells':[{'top':0,'left':0,'width':1,'height':1,'html_href':'http://cmslxr.fnal.gov/lxr/ident/?i=$classname','html_alt':'LXR','html_class':'LXR'},{'top':1,'left':0,'width':1,'height':1,'html_href':'http://cmssw.cvs.cern.ch/cgi-bin/cmssw.cgi/CMSSW/$pypath?view=markup#$pyline','html_alt':'CVS','html_class':'CVS'}]}"), #CVS markup view doesn't allow line number links, only annotate view (which doesn't then highlight the code...)
'node_graphs':('Produce individual graphs focussing on each node','boolean',False),
'node_graphs_restrict':('Select which nodes to make node graphs for','string',''),
'node_depth':('Search depth for individual node graphs','int',1),
'font_name':('Font name','string','Times-Roman'),
'font_size':('Font size','int',8),
'png_max_size':('Maximum edge for png image','int',16768)
}
plugin_name='DOT Export'
file_types=('bmp','dot','eps','gif','jpg','pdf','png','ps','svg','tif','png+map','stdout')
def __init__(self):
FileExportPlugin.__init__(self)
#could make these customizeable in the general options dict
self.shapes={}
self.shapes['EDProducer']='box'
self.shapes['EDFilter']='invhouse'
self.shapes['EDAnalyzer']='house'
self.shapes['OutputModule']='invtrapezium'
self.shapes['ESSource']='Mdiamond'
self.shapes['ESProducer']='Msquare'
self.shapes['Source']='ellipse'
self.shapes['Service']='diamond'
def dotIndenter(self,dot):
"""
Simple indenter for dot output, mainly to prettify it for human reading.
"""
spaces = lambda d: ''.join([space]*d)
newdot = ""
depth = 0
space = ' '
for line in dot.splitlines():
if '{' in line:
newdot += spaces(depth)+line+'\n'
depth += 1
elif '}' in line:
depth -= 1
newdot += spaces(depth)+line+'\n'
else:
newdot += spaces(depth)+line+'\n'
return newdot
    def selectNode(self,dotdata,node,depth_s):
        """
        Restrict a DOT graph to the neighbourhood of a single node.

        dotdata -- full DOT source text to filter.
        node -- name of the node to focus on; must be defined in ``dotdata``.
        depth_s -- search depth (int or numeric string). A negative value
            selects "backtrace" mode, where edges are only followed backwards
            (target to source); otherwise edges are followed in both
            directions. ``abs(depth_s)`` bounds the distance from ``node``.

        Returns the filtered DOT text with the graph title amended to record
        the selection and the chosen node coloured red.
        Raises Exception if ``node`` does not appear in the graph.
        """
        depth = int(depth_s)
        backtrace=False
        if depth<0:
            # Negative depth requests a reverse-only ("backtrace") walk.
            depth = abs(depth)
            backtrace=True
        # Line-oriented patterns over the generated DOT source.
        re_link = re.compile(r'^\s*?(\w*?)->(\w*?)(?:\[.*?\])?$',re.MULTILINE)
        re_nodedef = re.compile(r'^\s*?(\w*?)(?:\[.*?\])?$',re.MULTILINE)
        re_title = re.compile(r'^label=\"(.*?)\"$',re.MULTILINE)
        re_nodeprops = re.compile(r'^\s*?('+node+r')\[(.*?)\]$',re.MULTILINE)
        nodes = re_nodedef.findall(dotdata)
        if not node in nodes:
            raise Exception("Selected node (%s) not found" % (node))
        links_l = re_link.findall(dotdata)
        # Build an adjacency map. Reverse edges are always recorded; forward
        # edges only when not in backtrace mode, so the normal search is
        # effectively undirected while backtrace only walks upstream.
        links = {}
        for link in links_l:
            if not backtrace:
                if link[0] in links:
                    links[link[0]] += [link[1]]
                else:
                    links[link[0]] = [link[1]]
            if link[1] in links:
                links[link[1]] += [link[0]]
            else:
                links[link[1]] = [link[0]]
        def node_recursor(links,depthleft,start):
            # Collect 'start' plus every node reachable from it within
            # depthleft+1 hops through the adjacency map (duplicates allowed;
            # the caller collapses them into a set).
            if start in links:
                if depthleft==0:
                    return links[start]+[start]
                else:
                    result = [start]
                    for l in links[start]:
                        result.extend(node_recursor(links,depthleft-1,l))
                    return result
            else:
                return [start]
        include_nodes = set(node_recursor(links,depth-1,node))
        include_nodes.add(node)
        # Callable regex replacements: drop any edge or node definition that
        # touches a node outside the selected set.
        class link_replacer:
            def __init__(self,include_nodes):
                self.include_nodes=include_nodes
            def __call__(self,match):
                if match.group(1) in self.include_nodes and match.group(2) in self.include_nodes:
                    return match.group(0)
                return ''
        class node_replacer:
            def __init__(self,include_nodes):
                self.include_nodes=include_nodes
            def __call__(self,match):
                if match.group(1) in self.include_nodes:
                    return match.group(0)
                return ''
        dotdata = re_link.sub(link_replacer(include_nodes),dotdata)
        dotdata = re_nodedef.sub(node_replacer(include_nodes),dotdata)
        # Record the selection in the graph title and highlight the chosen
        # node in red (first match only for both substitutions).
        dotdata = re_title.sub(r'label="\g<1>\\nDepth '+str(depth_s)+r' from node ' +node+r'"',dotdata,1)
        dotdata = re_nodeprops.sub('\\g<1>[\\g<2>,color="red"]',dotdata,1)
        return dotdata
def processMap(self,mapdata):
"""
Re-process the client-side image-map produces when png+map is selected.
DOT will only ever put a single URL in the imagemap corresponding to a node, with the 'url' parameter (after html encoding) as the url, and the 'title' parameter as the title. This isn't useful behaviour for our purposes. We want probably several link areas, or a javascript link to make a menu appear, or other more complex behaviour.
If the option 'urlprocess' is turned on, this function is called, and it expects to find a dictionary it can eval in the url parameter. I can't think of a less messy way of passing | |
not self.CTvertices:
return
text = """
# This file was automatically created by The UFO_usermod
from object_library import all_vertices, all_CTvertices, Vertex, CTVertex
import particles as P
import couplings as C
import lorentz as L
"""
text += self.create_file_content(self.CTvertices)
ff = open(os.path.join(outputdir, 'CT_vertices.py'), 'w')
ff.writelines(text)
ff.close()
return
def write_couplings(self, outputdir):
""" """
text = """
# This file was automatically created by The UFO_usermod
from object_library import all_couplings, Coupling
"""
text += self.create_file_content(self.couplings)
ff = open(os.path.join(outputdir, 'couplings.py'), 'w')
ff.writelines(text)
ff.close()
return
def write_lorentz(self, outputdir):
""" """
text = """
# This file was automatically created by The UFO_usermod
from object_library import all_lorentz, Lorentz
"""
text += self.create_file_content(self.lorentz)
ff = open(os.path.join(outputdir, 'lorentz.py'), 'w')
ff.writelines(text)
ff.close()
return
def write_parameters(self, outputdir):
""" """
text = """
# This file was automatically created by The UFO_usermod
from object_library import all_parameters, Parameter
"""
text += self.create_file_content(self.parameters)
ff = open(os.path.join(outputdir, 'parameters.py'), 'w')
ff.writelines(text)
ff.close()
return
def write_ctparameters(self, outputdir):
""" """
if not self.CTparameters:
return
text = """
# This file was automatically created by The UFO_usermod
from object_library import all_CTparameters, CTParameter
from function_library import complexconjugate, re, im, csc, sec, acsc, asec, cot
"""
text += self.create_file_content(self.CTparameters)
ff = open(os.path.join(outputdir, 'CT_parameters.py'), 'w')
ff.writelines(text)
ff.close()
return
def write_orders(self, outputdir):
""" """
text = """
# This file was automatically created by The UFO_usermod
from object_library import all_orders, CouplingOrder
"""
text += self.create_file_content(self.orders)
ff = open(os.path.join(outputdir, 'coupling_orders.py'), 'w')
ff.writelines(text)
ff.close()
return
def write_functions(self, outputdir):
""" """
text = """
# This file was automatically created by The UFO_usermod
import cmath
from object_library import all_functions, Function
"""
text += self.create_file_content(self.functions)
ff = open(os.path.join(outputdir, 'function_library.py'), 'w')
ff.writelines(text)
ff.close()
return
def write_propagators(self, outputdir):
""" """
text = """
# This file was automatically created by The UFO_usermod
from object_library import all_propagators, Propagator
"""
text += self.create_file_content(self.propagators)
ff = open(os.path.join(outputdir, 'propagators.py'), 'w')
ff.writelines(text)
ff.close()
return
def write_external_files(self, outputdir):
"""Copy/merge the routines written in Fortran/C++/pyhton"""
#1. Special case for the formfactor written in Fortran
re_fct = re.compile('''^\s{7,70}[\w\s]*function (\w*)\(''',re.M+re.I)
present_fct = set()
for dirpath in self.all_path:
if os.path.exists(pjoin(dirpath, 'Fortran', 'functions.f')):
text = open(pjoin(dirpath, 'Fortran', 'functions.f')).read()
new_fct = re_fct.findall(text)
nb_old = len(present_fct)
nb_added = len(new_fct)
new_fct = set([f.lower() for f in new_fct])
present_fct.update(new_fct)
if len(present_fct) < nb_old + nb_added:
logger.critical('''Some Functions in functions.f are define in more than one model.
This require AT LEAST manual modification of the resulting file. But more likely the
model need to be consider as un-physical! Use it very carefully.''')
if not os.path.exists(pjoin(outputdir, 'Fortran')):
os.mkdir(pjoin(outputdir, 'Fortran'))
fsock = open(pjoin(outputdir, 'Fortran','functions.f'),'a')
fsock.write(text)
fsock.close()
#2. Ohter files present in Fortran/Cpp/Python directory
# ASk user to handle it if any!
for dirpath in self.all_path:
for subdir in ['Fortran', 'CPP', 'Python']:
if os.path.exists(pjoin(dirpath, subdir)):
for filepath in os.listdir(pjoin(dirpath, subdir)):
if filepath == 'functions.f':
continue
if '.' not in filepath:
continue
logger.warning('Manual HELAS routine associated to the model. Those are not modified automaticaly!! So you need to manually checked them')
nb = 0
name, extension = filepath.rsplit('.', 1)
while 1:
filename = '%s%s%s' %(name, '.moved' * nb, extension)
if os.path.exists(pjoin(outputdir, subdir, filename)):
nb+=1
else:
break
if not os.path.exists(pjoin(outputdir, subdir)):
os.mkdir(pjoin(outputdir, subdir))
files.cp(pjoin(dirpath, subdir, filepath), pjoin(outputdir, subdir, filename))
def get_particle(self, name):
""" """
for part in self.particles:
if part.name == name:
return part
raise USRMODERROR('no particle %s in the model' % name)
def add_parameter(self, parameter, identify_pid={}):
"""wrapper to call the correct function"""
if parameter.nature == 'internal':
self.add_internal_parameter(parameter)
else:
self.add_external_parameter(parameter, identify_pid)
    def add_particle(self, particle, identify=None):
        """Add a plugin particle to the base model in a consistent way.

        If ``identify`` is given, the particle must merge with the existing
        base-model particle of that name. Otherwise, a match is searched by
        name (exact first, then case-insensitive), falling back to pdg code.
        Matched particles are merged (``particle.replace`` is set and
        mass/width consistency is checked); name clashes with different pdg
        codes are resolved by renaming the plugin particle with the addon
        suffix; genuinely new particles are appended to ``self.particles``.
        """
        name = particle.name
        if identify:
            # Caller asked to merge with a specific base-model particle name.
            name = identify
        old_part = next((p for p in self.particles if p.name==name), None)
        if not old_part:
            # No exact-name match: fall back to a case-insensitive search.
            first = True
            for p in self.particles:
                if p.name.lower() == name.lower():
                    if not first:
                        # NOTE(review): bare Exception with no message; more
                        # than one case-insensitive match is treated as fatal.
                        raise Exception
                    else:
                        first =False
                    old_part = p
        if old_part:
            #Check if the two particles have the same pdgcode
            if old_part.pdg_code == particle.pdg_code:
                particle.replace = old_part
                return self.check_mass_width_of_particle(old_part, particle)
            elif identify:
                # Explicit identification requires matching quantum numbers.
                if particle.spin != old_part.spin:
                    raise USRMODERROR("identify particles should have the same spin")
                elif particle.color != old_part.color:
                    raise USRMODERROR("identify particles should have the same color")
                particle.replace = old_part
                return self.check_mass_width_of_particle(old_part, particle)
            else:
                # Same name, different pdg code: keep both, rename the plugin one.
                logger.warning('The particle name \'%s\' is present in both model with different pdg code' % name)
                logger.warning('The particle coming from the plug-in model will be rename to \'%s%s\'' % (name, self.addon))
                particle.name = '%s%s' % (name, self.addon)
                self.particles.append(particle)
                return
        elif identify:
            raise USRMODERROR("Particle %s is not in the model" % identify)
        # No name match: try to merge by pdg code, otherwise add as new.
        pdg = particle.pdg_code
        if pdg in self.particle_dict:
            particle.replace = self.particle_dict[pdg]
            return self.check_mass_width_of_particle(self.particle_dict[pdg], particle)
        else:
            # Ensure no stale merge marker survives from a previous attempt.
            if hasattr(particle, 'replace'):
                del particle.replace
            self.particles.append(particle)
def check_mass_width_of_particle(self, p_base, p_plugin):
# Check the mass
if p_base.mass.name != p_plugin.mass.name:
#different name but actually the same
if p_plugin.mass.name in self.old_new:
if self.old_new[p_plugin.mass.name] != p_base.mass.name:
raise USRMODERROR('Some inconsistency in the mass assignment in the model: equivalent of %s is %s != %s ' % ( p_plugin.mass.name, self.old_new[p_plugin.mass.name], p_base.mass.name))
elif p_base.mass.name.lower() == 'zero':
p_base.mass = p_plugin.mass
elif p_plugin.mass.name.lower() == 'zero':
pass
else:
misc.sprint(p_base.mass.value, p_plugin.mass.value, dir(p_base.mass))
misc.sprint(p_base.mass.nature, p_plugin.mass.nature)
misc.sprint(self.old_new)
raise USRMODERROR('Some inconsistency in the mass assignment in the model\n' + \
' Mass: %s and %s\n' %(p_base.mass.name, p_plugin.mass.name) + \
' conflict name %s\n' % self.old_new + \
' pdg_code: %s %s' % (p_base.pdg_code, p_plugin.pdg_code))
# Check the width
if p_base.width.name != p_plugin.width.name:
#different name but actually the same
if p_plugin.width.name in self.old_new:
if self.old_new[p_plugin.width.name] != p_base.width.name:
raise USRMODERROR('Some inconsistency in the mass assignment in the model')
elif p_base.width.name.lower() == 'zero':
p_base.width = p_plugin.width
elif p_plugin.width.name.lower() == 'zero':
pass
else:
raise USRMODERROR('Some inconsistency in the mass assignment in the model')
return
def add_external_parameter(self, parameter, identify_pid):
"""adding a param_card parameter inside the current model.
if the parameter block/lhcode already exists then just do nothing
(but if the name are different then keep the info for future translation)
If the name already exists in the model. raise an exception.
"""
name = parameter.name
# check if a parameter already has this name
old_param = next((p for p in self.parameters if p.name==name), None)
if old_param:
if old_param.lhablock == parameter.lhablock and \
old_param.lhacode == parameter.lhacode:
return #Nothing to do!
else:
logger.info('The two model defines the parameter \'%s\'\n' % parameter.name +
' the original model for %s :%s\n' %(old_param.lhablock, old_param.lhacode)+
' the plugin for %s :%s\n' %(parameter.lhablock,parameter.lhacode)+
' We will rename the one from the plugin to %s%s' % (parameter.name, self.addon))
if old_param.nature == 'internal':
logger.warning('''The parameter %s is actually an internal parameter of the base model.
his value is given by %s.
If those two parameters are expected to be identical, you need to provide the value in the param_card according to this formula.
''')
#add the parameter with a new name.
self.old_new[parameter.name] = '%s%s' % (parameter.name, self.addon)
parameter.name = '%s%s' % (parameter.name, self.addon)
#
#self.parameters.append(parameter)
#return
#check if a parameter already has this lhablock/code information
lhacode = parameter.lhacode
if parameter.lhablock.lower() in ['mass', 'decay']:
if int(parameter.lhacode[0]) in identify_pid:
lhacode = [identify_pid[int(parameter.lhacode[0])]]
old_param = next((p for p in self.parameters if p.lhacode==lhacode \
and p.lhablock==parameter.lhablock), None)
if old_param:
logger.info('The two model defines the block \'%s\' with id \'%s\' with different parameter name \'%s\', \'%s\'\n'\
% (old_param.lhablock, old_param.lhacode, parameter.name, old_param.name) + \
' We will merge those two parameters in a single one')
if parameter.name in list(self.old_new.values()):
key = [k for k in self.old_new if self.old_new[k] == parameter.name][0]
self.old_new[key] = old_param.name
self.old_new[parameter.name] = old_param.name
else:
self.old_new[parameter.name] = old_param.name
# self.add_internal_parameter(iden_param)
elif parameter.lhablock.lower() in ['mass', 'decay'] and int(parameter.lhacode[0]) in identify_pid:
# this means that the parameter is an internal parameter in the original model...
#find it via the particle name
orig_particle = self.particle_dict[lhacode[0]]
if parameter.lhablock.lower() == 'mass':
old_param = orig_particle.mass
else:
old_param = orig_particle.width
if old_param.name.lower() == 'zero':
#Just add the new parameter to the current list
self.parameters.append(parameter)
self.new_external.append(parameter)
else:
logger.info('The two model defines the parameter for block \'%s\' with id \'%s\' with different parameter name \'%s\', \'%s\'\n'\
% (parameter.lhablock.lower(), lhacode[0], parameter.name, old_param.name) + \
' We will merge those two parameters in a single one')
| |
# This file is part of astro_metadata_translator.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (http://www.lsst.org).
# See the LICENSE file at the top-level directory of this distribution
# for details of code ownership.
#
# Use of this source code is governed by a 3-clause BSD-style
# license that can be found in the LICENSE file.
__all__ = ("read_index", "calculate_index", "index_files", "process_index_data")
"""Functions to support file indexing."""
import collections.abc
import json
import logging
import os
import sys
from copy import deepcopy
from .observationInfo import ObservationInfo
from .observationGroup import ObservationGroup
from .headers import merge_headers
from .file_helpers import read_file_info
log = logging.getLogger(__name__)
COMMON_KEY = "__COMMON__"
CONTENT_KEY = "__CONTENT__"
def index_files(files, root, hdrnum, print_trace, content, outstream=sys.stdout, errstream=sys.stderr):
    """Create an index from the supplied files.

    No file is written. The Python structure returned is suitable
    for writing.

    Parameters
    ----------
    files : iterable of `str`
        Paths to the files to be indexed. They do not have to all be
        in a single directory but all content will be indexed into a single
        index.
    root : `str`
        Directory root that can be combined with each file (if the supplied)
        file is relative. Will be ignored if `None`.
    hdrnum : `int`
        The HDU number to read. The primary header is always read as well
        (presumably merged with this HDU by `read_file_info` -- confirm).
    print_trace : `bool`
        If there is an error reading the file and this parameter is `True`,
        a full traceback of the exception will be reported. If `False` prints
        a one line summary of the error condition. If `None` the exception
        will be allowed.
    content : `str`
        Form of data to write in index file. Options are:
        ``translated`` (default) to write ObservationInfo to the index;
        ``metadata`` to write native metadata headers to the index.
        The index file is called ``{mode}_index.json``
    outstream : `io.StringIO`, optional
        Output stream to use for standard messages. Defaults to `sys.stdout`.
    errstream : `io.StringIO`, optional
        Stream to send messages that would normally be sent to standard
        error. Defaults to `sys.stderr`.

    Returns
    -------
    file_index : `dict` of [`str`, `dict`]
        The headers in form suitable for writing to an index. The keys will
        be ``__COMMON__`` for shared content, ``__CONTENT__`` to record the
        content mode used to construct the index, and paths to the files. The
        paths will be the supplied paths and will not include any supplied
        ``root``.
    okay : `list` of `str`
        All the files that were processed successfully.
    failed : `list` of `str`
        All the files that could not be processed. Will be empty if
        ``print_trace`` is not `None`.
    """
    if content not in ("translated", "metadata"):
        # Bug fix: this message previously lacked the f-prefix (printing the
        # literal "{mode}") and referenced a variable that does not exist.
        raise ValueError(f"Unrecognized content mode: {content}")
    failed = []
    okay = []
    content_by_file = {}  # Mapping of path within index to file content
    for file in sorted(files):
        path = os.path.join(root, file) if root is not None else file
        simple = read_file_info(path, hdrnum, print_trace, content, "simple", outstream, errstream)
        if simple is None:
            failed.append(path)
            continue
        okay.append(path)
        # Store the information indexed by the filename within dir
        content_by_file[file] = simple
    output = calculate_index(content_by_file, content)
    return output, okay, failed
def calculate_index(headers, content_mode):
    """Calculate an index data structure from the supplied headers.

    Parameters
    ----------
    headers : `dict` of [`str`, `dict`]
        The headers indexed by filename.
    content_mode : `str`
        The mode associated with these headers; recorded in the structure
        so deserialization knows how to interpret it.

    Returns
    -------
    index_ : `dict` of [`str`, `dict`]
        The headers in form suitable for writing to an index: the shared
        content under ``__COMMON__``, the mode under ``__CONTENT__``, and
        one per-file diff entry per input header.
    """
    if content_mode not in ("metadata", "translated"):
        raise ValueError(f"Unrecognized mode for index creation: {content_mode}")
    # Collapse everything into one shared header plus per-file differences.
    merged = merge_headers(headers.values(), mode="diff")
    # A single file can yield an LSST-style PropertyList rather than a dict;
    # JSON serialization requires a real Mapping-turned-dict.
    if not isinstance(merged, collections.abc.Mapping):
        merged = dict(merged)
    # With one file there is no diff entry, but downstream expects one per
    # file, so default to a single empty diff.
    per_file_diffs = merged.pop("__DIFF__", [dict()])
    # Mode first, then common headers, then the per-file entries.
    index_ = {CONTENT_KEY: content_mode, COMMON_KEY: merged}
    index_.update(zip(headers, per_file_diffs))
    return index_
def read_index(path, force_dict=False):
    """Read an index file.

    Parameters
    ----------
    path : `str`
        Path to the index file; must end in ``.json``.
    force_dict : `bool`, optional
        If `True` the structure returned will always be a dict keyed
        by filename.

    Returns
    -------
    index_ : `ObservationGroup` or `dict` of [`str`, `Union[dict, ObservationInfo]`]
        The return content matches that returned by `process_index_data`.
    """
    if not path.endswith(".json"):
        raise ValueError(f"Index files must be in .json format; got {path}")
    with open(path, "r") as fd:
        raw = json.load(fd)
    return process_index_data(raw, force_dict=force_dict)
def process_index_data(content, force_metadata=False, force_dict=False):
    """Process the content read from a JSON index file.

    Parameters
    ----------
    content : `dict`
        Data structure stored in JSON index file converted to simple python
        form.
    force_metadata : `bool`, optional
        By default the content returned will match the original form that
        was used for the index. If this parameter is `True` an index of
        `ObservationInfo` will be returned as if it was simple dict content.
    force_dict : `bool`, optional
        If `True` the structure returned will always be a dict keyed
        by filename.

    Returns
    -------
    index : `ObservationGroup` or `dict` of [`str`, `dict`]
        If the index file referred to `ObservationInfo` this will return
        an `ObservationGroup`, otherwise a `dict` keyed by file path with the
        common information merged into each entry. Overridable via
        ``force_metadata``; with ``force_dict`` a `dict` is returned even for
        `ObservationInfo` content.

    Notes
    -----
    File keys will be relative to the location of the index file.
    """
    if COMMON_KEY not in content:
        raise ValueError(f"No '{COMMON_KEY}' key found in dict. Does not look like an index data structure.")
    # Copy the input structure so we can update in place
    unpacked = deepcopy(content)
    content_mode = unpacked.pop(CONTENT_KEY, None)
    if force_metadata:
        content_mode = "metadata"
    elif content_mode is None:
        # Bug fix: this previously tested ``content is None``, which can
        # never be true here (content contains COMMON_KEY), so an index
        # missing the content key never defaulted to "metadata".
        log.warning("No '%s' key in data structure, assuming 'metadata'", CONTENT_KEY)
        content_mode = "metadata"
    # The common headers will be copied into each header
    common = unpacked.pop(COMMON_KEY)
    for file in unpacked:
        unpacked[file].update(common)
    if content_mode == "metadata":
        # nothing more to be done
        return unpacked
    obs_infos = []
    by_file = {}
    for file, hdr in unpacked.items():
        info = ObservationInfo.from_simple(hdr)
        info.filename = file
        obs_infos.append(info)
        by_file[file] = info
    if force_dict:
        return by_file
    return ObservationGroup(obs_infos)
def read_sidecar(path):
    """Read a metadata sidecar file.

    Parameters
    ----------
    path : `str`
        Path to the sidecar file; must end in ``.json``.

    Returns
    -------
    info : `ObservationInfo` or `dict` of [`str`, `dict`]
        The unpacked sidecar content, as returned by `process_sidecar_data`.
    """
    if not path.endswith(".json"):
        raise ValueError(f"Sidecar files must be in .json format; got {path}")
    with open(path, "r") as fd:
        sidecar = json.load(fd)
    return process_sidecar_data(sidecar)
def process_sidecar_data(content, force_metadata=False):
"""Process the content read from a JSON sidecar file.
Parameters
----------
content : `dict`
Data structure stored in JSON sidecar file converted to simple python
form.
force_metadata : `bool`, optional
By default the content returned will match the original form that
was used for the sidecar. If this parameter is `True` a sidecar of
`ObservationInfo` will be returned as if it was simple dict content.
Returns
-------
info : `ObservationInfo` or `dict` of [`str`, `dict`]
If the sidecar file referred to `ObservationInfo` this will return
an `ObservationGroup`, otherwise a `dict` will be returned. This
can be overridden using the ``force_metadata`` parameter.
"""
if not isinstance(content, dict):
raise TypeError(f"Content of sidecar must be a dict, not {type(content)}")
# Copy the input structure so | |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from future import standard_library
standard_library.install_aliases()
import json
import logging
import sqlparse
import sys
from django.urls import reverse
from django.db.models import Q
from django.views.decorators.http import require_GET, require_POST
import opentracing.tracer
from azure.abfs.__init__ import abfspath
from desktop.conf import TASK_SERVER, ENABLE_CONNECTORS
from desktop.lib.i18n import smart_str
from desktop.lib.django_util import JsonResponse
from desktop.lib.exceptions_renderable import PopupException
from desktop.models import Document2, Document, __paginate, _get_gist_document, FilesystemException
from indexer.file_format import HiveFormat
from indexer.fields import Field
from metadata.conf import OPTIMIZER
from notebook.conf import EXAMPLES
from notebook.connectors.base import Notebook, QueryExpired, SessionExpired, QueryError, _get_snippet_name, patch_snippet_for_connector
from notebook.connectors.hiveserver2 import HS2Api
from notebook.decorators import api_error_handler, check_document_access_permission, check_document_modify_permission
from notebook.models import escape_rows, make_notebook, upgrade_session_properties, get_api, _get_dialect_example
if sys.version_info[0] > 2:
from urllib.parse import unquote as urllib_unquote
from django.utils.translation import gettext as _
else:
from django.utils.translation import ugettext as _
from urllib import unquote as urllib_unquote
LOG = logging.getLogger(__name__)
DEFAULT_HISTORY_NAME = ''
@require_POST
@api_error_handler
def create_notebook(request):
  """Create a new notebook/editor document and return it serialized as JSON.

  POST parameters: 'type' (editor dialect), 'gist' (seed the statement from
  a gist document), 'directory_uuid' (target directory), 'blank' (skip the
  auto-opened example).
  """
  editor_type = request.POST.get('type', 'notebook')
  gist_id = request.POST.get('gist')
  directory_uuid = request.POST.get('directory_uuid')
  is_blank = request.POST.get('blank', 'false') == 'true'

  if gist_id:
    # Seed the new editor with the statement stored in the gist document.
    gist_doc = _get_gist_document(uuid=gist_id)
    statement = json.loads(gist_doc.data)['statement']
    editor = make_notebook(
        name='',
        description='',
        editor_type=editor_type,
        statement=statement,
        is_presentation_mode=True
    )
  else:
    editor = Notebook()
    if EXAMPLES.AUTO_OPEN.get() and not is_blank:
      # Open the example document of this dialect when one exists.
      document = _get_dialect_example(dialect=editor_type)
      if document:
        editor = Notebook(document=document)

  editor = upgrade_session_properties(request, editor)
  data = editor.get_data()

  if editor_type != 'notebook':
    data['name'] = ''
    data['type'] = 'query-%s' % editor_type  # TODO: Add handling for non-SQL types

  data['directoryUuid'] = directory_uuid
  editor.data = json.dumps(data)

  return JsonResponse({'status': 0, 'notebook': editor.get_data()})
@require_POST
@check_document_access_permission
@api_error_handler
def create_session(request):
  """Create a new interpreter session for the POSTed 'session' description."""
  response = {'status': -1}

  session = json.loads(request.POST.get('session', '{}'))
  props = session.get('properties', [])

  api = get_api(request, session)
  response['session'] = api.create_session(lang=session['type'], properties=props)
  response['status'] = 0

  return JsonResponse(response)
@require_POST
@check_document_access_permission
@api_error_handler
def close_session(request):
  """Close the POSTed session via the matching connector API."""
  response = {'status': -1}

  session = json.loads(request.POST.get('session', '{}'))

  api = get_api(request, {'type': session['type']})
  response['session'] = api.close_session(session=session)
  response['status'] = 0

  return JsonResponse(response)
def _execute_notebook(request, notebook, snippet):
  """Execute one snippet of a notebook, optionally persisting it as history.

  Returns a response dict containing the execution 'handle', history ids when
  the query was historified, and (for synchronous results) the escaped rows.
  QueryError is re-raised with history information attached to ``ex.extra``.
  """
  response = {'status': -1}
  result = None
  history = None
  active_executable = None
  # Only persist to history for real editor queries (or batch runs), and
  # never when the caller explicitly asked to skip historification.
  historify = (notebook['type'] != 'notebook' or snippet.get('wasBatchExecuted')) and not notebook.get('skipHistorify')
  try:
    try:
      sessions = notebook.get('sessions') and notebook['sessions'] # Session reference for snippet execution without persisting it
      active_executable = json.loads(request.POST.get('executable', '{}')) # Editor v2
      # TODO: Use statement, database etc. from active_executable
      if historify:
        history = _historify(notebook, request.user)
        notebook = Notebook(document=history).get_data()
      interpreter = get_api(request, snippet)
      if snippet.get('interface') == 'sqlalchemy':
        interpreter.options['session'] = sessions[0]
      with opentracing.tracer.start_span('interpreter') as span:
        # interpreter.execute needs the sessions, but we don't want to persist them
        pre_execute_sessions = notebook['sessions']
        notebook['sessions'] = sessions
        response['handle'] = interpreter.execute(notebook, snippet)
        notebook['sessions'] = pre_execute_sessions
      # Retrieve and remove the result from the handle
      if response['handle'].get('sync'):
        result = response['handle'].pop('result')
    finally:
      # Runs on success AND failure: record the outcome on the history doc.
      if historify:
        _snippet = [s for s in notebook['snippets'] if s['id'] == snippet['id']][0]
        if 'id' in active_executable: # Editor v2
          # notebook_executable is the 1-to-1 match of active_executable in the notebook structure
          notebook_executable = [e for e in _snippet['executor']['executables'] if e['id'] == active_executable['id']][0]
          if 'handle' in response:
            notebook_executable['handle'] = response['handle']
          if history:
            notebook_executable['history'] = {
              'id': history.id,
              'uuid': history.uuid
            }
            notebook_executable['operationId'] = history.uuid
        if 'handle' in response: # No failure
          if 'result' not in _snippet: # Editor v2
            _snippet['result'] = {}
          _snippet['result']['handle'] = response['handle']
          _snippet['result']['statements_count'] = response['handle'].get('statements_count', 1)
          _snippet['result']['statement_id'] = response['handle'].get('statement_id', 0)
          _snippet['result']['handle']['statement'] = response['handle'].get(
              'statement', snippet['statement']
          ).strip() # For non HS2, as non multi query yet
        else:
          _snippet['status'] = 'failed'
        if history: # If _historify failed, history will be None.
          # If we get Atomic block exception, something underneath interpreter.execute() crashed and is not handled.
          history.update_data(notebook)
          history.save()
          response['history_id'] = history.id
          response['history_uuid'] = history.uuid
          if notebook['isSaved']: # Keep track of history of saved queries
            response['history_parent_uuid'] = history.dependencies.filter(type__startswith='query-').latest('last_modified').uuid
  except QueryError as ex: # We inject the history information from _historify() to the failed queries
    if response.get('history_id'):
      ex.extra['history_id'] = response['history_id']
    if response.get('history_uuid'):
      ex.extra['history_uuid'] = response['history_uuid']
    if response.get('history_parent_uuid'):
      ex.extra['history_parent_uuid'] = response['history_parent_uuid']
    raise ex
  # Inject and HTML escape results
  if result is not None:
    response['result'] = result
    response['result']['data'] = escape_rows(result['data'])
  response['status'] = 0
  return response
@require_POST
@check_document_access_permission
@api_error_handler
def execute(request, dialect=None):
  """Execute the POSTed snippet, tracing the call, and return JSON."""
  payload = request.POST
  notebook = json.loads(payload.get('notebook', '{}'))
  snippet = json.loads(payload.get('snippet', '{}'))

  if dialect:
    # URL-provided dialect overrides the one stored in the notebook.
    notebook['dialect'] = dialect

  with opentracing.tracer.start_span('notebook-execute') as trace:
    trace.set_tag('user-id', request.user.username)

    response = _execute_notebook(request, notebook, snippet)

    trace.set_tag('query-id', response.get('handle', {}).get('guid'))

  return JsonResponse(response)
@require_POST
@check_document_access_permission
@api_error_handler
def check_status(request):
  """Poll the execution status of a statement and return it as JSON."""
  payload = request.POST
  operation_id = payload.get('operationId')
  notebook = json.loads(payload.get('notebook', '{}'))
  snippet = json.loads(payload.get('snippet', '{}'))

  with opentracing.tracer.start_span('notebook-check_status') as trace:
    trace.set_tag('user-id', request.user.username)
    trace.set_tag(
      'query-id',
      snippet.get('result', {}).get('handle', {}).get('guid')
    )
    response = _check_status(request, notebook=notebook, snippet=snippet, operation_id=operation_id)

  return JsonResponse(response)
def _check_status(request, notebook=None, snippet=None, operation_id=None):
  """Query the connector for a snippet's status and sync it onto the document.

  When ``operation_id`` is given (or no snippet was supplied) the notebook is
  reloaded from the saved document. The ``finally`` block persists the
  resulting status (and ``has_result_set``) back onto the saved document even
  when the status check raised SessionExpired/QueryExpired, which are then
  re-raised to the caller.
  """
  response = {'status': -1}
  if operation_id or not snippet: # To unify with _get_snippet
    nb_doc = Document2.objects.get_by_uuid(user=request.user, uuid=operation_id or notebook['uuid'])
    notebook = Notebook(document=nb_doc).get_data() # Used below
    snippet = notebook['snippets'][0]
  try:
    response['query_status'] = get_api(request, snippet).check_status(notebook, snippet)
    response['status'] = 0
  except SessionExpired:
    response['status'] = 'expired'
    raise
  except QueryExpired:
    response['status'] = 'expired'
    raise
  finally:
    # Derive the status to persist from what happened above.
    # NOTE(review): this compares snippet['status'] against the whole
    # query_status dict, not its 'status' field -- looks intentional as a
    # cheap "changed?" test, but confirm.
    if response['status'] == 0 and snippet['status'] != response['query_status']:
      status = response['query_status']['status']
    elif response['status'] == 'expired':
      status = 'expired'
    else:
      status = 'failed'
    if response.get('query_status'):
      has_result_set = response['query_status'].get('has_result_set')
    else:
      has_result_set = None
    # Persist the new status onto the saved editor document (if writable).
    if notebook.get('dialect') or notebook['type'].startswith('query') or notebook.get('isManaged'):
      nb_doc = Document2.objects.get_by_uuid(user=request.user, uuid=operation_id or notebook['uuid'])
      if nb_doc.can_write(request.user):
        nb = Notebook(document=nb_doc).get_data()
        if status != nb['snippets'][0]['status'] or has_result_set != nb['snippets'][0].get('has_result_set'):
          nb['snippets'][0]['status'] = status
          if has_result_set is not None:
            nb['snippets'][0]['has_result_set'] = has_result_set
            nb['snippets'][0]['result']['handle']['has_result_set'] = has_result_set
          nb_doc.update_data(nb)
          nb_doc.save()
  return response
@require_POST
@check_document_access_permission
@api_error_handler
def fetch_result_data(request):
  """Django view: return a page of result rows for an executing snippet.

  POST params: operationId, notebook (JSON), snippet (JSON),
  rows (page size, default 100), startOver ('true' to restart at row 0).
  """
  operation_id = request.POST.get('operationId')
  notebook = json.loads(request.POST.get('notebook', '{}'))
  snippet = json.loads(request.POST.get('snippet', '{}'))
  rows = json.loads(request.POST.get('rows', '100'))
  start_over = json.loads(request.POST.get('startOver', 'false'))

  with opentracing.tracer.start_span('notebook-fetch_result_data') as span:
    span.set_tag('user-id', request.user.username)
    span.set_tag(
      'query-id',
      snippet['result']['handle']['guid'] if snippet['result'].get('handle') and snippet['result']['handle'].get('guid') else None
    )
    response = _fetch_result_data(request, notebook, snippet, operation_id, rows=rows, start_over=start_over)

  response['status'] = 0

  return JsonResponse(response)
def _fetch_result_data(request, notebook=None, snippet=None, operation_id=None, rows=100, start_over=False, nulls_only=False):
  """Fetch a page of query results and HTML-escape tabular data exactly once."""
  snippet = _get_snippet(request.user, notebook, snippet, operation_id)
  result = get_api(request, snippet).fetch_result(notebook, snippet, rows, start_over)

  # Materialize and HTML escape results; the isEscaped flag guarantees the
  # escaping is applied only once per payload.
  needs_escaping = (
    result.get('data')
    and result.get('type') == 'table'
    and not result.get('isEscaped')
  )
  if needs_escaping:
    result['data'] = escape_rows(result['data'], nulls_only=nulls_only)
    result['isEscaped'] = True

  return {'result': result}
@require_POST
@check_document_access_permission
@api_error_handler
def fetch_result_metadata(request):
  """Django view: return result-set metadata for a snippet via its connector API."""
  response = {'status': -1}
  operation_id = request.POST.get('operationId')
  notebook = json.loads(request.POST.get('notebook', '{}'))
  snippet = json.loads(request.POST.get('snippet', '{}'))
  snippet = _get_snippet(request.user, notebook, snippet, operation_id)

  with opentracing.tracer.start_span('notebook-fetch_result_metadata') as span:
    response['result'] = get_api(request, snippet).fetch_result_metadata(notebook, snippet)
    span.set_tag('user-id', request.user.username)
    span.set_tag(
      'query-id',
      snippet['result']['handle']['guid'] if snippet['result'].get('handle') and snippet['result']['handle'].get('guid') else None
    )
  response['status'] = 0

  return JsonResponse(response)
@require_POST
@check_document_access_permission
@api_error_handler
def fetch_result_size(request):
  """Django view: return the size of a snippet's result set via its connector API."""
  response = {'status': -1}
  operation_id = request.POST.get('operationId')
  notebook = json.loads(request.POST.get('notebook', '{}'))
  snippet = json.loads(request.POST.get('snippet', '{}'))
  notebook = _get_notebook(request.user, notebook, operation_id)
  snippet = _get_snippet(request.user, notebook, snippet, operation_id)

  with opentracing.tracer.start_span('notebook-fetch_result_size') as span:
    response['result'] = get_api(request, snippet).fetch_result_size(notebook, snippet)
    span.set_tag('user-id', request.user.username)
    span.set_tag(
      'query-id',
      snippet['result']['handle']['guid'] if snippet['result'].get('handle') and snippet['result']['handle'].get('guid') else None
    )
  response['status'] = 0

  return JsonResponse(response)
@require_POST
@check_document_access_permission
@api_error_handler
def cancel_statement(request):
  """Django view: cancel the execution of a running snippet."""
  response = {'status': -1}
  notebook = json.loads(request.POST.get('notebook', '{}'))
  snippet = json.loads(request.POST.get('snippet', '{}'))
  # Fall back to the notebook uuid when no explicit operation id was posted.
  operation_id = request.POST.get('operationId') or notebook['uuid']
  snippet = _get_snippet(request.user, notebook, snippet, operation_id)

  with opentracing.tracer.start_span('notebook-cancel_statement') as span:
    response['result'] = get_api(request, snippet).cancel(notebook, snippet)
    span.set_tag('user-id', request.user.username)
    span.set_tag(
      'query-id',
      snippet['result']['handle']['guid'] if snippet['result'].get('handle') and snippet['result']['handle'].get('guid') else None
    )
  response['status'] = 0

  return JsonResponse(response)
@require_POST
@check_document_access_permission
@api_error_handler
def get_logs(request):
  """Django view: return execution logs, job links, progress and full-log flag
  for a snippet.

  POST params: operationId, notebook (JSON), snippet (JSON), from/size for
  paging of the log stream, full_log with the log accumulated so far.
  """
  response = {'status': -1}
  operation_id = request.POST.get('operationId')
  notebook = json.loads(request.POST.get('notebook', '{}'))
  snippet = json.loads(request.POST.get('snippet', '{}'))
  notebook = _get_notebook(request.user, notebook, operation_id)
  if operation_id and not notebook.get('uuid'):
    notebook['uuid'] = operation_id

  # Optional paging of the log stream.
  startFrom = request.POST.get('from')
  startFrom = int(startFrom) if startFrom else None
  size = request.POST.get('size')
  size = int(size) if size else None
  full_log = smart_str(request.POST.get('full_log', ''))
  snippet = _get_snippet(request.user, notebook, snippet, operation_id)

  db = get_api(request, snippet)

  with opentracing.tracer.start_span('notebook-get_logs') as span:
    logs = smart_str(db.get_log(notebook, snippet, startFrom=startFrom, size=size))
    span.set_tag('user-id', request.user.username)
    span.set_tag(
      'query-id',
      snippet['result']['handle']['guid'] if snippet['result'].get('handle') and snippet['result']['handle'].get('guid') else None
    )
  full_log += logs
  jobs = db.get_jobs(notebook, snippet, full_log)

  response['logs'] = logs.strip()
  # Progress is capped at 99 until the snippet reaches a terminal state.
  response['progress'] = min(
    db.progress(notebook, snippet, logs=full_log),
    99
  ) if snippet['status'] != 'available' and snippet['status'] != 'success' else 100
  response['jobs'] = jobs
  response['isFullLogs'] = db.get_log_is_full_log(notebook, snippet)
  response['status'] = 0

  return JsonResponse(response)
def _save_notebook(notebook, user):
if notebook['snippets'][0].get('connector') and notebook['snippets'][0]['connector'].get('dialect'): # TODO Connector unification
notebook_type = 'query-%(dialect)s' % notebook['snippets'][0]['connector']
if notebook['snippets'][0] and notebook['snippets'][0].get('executor'):
notebook['snippets'][0]['executor']['executables'] = []
else:
| |
masks</b><br>
""")
self.setWindowTitle('Multiple segm.npz files detected')
is_win = sys.platform.startswith("win")
mainLayout = QVBoxLayout()
infoLayout = QHBoxLayout()
selectionLayout = QGridLayout()
buttonsLayout = QHBoxLayout()
# Standard Qt Question icon
label = QLabel()
standardIcon = getattr(QStyle, 'SP_MessageBoxQuestion')
icon = self.style().standardIcon(standardIcon)
pixmap = icon.pixmap(60, 60)
label.setPixmap(pixmap)
infoLayout.addWidget(label)
infoLabel = QLabel(informativeText)
infoLayout.addWidget(infoLabel)
infoLayout.addStretch(1)
mainLayout.addLayout(infoLayout)
questionText = html_utils.paragraph(
'Select which segmentation file to load:'
)
label = QLabel(questionText)
listWidget = QListWidget()
listWidget.addItems(images_ls)
listWidget.setCurrentRow(0)
self.items = list(images_ls)
self.listWidget = listWidget
okButton = widgets.okPushButton(' Load selected ')
okAndRemoveButton = QPushButton(
'Load selected and delete the other files'
)
okAndRemoveButton.setIcon(QIcon(':bin.svg'))
txt = 'Reveal in Finder...' if is_mac else 'Show in Explorer...'
showInFileManagerButton = widgets.showInFileManagerButton(txt)
cancelButton = widgets.cancelPushButton(' Cancel ')
buttonsLayout.addStretch(1)
buttonsLayout.addWidget(cancelButton)
buttonsLayout.addWidget(showInFileManagerButton)
buttonsLayout.addSpacing(20)
buttonsLayout.addWidget(okButton)
buttonsLayout.setContentsMargins(0, 10, 0, 10)
selectionLayout.addWidget(label, 0, 1, alignment=Qt.AlignLeft)
selectionLayout.addWidget(listWidget, 1, 1)
selectionLayout.setColumnStretch(0, 1)
selectionLayout.setColumnStretch(1, 3)
selectionLayout.setColumnStretch(2, 1)
selectionLayout.addLayout(buttonsLayout, 2, 1)
mainLayout.addLayout(selectionLayout)
self.setLayout(mainLayout)
self.okButton = okButton
self.okAndRemoveButton = okAndRemoveButton
# Connect events
okButton.clicked.connect(self.ok_cb)
okAndRemoveButton.clicked.connect(self.ok_cb)
cancelButton.clicked.connect(self.close)
showInFileManagerButton.clicked.connect(self.showInFileManager)
    def showInFileManager(self, checked=True):
        # Open the folder containing the segmentation files in the OS file manager.
        myutils.showInExplorer(self.parent_path)
def ok_cb(self, event):
self.cancel = False
self.selectedItemText = self.listWidget.selectedItems()[0].text()
self.selectedItemIdx = self.items.index(self.selectedItemText)
self.close()
    def exec_(self):
        # Mimic QDialog.exec_() by showing with a blocking local event loop.
        self.show(block=True)
    def show(self, block=False):
        # Keep the dialog on top; optionally block in a local event loop
        # until closeEvent exits it.
        self.setWindowFlags(Qt.Dialog | Qt.WindowStaysOnTopHint)
        super().show()
        if block:
            self.loop = QEventLoop()
            self.loop.exec_()
    def closeEvent(self, event):
        # Quit the local event loop (if any) so a blocking show()/exec_() returns.
        if hasattr(self, 'loop'):
            self.loop.exit()
class QDialogPbar(QDialog):
    """Progress dialog with a main progress bar, an ETA label and a secondary
    bar for metrics computation.

    Closing is refused until the worker signals completion; the user can abort
    with Ctrl+Alt+C (Control+Cmd+C on macOS) after confirming.
    """
    def __init__(self, title='Progress', infoTxt='', parent=None):
        self.workerFinished = False
        self.aborted = False
        self.clickCount = 0
        super().__init__(parent)
        abort_text = 'Control+Cmd+C to abort' if is_mac else 'Ctrl+Alt+C to abort'
        self.setWindowTitle(f'{title} ({abort_text})')
        self.setWindowFlags(Qt.Window)
        mainLayout = QVBoxLayout()
        pBarLayout = QGridLayout()
        if infoTxt:
            infoLabel = QLabel(infoTxt)
            mainLayout.addWidget(infoLabel, alignment=Qt.AlignCenter)
        self.progressLabel = QLabel()
        # Main progress bar: light-green highlight, black text.
        self.QPbar = QProgressBar(self)
        self.QPbar.setValue(0)
        palette = QtGui.QPalette()
        palette.setColor(QtGui.QPalette.Highlight, QtGui.QColor(207, 235, 155))
        palette.setColor(QtGui.QPalette.Text, QtGui.QColor(0, 0, 0))
        palette.setColor(QtGui.QPalette.HighlightedText, QtGui.QColor(0, 0, 0))
        self.QPbar.setPalette(palette)
        pBarLayout.addWidget(self.QPbar, 0, 0)
        # 'ND' placeholders until a real estimate is available.
        self.ETA_label = QLabel('NDh:NDm:NDs')
        pBarLayout.addWidget(self.ETA_label, 0, 1)
        # Secondary bar for metrics computation, same palette.
        self.metricsQPbar = QProgressBar(self)
        self.metricsQPbar.setValue(0)
        palette = QtGui.QPalette()
        palette.setColor(QtGui.QPalette.Highlight, QtGui.QColor(207, 235, 155))
        palette.setColor(QtGui.QPalette.Text, QtGui.QColor(0, 0, 0))
        palette.setColor(QtGui.QPalette.HighlightedText, QtGui.QColor(0, 0, 0))
        self.metricsQPbar.setPalette(palette)
        pBarLayout.addWidget(self.metricsQPbar, 1, 0)
        #pBarLayout.setColumnStretch(2, 1)
        mainLayout.addWidget(self.progressLabel)
        mainLayout.addLayout(pBarLayout)
        self.setLayout(mainLayout)
        # self.setModal(True)

    def keyPressEvent(self, event):
        # Ctrl+Alt+C triggers the abort confirmation dialog.
        isCtrlAlt = event.modifiers() == (Qt.ControlModifier | Qt.AltModifier)
        if isCtrlAlt and event.key() == Qt.Key_C:
            doAbort = self.askAbort()
            if doAbort:
                self.aborted = True
                self.workerFinished = True
                self.close()

    def askAbort(self):
        """Ask the user to confirm the (unsafe) abort; return True if confirmed."""
        msg = widgets.myMessageBox()
        txt = html_utils.paragraph("""
            Aborting with "Ctrl+Alt+C" is <b>not safe</b>.<br><br>
            The system status cannot be predicted and
            it will <b>require a restart</b>.<br><br>
            Are you sure you want to abort?
        """)
        yesButton, noButton = msg.critical(
            self, 'Are you sure you want to abort?', txt,
            buttonsTexts=('Yes', 'No')
        )
        return msg.clickedButton == yesButton

    def abort(self):
        # Repeated clicks (>3) force the dialog closed even if the worker
        # did not finish.
        self.clickCount += 1
        self.aborted = True
        if self.clickCount > 3:
            self.workerFinished = True
            self.close()

    def closeEvent(self, event):
        # Refuse to close while the worker is still running.
        if not self.workerFinished:
            event.ignore()
class QDialogModelParams(QDialog):
    """Dialog that collects the parameters needed to run a segmentation model:
    model-initialization parameters, 2D-segmentation parameters and optional
    post-processing settings. Results are exposed as init_kwargs,
    segment2D_kwargs, minSize, minSolidity, maxElongation, applyPostProcessing
    after the user clicks Ok (self.cancel stays True otherwise).
    """
    def __init__(
            self, init_params, segment_params, model_name,
            url=None, parent=None):
        # True until the user confirms with Ok.
        self.cancel = True
        super().__init__(parent)
        self.setWindowTitle(f'{model_name} parameters')

        mainLayout = QVBoxLayout()
        buttonsLayout = QHBoxLayout()

        initGroupBox, self.init_argsWidgets = self.createGroupParams(
            init_params,
            'Parameters for model initialization'
        )
        segmentGroupBox, self.segment2D_argsWidgets = self.createGroupParams(
            segment_params,
            'Parameters for 2D segmentation'
        )

        okButton = widgets.okPushButton(' Ok ')
        buttonsLayout.addWidget(okButton)

        infoButton = QPushButton(' More info... ')
        infoButton.setIcon(QIcon(':info.svg'))
        buttonsLayout.addWidget(infoButton)

        cancelButton = QPushButton(' Cancel ')
        buttonsLayout.addWidget(cancelButton)
        buttonsLayout.setContentsMargins(0, 10, 0, 10)

        okButton.clicked.connect(self.ok_cb)
        infoButton.clicked.connect(self.info_params)
        cancelButton.clicked.connect(self.close)

        mainLayout.addWidget(initGroupBox)
        mainLayout.addSpacing(15)
        mainLayout.addStretch(1)
        mainLayout.addWidget(segmentGroupBox)

        # Add minimum-size spinbox which is valid for all models
        artefactsGroupBox = postProcessSegmParams(
            'Post-processing segmentation parameters'
        )
        artefactsGroupBox.setCheckable(True)
        artefactsGroupBox.setChecked(True)
        self.artefactsGroupBox = artefactsGroupBox
        # Shortcuts to the post-processing widgets read back in ok_cb.
        self.minSize_SB = artefactsGroupBox.minSize_SB
        self.minSolidity_DSB = artefactsGroupBox.minSolidity_DSB
        self.maxElongation_DSB = artefactsGroupBox.maxElongation_DSB

        mainLayout.addSpacing(15)
        mainLayout.addStretch(1)
        mainLayout.addWidget(artefactsGroupBox)

        if url is not None:
            mainLayout.addWidget(
                self.createSeeHereLabel(url),
                alignment=Qt.AlignCenter
            )

        mainLayout.addLayout(buttonsLayout)
        self.setLayout(mainLayout)

        font = QtGui.QFont()
        font.setPixelSize(13)
        self.setFont(font)

        # self.setModal(True)

    def info_params(self):
        """Show a message box listing the available default models."""
        from cellacdc.models import CELLPOSE_MODELS, STARDIST_MODELS
        self.infoWin = widgets.myMessageBox()
        self.infoWin.setWindowTitle('Model parameters info')
        self.infoWin.setIcon()
        cp_models = [f' - {m}'for m in CELLPOSE_MODELS]
        cp_models = '<br>'.join(cp_models)
        stardist_models = [f' - {m}'for m in STARDIST_MODELS]
        stardist_models = '<br>'.join(stardist_models)
        txt = html_utils.paragraph(
            'Currently Cell-ACDC has <b>four models implemented</b>: '
            'YeaZ, Cellpose, StarDist, and YeastMate.<br><br>'
            'Cellpose and StarDist have the following default models available:<br><br>'
            '<b>Cellpose</b>:<br><br>'
            f'{cp_models}<br><br>'
            '<b>StarDist</b>:<br>'
            f'{stardist_models}'
        )
        self.infoWin.addText(txt)
        self.infoWin.addButton(' Ok ')
        self.infoWin.show()

    def createGroupParams(self, ArgSpecs_list, groupName):
        """Build a QGroupBox with one editor row per argument spec.

        Returns (groupBox, list of ArgWidget(name, type, widget)). The editor
        widget depends on the declared type: bool -> radio buttons,
        int -> QSpinBox, float -> QDoubleSpinBox, anything else -> QLineEdit.
        """
        ArgWidget = namedtuple('ArgsWidgets', ['name', 'type', 'widget'])
        ArgsWidgets_list = []
        groupBox = QGroupBox(groupName)
        groupBoxLayout = QGridLayout()
        for row, ArgSpec in enumerate(ArgSpecs_list):
            var_name = ArgSpec.name.replace('_', ' ').capitalize()
            label = QLabel(f'{var_name}: ')
            groupBoxLayout.addWidget(label, row, 0, alignment=Qt.AlignRight)
            if ArgSpec.type == bool:
                # Two radio buttons; only the 'True' one is kept as the widget,
                # its isChecked() encodes the boolean value.
                trueRadioButton = QRadioButton('True')
                falseRadioButton = QRadioButton('False')
                if ArgSpec.default:
                    trueRadioButton.setChecked(True)
                else:
                    falseRadioButton.setChecked(True)
                widget = trueRadioButton
                groupBoxLayout.addWidget(trueRadioButton, row, 1)
                groupBoxLayout.addWidget(falseRadioButton, row, 2)
            elif ArgSpec.type == int:
                spinBox = QSpinBox()
                spinBox.setAlignment(Qt.AlignCenter)
                spinBox.setMaximum(2147483647)
                spinBox.setValue(ArgSpec.default)
                widget = spinBox
                groupBoxLayout.addWidget(spinBox, row, 1, 1, 2)
            elif ArgSpec.type == float:
                doubleSpinBox = QDoubleSpinBox()
                doubleSpinBox.setAlignment(Qt.AlignCenter)
                doubleSpinBox.setMaximum(2**32)
                doubleSpinBox.setValue(ArgSpec.default)
                widget = doubleSpinBox
                groupBoxLayout.addWidget(doubleSpinBox, row, 1, 1, 2)
            else:
                # Free-text fallback; parsed back with eval in
                # argsWidgets_to_kwargs.
                lineEdit = QLineEdit()
                lineEdit.setText(str(ArgSpec.default))
                lineEdit.setAlignment(Qt.AlignCenter)
                widget = lineEdit
                groupBoxLayout.addWidget(lineEdit, row, 1, 1, 2)

            argsInfo = ArgWidget(
                name=ArgSpec.name,
                type=ArgSpec.type,
                widget=widget,
            )
            ArgsWidgets_list.append(argsInfo)

        groupBox.setLayout(groupBoxLayout)
        return groupBox, ArgsWidgets_list

    def createSeeHereLabel(self, url):
        """Return a rich-text label linking to the model documentation at url."""
        htmlTxt = f'<a href=\"{url}">here</a>'
        seeHereLabel = QLabel()
        seeHereLabel.setText(f"""
            <p style="font-size:12px">
                See {htmlTxt} for details on the parameters
            </p>
        """)
        seeHereLabel.setTextFormat(Qt.RichText)
        seeHereLabel.setTextInteractionFlags(Qt.TextBrowserInteraction)
        seeHereLabel.setOpenExternalLinks(True)
        seeHereLabel.setStyleSheet("padding:12px 0px 0px 0px;")
        return seeHereLabel

    def argsWidgets_to_kwargs(self, argsWidgets):
        """Collect the current widget values into a {arg_name: value} dict."""
        kwargs_dict = {}
        for argWidget in argsWidgets:
            if argWidget.type == bool:
                kwargs_dict[argWidget.name] = argWidget.widget.isChecked()
            elif argWidget.type == int or argWidget.type == float:
                kwargs_dict[argWidget.name] = argWidget.widget.value()
            elif argWidget.type == str:
                kwargs_dict[argWidget.name] = argWidget.widget.text()
            else:
                # NOTE(review): eval of user-entered text. Acceptable for a
                # trusted desktop dialog, but do not reuse with untrusted input.
                to_type = argWidget.type
                s = argWidget.widget.text()
                kwargs_dict[argWidget.name] = eval(s)

        return kwargs_dict

    def ok_cb(self, checked):
        # Collect all parameter values, mark the dialog as confirmed and close.
        self.cancel = False
        self.init_kwargs = self.argsWidgets_to_kwargs(self.init_argsWidgets)
        self.segment2D_kwargs = self.argsWidgets_to_kwargs(
            self.segment2D_argsWidgets
        )
        self.minSize = self.minSize_SB.value()
        self.minSolidity = self.minSolidity_DSB.value()
        self.maxElongation = self.maxElongation_DSB.value()
        self.applyPostProcessing = self.artefactsGroupBox.isChecked()
        self.close()

    def exec_(self):
        # Mimic QDialog.exec_() by showing with a blocking local event loop.
        self.show(block=True)

    def show(self, block=False):
        # Keep the dialog on top; optionally block until closeEvent exits
        # the local event loop.
        self.setWindowFlags(Qt.Window | Qt.WindowStaysOnTopHint)
        super().show()
        if block:
            self.loop = QEventLoop()
            self.loop.exec_()

    def closeEvent(self, event):
        # Quit the local event loop (if any) so blocking show()/exec_() returns.
        if hasattr(self, 'loop'):
            self.loop.exit()
class downloadModel(QMessageBox):
    """Message box shown when the automatic download of a model's weights
    fails, with instructions for downloading them manually."""
    def __init__(self, model_name, parent=None):
        super().__init__(parent)
        self.loop = None
        self.model_name = model_name

    def download(self):
        # Try the automatic download; on failure show the manual instructions.
        success = myutils.download_model(self.model_name)
        if not success:
            self.exec_()

    def exec_(self):
        self.show(block=True)

    def show(self, block=False):
        """Build and show the failure message; optionally block until closed."""
        import cellacdc

        model_name = self.model_name
        m = model_name.lower()
        # e.g. cellacdc.<model>_weights_filenames lists the required files.
        weights_filenames = getattr(cellacdc, f'{m}_weights_filenames')
        self.setIcon(self.Critical)
        self.setWindowTitle(f'Download of {model_name} failed')
        self.setTextFormat(Qt.RichText)
        url, alternative_url = myutils._model_url(
            model_name, return_alternative=True
        )
        url_href = f'<a href="{url}">this link</a>'
        alternative_url_href = f'<a href="{alternative_url}">this link</a>'
        _, model_path = myutils.get_model_path(model_name, create_temp_dir=False)
        txt = (f"""
        <p style=font-size:13px>
            Automatic download of {model_name} failed.<br><br>
            Please, <b>manually download</b> the model weights from {url_href} or
            {alternative_url_href}.<br><br>
            Next, unzip the content of the downloaded file into the
            following folder:<br><br>
            {model_path}<br>
        </p>
        <p style=font-size:12px>
            <i>NOTE: if clicking on the link above does not work
            copy one of the links below and paste it into the browser</i><br><br>
            {url}<br>{alternative_url}
        </p>
        """)
        self.setText(txt)
        weights_paths = [os.path.join(model_path, f) for f in weights_filenames]
        weights = '\n\n'.join(weights_paths)
        self.setDetailedText(
            f'Files that {model_name} requires:\n\n'
            f'{weights}'
        )
        okButton = widgets.okPushButton('Ok')
        self.addButton(okButton, self.YesRole)
        # Replace the default accept behavior so the local event loop is exited.
        okButton.disconnect()
        okButton.clicked.connect(self.close_)
        super().show()
        if block:
            self.loop = QEventLoop()
            self.loop.exec_()

    def close_(self):
        self.hide()
        self.close()
        if self.loop is not None:
            self.loop.exit()
class warnVisualCppRequired(QMessageBox):
    """Warning shown before installing packages (e.g. javabridge) that require
    Microsoft Visual C++ build tools on Windows."""
    def __init__(self, pkg_name='javabridge', parent=None):
        super().__init__(parent)
        self.loop = None
        self.screenShotWin = None
        self.setModal(False)
        self.setIcon(self.Warning)
        self.setWindowTitle(f'Installation of {pkg_name} info')
        self.setTextFormat(Qt.RichText)
        txt = (f"""
        <p style=font-size:12px>
            Installation of {pkg_name} on Windows requires
            Microsoft Visual C++ 14.0 or higher.<br><br>
            Cell-ACDC will anyway try to install {pkg_name} now.<br><br>
            If the installation fails, please <b>close Cell-ACDC</b>,
            then download and install <b>"Microsoft C++ Build Tools"</b>
            from the link below
            before trying this module again.<br><br>
            <a href='https://visualstudio.microsoft.com/visual-cpp-build-tools/'>
                https://visualstudio.microsoft.com/visual-cpp-build-tools/
            </a><br><br>
            <b>IMPORTANT</b>: when installing "Microsoft C++ Build Tools"
            make sure to select <b>"Desktop development with C++"</b>.
            Click "See the screenshot" for more details.
        </p>
        """)
        seeScreenshotButton = QPushButton('See screenshot...')
        okButton = widgets.okPushButton('Ok')

        self.addButton(okButton, self.YesRole)
        # Replace the default accept behavior so the local event loop is exited.
        okButton.disconnect()
        okButton.clicked.connect(self.close_)

        self.addButton(seeScreenshotButton, self.HelpRole)
        seeScreenshotButton.disconnect()
        seeScreenshotButton.clicked.connect(
            self.viewScreenshot
        )
        self.setText(txt)

    def viewScreenshot(self, checked=False):
        # Show the screenshot explaining which components to select.
        self.screenShotWin = widgets.view_visualcpp_screenshot()
        self.screenShotWin.show()

    def exec_(self):
        self.show(block=True)

    def show(self, block=False):
        # Optionally block in a local event loop until close_ exits it.
        super().show()
        if block:
            self.loop = QEventLoop()
            self.loop.exec_()

    def close_(self):
        self.hide()
        self.close()
        if self.loop is not None:
            self.loop.exit()
        if self.screenShotWin is not None:
            self.screenShotWin.close()
if __name__ == '__main__':
# Create the application
app = QApplication(sys.argv)
font = QtGui.QFont()
font.setPixelSize(13)
# title='Select channel name'
# CbLabel='Select channel name: '
# informativeText = ''
# win = QtSelectItems(title, ['mNeon', 'mKate'],
# informativeText, CbLabel=CbLabel, parent=None)
# win = edgeDetectionDialog(None)
# win = QDialogEntriesWidget(entriesLabels=['Input 1'])
IDs = list(range(1,10))
cc_stage = ['G1' for ID in IDs]
num_cycles = [-1]*len(IDs)
relationship = ['mother' for ID in IDs]
related_to = [-1]*len(IDs)
is_history_known = [False]*len(IDs)
corrected_assignment = [False]*len(IDs)
cca_df = pd.DataFrame({
'cell_cycle_stage': cc_stage,
'generation_num': num_cycles,
'relative_ID': related_to,
'relationship': relationship,
'emerg_frame_i': num_cycles,
'division_frame_i': num_cycles,
'is_history_known': is_history_known,
'corrected_assignment': corrected_assignment},
index=IDs)
cca_df.index.name = 'Cell_ID'
#
# df = cca_df.reset_index()
#
# win = pdDataFrameWidget(df)
# win = QDialogMetadataXML(
# rawDataStruct=1, chNames=[''], | |
# <gh_stars>0  (dataset artifact, kept as a comment so the file stays valid)
# author: jteoh
# note: This is a snapshot of a script in my utils repo: https://gitli.corp.linkedin.com/jteoh/utils/source/master:thirdeye/detector_admin.py
# Use that url for the latest version of the script.
# fix desktop python path for argparse
import sys
sys.path.insert(1, '/usr/local/linkedin/lib/python2.6/site-packages')
import argparse
import cmd
from datetime import date, datetime, timedelta
import json
from pprint import pprint
import httplib
import re
import urllib
client = None
class ThirdEyeHttpClient(object):
def __init__(self, base, app_port=19044, admin_port=11120):
base = str(base)
print "Using host: ", base
self.application_host = base + ":" + str(app_port)
self.admin_host = base + ":" + str(admin_port)
def curl(self, method, endpoint, additional_params={}):
return self.curl_helper(method, endpoint, **additional_params)
def curl_helper(self, method, endpoint, data=None, print_result=False, is_admin_request=False):
host = self.application_host if not is_admin_request else self.admin_host
print method, host + endpoint, data or ''
conn = httplib.HTTPConnection(host)
conn.request(method, endpoint, data, headers={'Content-type': 'application/json'})
resp = conn.getresponse()
result = resp.read()
conn.close()
status = resp.status
reason = resp.reason
print status, reason
if status == 200 and result:
#byteify if applicable
try:
result = byteify(json.loads(result))
except Exception:
pass
if print_result:
if status == 200 or 204: # 204 = no content
if callable(print_result):
result = print_result(result)
elif not result and type(print_result) == str:
result = print_result
if result:
if type(result) == str:
print result
else:
pprint(result)
#TODO raise error if failed.
return resp.status, resp.reason, result
# REST endpoint paths on the thirdeye-detector application port.
# Fix: the original assigned FUNCTIONS/JOBS/EMAIL_REPORTS twice with identical
# values (once hard-coded, once derived from API); keep only the derived form.
API = '/api/'
FUNCTIONS_ENDPOINT = API + 'anomaly-functions/'
JOBS_ENDPOINT = API + 'anomaly-jobs/'
EMAIL_REPORTS_ENDPOINT = API + 'email-reports/'
ANOMALY_RESULTS_ENDPOINT = API + 'anomaly-results/'
# argparse dest shared by commands that accept multiple ids/paths.
MULTIPLE_INP_KEY = "inps"
""" Command Loop """
class DetectorAdminShell(cmd.Cmd):
  """Interactive REPL that feeds each input line to the argparse parser and
  dispatches to the 'func' default stored on the matched sub-command."""
  intro = "Type ? or 'help' for a full list of available command line commands, or 'usage' for detector actions."
  prompt = "\n(thirdeye-detector) "

  def __init__(self, parser):
    # argparse parser whose sub-commands carry a 'func' default to call.
    self.parser = parser
    cmd.Cmd.__init__(self)

  def default(self, line):
    # Any line that is not a built-in cmd.Cmd command is parsed as a
    # detector action; parser errors must not kill the loop.
    try:
      args = vars(self.parser.parse_args(line.split()))
      func = args.pop('func')
      func(**args)
    except SystemExit:
      #keep looping if the internal parser tries to exit.
      pass
    except Exception as e:
      print type(e), e

  def do_bye(self, arg):
    #DUBAI hehe :D
    'Exits in a fun manner.'
    return self._exit_()

  def do_exit(self, arg):
    'Exits the current program.'
    return self._exit_()

  def do_quit(self, arg):
    'Exits the current program.'
    return self._exit_()

  def do_usage(self, arg):
    'Displays usage info detector admin commands'
    self.parser.print_help()

  def help_help(self):
    #really??
    print "Really? Shows a help message"

  def start(self):
    # Run the command loop; Ctrl+C exits cleanly.
    try:
      self.cmdloop()
    except KeyboardInterrupt:
      self._exit_()

  def _exit_(self):
    # Returning True tells cmd.Cmd to stop the loop.
    print "Exiting..."
    return True
""" Parsers """
def add_function_subparser(subparsers):
  """ GET, GET <id>, POST <data>, DELETE <id> """
  # 'functions' command group: CRUD on anomaly function definitions.
  functions = subparsers.add_parser('functions', help='anomaly function definitions')
  function_subparsers = functions.add_subparsers()

  show_parser = function_subparsers.add_parser('show', help='show all functions')
  show_parser.set_defaults(func=show_functions)

  show_ids_parser = function_subparsers.add_parser('show_ids', help='show only function ids')
  show_ids_parser.set_defaults(func=show_function_ids)

  find_parser = function_subparsers.add_parser('find', help='find a function')
  find_parser.add_argument('inps', type=int, nargs='+', help='function ids', metavar='ids')
  find_parser.set_defaults(func=find_function)

  create_parser = function_subparsers.add_parser('create', help='create a new function')
  create_parser.add_argument('inps', nargs='+', help='JSON files specifying functions to be created', metavar='file_paths')
  create_parser.set_defaults(func=create_function)

  delete_parser = function_subparsers.add_parser('delete', help='delete a function')
  delete_parser.add_argument('inps', type=int, nargs='+', help='function ids', metavar='ids')
  delete_parser.set_defaults(func=delete_function)
def add_jobs_subparser(subparsers):
  """ GET, POST <id>, POST <id> (adhoc, optional start+end), DELETE <id> """
  # 'jobs' command group: enable/disable/adhoc-run anomaly function schedules.
  jobs = subparsers.add_parser('jobs', help='anomaly function schedules')
  jobs_subparsers = jobs.add_subparsers()

  show_parser = jobs_subparsers.add_parser('show', help='show all active jobs')
  show_parser.set_defaults(func=show_active_jobs)

  enable_parser = jobs_subparsers.add_parser('enable', help='enable job schedule')
  enable_parser.add_argument('inps', type=int, nargs='+', help='job ids', metavar='ids')
  enable_parser.set_defaults(func=enable_job)

  adhoc_parser = jobs_subparsers.add_parser('adhoc', help='run adhoc job')
  adhoc_parser.add_argument('inps', type=int, nargs='+', help='job ids', metavar='ids')
  # start/end must be given together (validated in adhoc_job).
  adhoc_parser.add_argument('--start', help='start time in IS08601 or as daysago(#)', required=False)
  adhoc_parser.add_argument('--end', help='end time in IS08601 or as daysago(#)', required=False)
  adhoc_parser.set_defaults(func=adhoc_job)

  disable_parser = jobs_subparsers.add_parser('disable', help='disable job schedule')
  disable_parser.add_argument('inps', type=int, nargs='+', help='job ids', metavar='ids')
  disable_parser.set_defaults(func=disable_job)
def add_email_reports_subparser(subparsers):
  """ GET, GET <id>, POST <data>, POST <id> (adhoc), DELETE <id> """
  # 'reports' command group: manage email report definitions. Create/delete
  # require a scheduler reset to take effect (see 'reset').
  email_reports = subparsers.add_parser('reports', help='email report definitions')
  email_reports_subparser = email_reports.add_subparsers()

  show_parser = email_reports_subparser.add_parser('show', help='show all email reports')
  show_parser.set_defaults(func=show_email_reports)

  find_parser = email_reports_subparser.add_parser('find', help='find an email report')
  find_parser.add_argument('inps', type=int, nargs='+', help='email_report ids', metavar='ids')
  find_parser.set_defaults(func=find_email_report)

  create_parser = email_reports_subparser.add_parser('create', help='create a new email report. be sure to reset the scheduler afterwards!')
  create_parser.add_argument('inps', nargs='+', help='JSON files specifying email reports to be created', metavar='file_paths')
  create_parser.set_defaults(func=create_email_report)

  adhoc_parser = email_reports_subparser.add_parser('adhoc', help='send adhoc email report')
  adhoc_parser.add_argument('inps', type=int, nargs='+', help='email_report_ids', metavar='ids')
  adhoc_parser.set_defaults(func=adhoc_email_report)

  delete_parser = email_reports_subparser.add_parser('delete', help='delete an email report')
  delete_parser.add_argument('inps', type=int, nargs='+', help='email_report ids', metavar='ids')
  delete_parser.set_defaults(func=delete_email_report)

  reset_parser = email_reports_subparser.add_parser('reset', help='reset the email scheduler, required for changes to take effect')
  reset_parser.set_defaults(func=reset_email_scheduler)
def add_anomaly_results_subparser(subparsers):
  """ GET <id>, GET <collection> <start> [<end>], POST <data>, DELETE <id> """
  # Would be nice to have:
  # 1. Find by function id
  # 2. Show all
  # 'results' command group: read-only access to anomaly results
  # (create/delete are intentionally commented out below).
  results = subparsers.add_parser('results', help='anomaly results')
  results_subparser = results.add_subparsers()

  find_parser = results_subparser.add_parser('find', help='find an anomaly result')
  find_parser.add_argument('inps', type=int, nargs='+', help='result ids', metavar='ids')
  find_parser.set_defaults(func=find_anomaly_result)

  show_parser = results_subparser.add_parser('show', help='show anomaly results for a collection + time frame')
  show_parser.add_argument('collection', help='thirdeye collection')
  show_parser.add_argument('--start', help='start time in IS08601 or as daysago(#), default=daysago(7)', required=False, default=convert_to_iso('daysago(7)'))
  show_parser.add_argument('--end', help='end time in IS08601 or as daysago(#)', required=False)
  show_parser.set_defaults(func=show_anomaly_results_for_collection)

  # create_parser = results_subparser.add_parser('create', help='create a new anomaly result')
  # create_parser.add_argument('inps', nargs='+', help='JSON files specifying result to be created', metavar='file_paths')
  # create_parser.set_defaults(func=create_anomaly_result)

  # delete_parser = results_subparser.add_parser('delete', help='delete an anomaly result')
  # delete_parser.add_argument('inps', type=int, nargs='+', help='result ids', metavar='ids')
  # delete_parser.set_defaults(func=delete_anomaly_result)
""" Utility methods """
# Remove unicode encoding: http://stackoverflow.com/questions/956867/how-to-get-string-objects-instead-of-unicode-ones-from-json-in-python
def byteify(input):
  """Recursively convert unicode strings in a JSON-decoded structure to
  utf-8 encoded str (Python 2 only: uses iteritems/unicode)."""
  if isinstance(input, dict):
    return dict([(byteify(key), byteify(value)) for key, value in input.iteritems()])
  elif isinstance(input, list):
    return [byteify(element) for element in input]
  elif isinstance(input, unicode):
    return input.encode('utf-8')
  else:
    # Non-string leaves (numbers, bools, None) pass through unchanged.
    return input
def action_msg_generator(entity, action):
  """Return a formatter producing '<Action> <entity>: <payload>' strings."""
  prefix = str(action).capitalize() + ' ' + str(entity) + ': '
  return lambda payload: prefix + str(payload)
def delete_msg_success(entity):
  # Formatter for 'Deleted <entity>: <result>' messages.
  return action_msg_generator(entity, 'deleted')

def create_msg_success(entity):
  # Formatter for 'Created <entity>: <result>' messages.
  return action_msg_generator(entity, 'created')
def convert_to_iso(s):
  """Convert 'daysago(N)' to the ISO-8601 timestamp of midnight N days ago;
  any other string is passed through unchanged (assumed already ISO-8601)."""
  match = re.search(r'daysago\((\d+)\)', s)
  if not match:
    #TODO don't simply assume date is iso 8601 compatible...
    return s
  days_back = int(match.group(1))
  midnight_today = datetime.combine(datetime.now(), datetime.min.time())
  return (midnight_today - timedelta(days=days_back)).isoformat()
""" Decorators for sending requests """
# Credit for guidance from http://thecodeship.com/patterns/guide-to-python-function-decorators/
def Request(func):
  """Decorator: the wrapped function returns the curl() parameters; the
  wrapper performs the HTTP request through the module-level client and
  returns (status, reason, result)."""
  def func_wrapper(*args, **kwargs):
    curl_params = func(*args, **kwargs)
    status, reason, result = client.curl(*curl_params)
    return status, reason, result
  return func_wrapper
def MultipleInps(func):
  """Decorator: apply func to each element of the 'inps' list, collecting
  per-item results and reporting any inputs that raised instead of aborting."""
  def func_wrapper(inps, **args):
    results = []
    failed = []
    for inp in inps:
      try:
        if args:
          result = func(inp, **args)
        else:
          result = func(inp)
        results.append(result)
      except Exception as e:
        # Remember the failing input and keep going with the rest.
        failed.append(inp)
        print e
    if failed:
      print "Failed: ", failed
    return results
  return func_wrapper
""" Actual parser methods """
@Request
def show_functions(print_result=True):
print "Retrieving functions"
return 'GET', FUNCTIONS_ENDPOINT, {'print_result': print_result}
def show_function_ids():
status, reason, functions = show_functions(print_result=False)
pprint([f['id'] for f in functions])
@MultipleInps
@Request
def find_function(id):
print "Finding function id %d" % id
return 'GET', FUNCTIONS_ENDPOINT + str(id), {'print_result': True}
@MultipleInps
@Request
def create_function(file_path):
print "Creating function from file_path %s" % file_path
#callable
with open(file_path, 'r') as f:
data = f.read()
return 'POST', FUNCTIONS_ENDPOINT, {'data': data, 'print_result': create_msg_success('function')}
@MultipleInps
@Request
def delete_function(id):
    """Delete a function by numeric id (accepts a list of ids)."""
    print "Deleting function id %d" % id
    return 'DELETE', FUNCTIONS_ENDPOINT + str(id), {'print_result': "Deleted function id %d" % id}
@Request
def show_active_jobs():
    """List all active jobs."""
    print "Showing active jobs"
    return 'GET', JOBS_ENDPOINT, {'print_result': True}
@MultipleInps
@Request
def enable_job(id):
    """Enable a job by numeric id (accepts a list of ids)."""
    print "Enabling job id %d" % id
    return 'POST', JOBS_ENDPOINT + str(id)
@MultipleInps
@Request
def adhoc_job(id, start=None, end=None):
    """Trigger an ad-hoc run of job *id*.

    ``start``/``end`` optionally bound the run window and must be supplied
    together; each may be ISO-8601 or the 'daysago(N)' shorthand understood
    by convert_to_iso.
    """
    if bool(start) != bool(end):
        raise ValueError("Both start and end are required if either is present")
    if start and end:
        start = convert_to_iso(start)
        end = convert_to_iso(end)
        print "Running adhoc job id %d on window %s to %s" % (id, start, end)
        return 'POST', JOBS_ENDPOINT + str(id) + '/ad-hoc?' + urllib.urlencode({'start': start, 'end': end})
    else:
        print "Running adhoc job id %d" % id
        return 'POST', JOBS_ENDPOINT + str(id) + '/ad-hoc'
@MultipleInps
@Request
def disable_job(id):
    """Disable a job by numeric id (accepts a list of ids)."""
    print "Disabling job id %d" % id
    return 'DELETE', JOBS_ENDPOINT + str(id)
@Request
def show_email_reports():
    """List all configured email reports."""
    print "Showing email_reports"
    return 'GET', EMAIL_REPORTS_ENDPOINT, {'print_result': True}
@MultipleInps
@Request
def find_email_report(id):
    """Fetch and print a single email report by numeric id."""
    print "Finding email report id %d" % id
    return 'GET', EMAIL_REPORTS_ENDPOINT + str(id), {'print_result': True}
@MultipleInps
@Request
def create_email_report(file_path):
    """Create an email report from a definition file; the success callback
    offers to reset the email scheduler (see create_email_report_helper)."""
    print "Creating email report from file_path %s" % file_path
    with open(file_path, 'r') as f:
        data = f.read()
    return 'POST', EMAIL_REPORTS_ENDPOINT, {'data': data, 'print_result': create_email_report_helper}
def create_email_report_helper(result):
print create_msg_success('email report')(result), "\n"
reset_now = raw_input("The email scheduler must be reset for changes to take place. Do you want to reset the scheduler right now (Y/n): ")
if reset_now is "Y":
reset_email_scheduler()
return None
@MultipleInps
@Request
def adhoc_email_report(id):
    """Send an email report immediately, outside its normal schedule."""
    print "Running adhoc email report id %d" % id
    return 'POST', EMAIL_REPORTS_ENDPOINT + str(id) + '/ad-hoc'
@MultipleInps
@Request
def delete_email_report(id):
    """Delete an email report by numeric id (accepts a list of ids)."""
    print "Deleting email report id %d" % id
    return 'DELETE', EMAIL_REPORTS_ENDPOINT + str(id), {'print_result': "Deleted email report id %d" % id}
#Special instance that needs to hit the admin port
def reset_email_scheduler():
    """Reset the email scheduler via the admin API (needed after report changes)."""
    print "Resetting email scheduler"
    status, reason, result = client.curl('POST', '/admin/tasks/email?action=reset', {'is_admin_request': True})
@MultipleInps
@Request
def find_anomaly_result(id):
print "Finding anomaly result id %d" % id
return 'GET', ANOMALY_RESULTS_ENDPOINT + | |
# The following is an exploration of a tensor flow tutorial to "Build a
# Convolutional Neural Network using Estimators". The tutorial can be found
# at the following address: https://www.tensorflow.org/tutorials/estimators/cnn
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convolutional Neural Network Estimator for MNIST, built with tf.layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
# TF1-style global logging; INFO makes Estimator training progress visible.
tf.logging.set_verbosity(tf.logging.INFO)
def cnn_model_fn(features, labels, mode):
  """Estimator model_fn for a small two-conv-layer MNIST CNN.

  Args:
    features: dict with key "x" holding flattened 28x28 grayscale images.
    labels: integer class labels (unused in PREDICT mode).
    mode: a tf.estimator.ModeKeys value selecting TRAIN/EVAL/PREDICT behavior.

  Returns:
    A tf.estimator.EstimatorSpec appropriate for the requested mode.
  """
  # NHWC input tensor: [batch, 28, 28, 1] (one color channel).
  net = tf.reshape(features["x"], [-1, 28, 28, 1])
  # Conv block 1: 32 5x5 ReLU filters; "same" padding keeps 28x28.
  net = tf.layers.conv2d(
      inputs=net,
      filters=32,
      kernel_size=[5, 5],
      padding="same",
      activation=tf.nn.relu)
  # 2x2 max-pool, stride 2 -> [batch, 14, 14, 32].
  net = tf.layers.max_pooling2d(inputs=net, pool_size=[2, 2], strides=2)
  # Conv block 2: 64 5x5 ReLU filters -> [batch, 14, 14, 64].
  net = tf.layers.conv2d(
      inputs=net,
      filters=64,
      kernel_size=[5, 5],
      padding="same",
      activation=tf.nn.relu)
  # 2x2 max-pool, stride 2 -> [batch, 7, 7, 64].
  net = tf.layers.max_pooling2d(inputs=net, pool_size=[2, 2], strides=2)
  # Flatten to [batch, 7*7*64], then a 1024-unit dense layer.
  net = tf.reshape(net, [-1, 7 * 7 * 64])
  net = tf.layers.dense(inputs=net, units=1024, activation=tf.nn.relu)
  # Dropout (0.6 keep probability) is active only while training.
  net = tf.layers.dropout(
      inputs=net, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
  # Unnormalized class scores: [batch, 10].
  logits = tf.layers.dense(inputs=net, units=10)
  predictions = {
      # Hard class decisions, used for PREDICT output and EVAL accuracy.
      "classes": tf.argmax(input=logits, axis=1),
      # Named softmax op so the LoggingTensorHook can look it up by name.
      "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
  }
  if mode == tf.estimator.ModeKeys.PREDICT:
    return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
  # TRAIN and EVAL both need the cross-entropy loss.
  loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
  if mode == tf.estimator.ModeKeys.TRAIN:
    train_op = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(
        loss=loss, global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
  # EVAL: report accuracy of the hard class decisions.
  eval_metric_ops = {
      "accuracy": tf.metrics.accuracy(
          labels=labels, predictions=predictions["classes"])}
  return tf.estimator.EstimatorSpec(
      mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
##############################################################################
def main(unused_argv):
  """Train the MNIST CNN for 20k steps, then evaluate and print the metrics.

  ``unused_argv`` is required by tf.app.run() but ignored.
  """
  # Load training and eval data
  mnist = tf.contrib.learn.datasets.load_dataset("mnist")
  train_data = mnist.train.images # Returns np.array
  train_labels = np.asarray(mnist.train.labels, dtype=np.int32)
  eval_data = mnist.test.images # Returns np.array
  eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)
  # Create the Estimator (checkpoints and summaries go to model_dir)
  mnist_classifier = tf.estimator.Estimator(
      model_fn=cnn_model_fn, model_dir="/tmp/mnist_convnet_model")
  # Set up logging for predictions
  # Log the values in the "Softmax" tensor with label "probabilities"
  tensors_to_log = {"probabilities": "softmax_tensor"}
  logging_hook = tf.train.LoggingTensorHook(
      tensors=tensors_to_log, every_n_iter=50)
  # Train the model (num_epochs=None: repeat the data until `steps` is reached)
  train_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={"x": train_data},
      y=train_labels,
      batch_size=100,
      num_epochs=None,
      shuffle=True)
  mnist_classifier.train(
      input_fn=train_input_fn,
      steps=20000,
      hooks=[logging_hook])
  # Evaluate the model and print results (one full pass, no shuffling)
  eval_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={"x": eval_data},
      y=eval_labels,
      num_epochs=1,
      shuffle=False)
  eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)
  print(eval_results)
# tf.app.run parses flags and invokes main()
if __name__ == "__main__":
  tf.app.run()
"""
EXAMPLE OUTPUT::
INFO:tensorflow:global_step/sec: 5.53906
INFO:tensorflow:probabilities = [[0.00001743 0.00025173 0.9591319 0.00820096 0.00020747 0.00002829
0.00094485 0.03115867 0.00005324 0.00000542]
[0. 0. 0. 0.00000272 0. 0.9999434
0.00000001 0. 0.00005383 0.00000002]
[0.00000001 0. 0.00000576 0.00001682 0.00000001 0.00000001
0. 0.9999703 0.00000001 0.00000698]
[0.00000905 0.00516407 0.00003854 0.00570529 0.05497485 0.00163765
0.00000117 0.02361589 0.00035603 0.9084975 ]
[0.00003372 0.00017863 0.00002458 0.99747926 0.00000004 0.00217183
0.00000002 0.00000097 0.000001 0.00011 ]
[0.9996506 0.00000003 0.00000717 0.00004306 0.00000003 0.00026375
0.00003316 0.00000024 0.00000163 0.0000005 ]
[0.90882015 0.00010453 0.00731953 0.04718952 0.00002098 0.00047886
0.00010599 0.00172181 0.03215327 0.00208524]
[0.00005502 0.0000006 0.00008231 0.00003047 0.9938332 0.00049089
0.0001365 0.0003415 0.00011648 0.00491314]
[0.9995291 0. 0.00005508 0.00000158 0.00000079 0.00000224
0.00018889 0.00017385 0.00000062 0.00004795]
[0.00000273 0.9995415 0.00005345 0.00017117 0.00001141 0.00002655
0.00000245 0.00011586 0.00007111 0.00000353]
[0.00000047 0.00000101 0.00049946 0.00053548 0.00000395 0.00002954
0.00000055 0.00000201 0.99763286 0.00129464]
[0.00028508 0.00000024 0.00000204 0.00055511 0.00008088 0.99559563
0.00006329 0.00000089 0.00085147 0.00256531]
[0.00000005 0.00001118 0.00008671 0.00016884 0.00000858 0.0000189
0.00029197 0.00000023 0.99940133 0.00001218]
[0.00004596 0.00000211 0.00156567 0.00001375 0.00661005 0.00013931
0.991597 0.00000014 0.00001451 0.00001148]
[0.00000004 0.00000001 0.00000107 0.00005554 0.00000596 0.00000066
0. 0.99832827 0.00000118 0.00160722]
[0.00004655 0.9792119 0.00012088 0.00031933 0.00020922 0.00224334
0.00131055 0.0000022 0.01587561 0.00066039]
[0.00000134 0.00004668 0.00218752 0.00637693 0.00000651 0.00000217
0. 0.99104226 0.00000617 0.00033039]
[0.00000634 0.99853075 0.00005702 0.00013371 0.00005548 0.00001322
0.00005108 0.00084328 0.0002979 0.00001126]
[0.00000813 0.00001352 0.00075002 0.00013396 0.00004039 0.0006004
0.00016469 0.00000023 0.9978447 0.00044395]
[0.00001541 0.98893857 0.00005396 0.00022245 0.00000523 0.00012868
0.0000033 0.00099036 0.00676024 0.00288181]
[0.00001841 0.00001041 0.00032895 0.00547894 0.00363064 0.00009032
0.00000258 0.00996082 0.00121091 0.9792681 ]
[0.00001762 0.00000002 0.00000835 0.0000001 0.9992508 0.00000603
0.00047614 0.00000227 0.00001307 0.00022552]
[0.00000284 0.00000011 0.00003634 0.00000118 0.00004083 0.00006648
0.9997342 0. 0.0001176 0.0000003 ]
[0.0000001 0.00006998 0.00002514 0.9995129 0.00000005 0.00003842
0. 0.00000175 0.00031445 0.00003734]
[0.99934393 0. 0.00003508 0.00000219 0.00000021 0.00012061
0.00000101 0.00000771 0.00038333 0.000106 ]
[0.99999905 0. 0.00000009 0. 0. 0.00000029
0. 0.00000056 0.00000001 0.00000002]
[0.32645535 0.00033754 0.17007063 0.02610587 0.01028968 0.05536265
0.12576976 0.00167584 0.2768447 0.00708797]
[0.00000001 0. 0.00000001 0.00000002 0.99999523 0.00000096
0.00000017 0.00000012 0.00000023 0.00000335]
[0.00002521 0.00001479 0.00000856 0.9625024 0.00009555 0.00038342
0.00000045 0.00013855 0.00013515 0.03669578]
[0.00011611 0.00000091 0.00029081 0.00216262 0.00140756 0.00007991
0.00000076 0.03082246 0.00021701 0.96490186]
[0.00000001 0. 0.0000021 0.0000011 0.99985087 0.00000007
0.00000089 0.00003807 0.00000066 0.00010626]
[0.00064659 0.00000034 0.00020119 0.00000122 0.001407 0.0000752
0.9976095 0.00000037 0.00003947 0.00001918]
[0.00004074 0.9891452 0.00101289 0.00080914 0.00005209 0.00594475
0.00063051 0.00000309 0.00219075 0.00017102]
[0.00001495 0.00001717 0.00005773 0.00221132 0.00018911 0.981037
0.00016352 0.00001165 0.00532743 0.01097007]
[0.00000147 0. 0.00000073 0.00022507 0.00004066 0.00001629
0. 0.99491984 0.0000121 0.00478387]
[0.00002711 0.0000141 0.00655662 0.00314194 0.00000086 0.08500261
0.00000258 0.00012525 0.9045927 0.00053623]
[0.00147657 0.00001155 0.98549545 0.00026559 0.00004082 0.00001128
0.00005969 0.00003511 0.01081199 0.00179201]
[0.0000836 0.00057733 0.00018987 0.96952933 0.00005612 0.02727005
0.00004134 0.0001644 0.00058636 0.00150166]
[0.00000158 0.00037989 0.00033482 0.9968079 0.00000036 0.00065016
0.00000005 0.00001674 0.00137774 0.00043089]
[0.0000011 0.0000033 0.00002593 0.00035911 0.9775965 0.00001023
0.00000395 0.0010437 0.00058028 0.0203758 ]
[0.00123716 0.00000011 0.9634774 0.0018821 0.0037619 0.00020576
0.02843929 0.00000207 0.00049357 0.00050092]
[0.00000075 0. 0.00000079 0.00075195 0.00000937 0.00101636
0.00000057 0.0000116 0.9966455 0.00156309]
[0.00002756 0.00000239 0.10737749 0.0003512 0.01721513 0.00047962
0.8740079 0.00017321 0.0003263 0.00003931]
[0.00034548 0.00000108 0.00001112 0.00512926 0.00084592 0.98813194
0.00120696 0.00000651 0.0006807 0.00364101]
[0.00035646 0.00235981 0.0000863 0.00022704 0.00009719 0.00070728
0.00110772 0.00000171 0.99503005 0.00002644]
[0.9956987 0.00000011 0.0013109 0.00003858 0.00001434 0.00005862
0.00263919 0.00000148 0.00020573 0.00003242]
[0.00078063 0.00000282 0.00419076 0.00102293 0.00000773 0.00000293
0.00002895 0.0000001 0.9939255 0.00003763]
[0.9998074 0.00000001 0.00002599 0.00000155 0.0000003 0.00005772
0.00000197 0.00000042 0.00001258 0.00009198]
[0.00007664 0.00000052 0.00016478 0.00008393 0.00015902 0.00000321
0.00001374 0.00002234 0.99892837 0.00054739]
[0.00000867 0.00001533 0.0001395 0.9979851 0.00000167 0.00073653
0.00000025 0.00000214 0.00075955 0.00035131]
[0.00030491 0.00000446 0.00283215 0.98258096 0.00000318 0.00328017
0.00000058 0.00002485 0.01065485 0.00031391]
[0.00000014 0.00000004 0.00000551 0.0002545 0.9919161 0.00010933
0.00001384 0.00170648 0.00006843 0.00592541]
[0.00326718 0.00653654 0.00165124 0.01345764 0.00104828 0.09381816
0.00738143 0.00029421 0.844998 0.02754727]
[0.00000262 0.00028014 0.9871634 0.00300456 0.00112792 0.00015671
0.00002962 0.00810244 0.00000797 0.00012433]
[0.9993513 0. 0.00001494 0.00000002 0.00000083 0.00000125
0.00062114 0.00000001 0.00001015 0.00000045]
[0.00009807 0.9440691 0.00467515 0.00374943 0.00413666 0.00641158
0.00447348 0.0004008 0.03069097 0.00129481]
[0.00017282 0.00002709 0.00008706 0.00020065 0.00078732 0.0020839
0.99485886 0.00000009 0.00177737 0.00000479]
[0.00001021 0.00003729 0.990328 0.00397502 0.00014924 0.00019248
0.00013659 0.00000139 0.00459831 0.00057146]
[0.23911725 0.00015748 0.24240305 0.00192308 0.5004874 0.00166103
0.00470083 0.00528471 0.00078837 0.0034769 ]
[0.00001416 0. 0.00000255 0.00002227 0.00002268 0.00000396
0.00000006 0.9973876 0.00000017 0.00254654]
[0.00059613 0.01351845 0.93063676 0.02836889 0.00014044 0.00018504
0.00528834 0.0000215 0.02123444 0.00000992]
[0.00000194 0. 0.00001005 0.00000016 | |
if line != "wait" else line for line in cmd.split("\n")]
)
job_script += [cmd]
job_script = "\n".join(job_script)
# create and navigate to workdir
cwd = os.getcwd()
if workdir is None:
workdir = cwd
pwd = workdir
else:
pwd = cwd
if not os.path.exists(workdir):
os.makedirs(workdir)
os.chdir(workdir)
if verbose:
print(workdir)
if debug:
print(job_script)
# create and submit script
prefix = "{}_".format(name if name else "job")
if scheduler == "pbs":
suffix = ".qsub"
else:
suffix = ".slurm"
with tempfile.NamedTemporaryFile(
prefix=prefix, suffix=suffix, mode="w", dir=workdir, delete=delete
) as f:
f.write(job_script)
f.flush()
os.chmod(f.name, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP)
if submit:
if scheduler == "pbs":
ret = sp.check_output(["qsub"] + batch_args + [f.name]).decode("UTF-8")
jobid = ret.split("\n")[0] # parse jobid
if not re.match("[0-9]+\.[\w]+", jobid):
raise RuntimeError("qsub error:\n{}".format(ret))
elif scheduler == "slurm":
ret = sp.check_output(["sbatch"] + batch_args + [f.name]).decode(
"UTF-8"
)
jobid = ret.split("\n")[0].split()[-1] # parse jobid
if not re.match("[0-9]+", jobid):
raise RuntimeError("slurm error:\n{}".format(ret))
elif debug:
jobid = "314159test"
if submit and not delete:
new = "{}.q{}".format(
name if name else os.path.basename(f.name), jobid.split(".", 1)[0]
)
new = os.path.join(os.path.dirname(f.name), new)
shutil.copy2(f.name, new)
fname = f.name
if submit and not delete:
os.remove(fname)
os.chdir(pwd)
if submit or debug:
if verbose:
print("Job ID: {}\n".format(jobid))
return jobid
else:
return None
def batch_group(cmds, group_by=1, serial=False, *args, **kwargs):
    """Submit a list of similar commands as one or more batch jobs.

    Commands are packed ``group_by`` at a time into a single job script
    (the last group may be smaller when ``len(cmds) % group_by != 0``).
    Unless ``serial`` is set, grouped commands are backgrounded with ``&``
    and followed by ``wait`` so they run concurrently within the job.

    Remaining positional/keyword arguments are forwarded to ``batch_sub``
    for EVERY job; a ``name`` keyword is suffixed with ``_grpN`` whenever
    more than one job is produced.

    Returns
    -------
    list of strings, or None
        Scheduler job IDs, or None when nothing was submitted.
    """
    pending = []
    jobids = []
    base_name = kwargs.pop("name", None)
    for idx, cmd in enumerate(cmds):
        if not isinstance(cmd, str):
            cmd = " ".join(cmd)
        if group_by > 1 and not serial:
            # background each command so the group runs in parallel
            cmd = "{} &".format(cmd)
        pending.append(cmd)
        is_last = idx + 1 == len(cmds)
        if len(pending) == group_by or is_last:
            # group is full, or this was the final command: emit one job
            if group_by > 1:
                pending.append("wait")
            if base_name:
                if is_last and not jobids:
                    # everything fit in a single group; keep the plain name
                    kwargs["name"] = base_name
                else:
                    kwargs["name"] = "{}_grp{}".format(base_name, len(jobids) + 1)
            jid = batch_sub("\n".join(pending), *args, **kwargs)
            if jid:
                jobids.append(jid)
            pending = []
    return jobids if jobids else None
class JobArgumentParser(object):
def __init__(
self, name=None, mem=None, time=None, workdir=None, outkey=None, **kwargs
):
"""
Standardized way to add job submission arguments to a script.
Keyword arguments are used to fix values for parameters not needed by a
script. The corresponding command line arguments will not be added. For
example, if not using MPI, pass ``mpi_procs=None`` and then there will
be no ``--mpi-procs`` argument on the command line. See the
``.opt_list`` attribute for a complete list.
Arguments
---------
name : string
This will be the name of the jobs.
mem : float
Memory per job in GB. Will scale up for grouped jobs.
time : float
Time per job in hours.
workdir : string
Fixed location to use as working directory (ie place for job
scripts and logs).
outkey : string
Alternative to ``workdir``. The key/name of an argument added to
normal argparse that indicates output path. A "logs" subfolder
of that path will be used as ``workdir``.
Example
-------
>>> jp = sa.batch.JobArgumentParser("some_serial_job", mem=4, time=1.5,
mpi_procs=None, omp_threads=1, workdir="/path/to/logs")
>>> AP = argparse.ArgumentParser(description="Do some job")
>>> AP.add_argument(...) # other non-job arguments
>>> jp.add_arguments(AP)
>>> args = AP.parse_args()
>>> jp.set_job_opts(args)
>>> jobs = [...] # list of commands to run in jobs
>>> jp.submit(jobs)
"""
self.name = name
self.mem = float(mem) if mem is not None else None
self.time = float(time)
self.workdir = workdir
self.outkey = outkey
self.fixed_opts = kwargs
self.opt_list = self._opt_list()
bad_opts = [k for k in kwargs if k not in self.opt_list]
if bad_opts:
raise ValueError("Unknown options: {}".format(bad_opts))
    def _opt_list(self):
        """
        All options supported by this class.
        Initialize from a function to conceivably allow subclass overrides.

        Returns an OrderedDict mapping option name -> kwargs dict for
        argparse's ``add_argument`` (the ordering fixes the --help listing).
        """
        from collections import OrderedDict
        return OrderedDict(
            [
                # scheduler / resource selection
                ("queue", dict(default=None, help="Queue to which to submit jobs")),
                (
                    "nodes",
                    dict(
                        type=str,
                        default=1,
                        help="Name or number of nodes to submit job to",
                    ),
                ),
                (
                    "ppn",
                    dict(
                        type=int,
                        default=None,
                        help="Processes per node. Default based on group, omp_threads, and mpi_procs",
                    ),
                ),
                (
                    "exclude",
                    dict(
                        type=str,
                        default=None,
                        nargs="+",
                        help="Nodes to exclude from jobs",
                    ),
                ),
                # scheduler flavor and timing behavior
                (
                    "slurm",
                    dict(
                        action="store_true",
                        default=False,
                        help="Create SLURM (rather than PBS) job scripts",
                    ),
                ),
                (
                    "use_cput",
                    dict(
                        action="store_true",
                        default=False,
                        help="Use CPU time rather than wall clock for jobs",
                    ),
                ),
                (
                    "cpu_speed",
                    dict(
                        type=float,
                        default=1.0,
                        help="Relative CPU speed factor, to adjust run times",
                    ),
                ),
                (
                    "nice",
                    dict(
                        type=int,
                        default=0,
                        help="Priority from -5000 (hi) to 5000 (lo). SLURM only",
                    ),
                ),
                (
                    "env_script",
                    dict(
                        default=None,
                        help="Script to source in jobs to set up environment",
                    ),
                ),
                (
                    "test",
                    dict(
                        action="store_true",
                        default=False,
                        help="Print options for debugging",
                    ),
                ),
                # parallelism and grouping
                (
                    "omp_threads",
                    dict(
                        type=int, default=1, help="Number of OpenMP threads per process"
                    ),
                ),
                (
                    "mpi_procs",
                    dict(
                        type=int, default=1, help="Number of MPI processes (per node)"
                    ),
                ),
                (
                    "group",
                    dict(
                        type=int,
                        default=1,
                        help="Number of processes to group into single job",
                    ),
                ),
                (
                    "serial",
                    dict(
                        action="store_true",
                        default=False,
                        help="Run grouped commands serially. Works for MPI",
                    ),
                ),
                (
                    "procs_scale",
                    dict(
                        action="store_true",
                        default=False,
                        help="Scale time and memory by number of processes",
                    ),
                ),
            ]
        )
def add_arguments(self, parser=None, add_group=True):
"""
Add job submission arguments to an argparse.ArgumentParser.
Arguments
---------
parser : argparse.ArgumentParser
The parser to which to add arguments. If None, a new parser will
be made and returned.
add_group : bool or string
Whether to add job submit options in an argument group.
If a string, use as description for the group.
Returns
-------
parser : argparse.ArgumentParser
The updated argument parser object.
"""
if parser is None:
parser = ap.ArgumentParser()
if add_group:
if not isinstance(add_group, str):
add_group = "Job Submit Options"
group = parser.add_argument_group(add_group)
else:
group = parser
for arg, opts in self.opt_list.items():
if arg not in self.fixed_opts:
group.add_argument("--" + arg.replace("_", "-"), **opts)
return parser
def pop_job_opts(self, args_dict, pop_submit=True):
"""
Pop all of the job-related options from a dictionary of arguments
Arguments
---------
args_dict : dict
The dictionary to pop from
pop_submit : bool
Whether to also pop an argument named "submit"
"""
for key in self.opt_list:
args_dict.pop(key, None)
if pop_submit:
args_dict.pop("submit", None)
return args_dict
def set_job_opts(self, args, load_defaults=True, **kwargs):
"""
Set job submission options based on parsed arguments.
Keyword arguments will be passed to ``update``. Can be used to override
particular job submission options. Any argument to the ``batch_sub`` or
``batch_group`` functions can be overridden in this way.
Arguments
---------
args : argparse.Namespace or dict
The parsed command line arguments (from
argparse.ArgumentParser.parse_args()).
load_defaults : bool
Whether to automatically load the default value for options
"""
if isinstance(args, ap.Namespace):
args = vars(args)
else:
args = args.copy()
args.update(self.fixed_opts)
# get default values for any missing options
# can happen if called from python without doing argparse
if load_defaults:
for arg, opts in self.opt_list.items():
if arg not in args:
args[arg] = opts["default"]
if args["ppn"] is None:
args["ppn"] = args["group"]
scale = 1.0 if not args["procs_scale"] else float(args["mpi_procs"])
mem_scale = scale * | |
select_manu_window = FloatingWindow(menus=top_f_menus, offset=transform_offset, menu_name='select_top')
    def create_select_menu(self, add_float=True):
        """Build and return the 'select' QMenu.

        add_float : when True, prepend a scissor row that detaches the menu
        into its own floating window.
        """
        self.select_menus = QMenu(self.select_top)
        qt.change_button_color(self.select_menus, textColor=menu_text, bgColor=menu_bg, hiText=menu_high_text, hiBg=menu_high_bg, mode='window')
        if add_float:# detachable floating-window menu row
            sel_action = self.select_menus.addAction(u'-----------------------------------------------------✂----')
            sel_action.triggered.connect(self.create_f_sel_menu)
            #self.select_menus.setTearOffEnabled(True)# could also simply make the menu tear-off-able
        # cache icon state so the current child-highlight mode shows a check mark
        self.check_sel_highlight()
        mag = lang.Lang(en='Selection Child Highlighting : Always highlight',
                    ja=u'選択項目の子 : 常にハイライト')
        self.sel_action00 = QAction(mag.output(), self.select_menus, icon=QIcon(self.sel_highlight[0]))
        self.sel_action00.triggered.connect(lambda : self.set_sel_highlight(mode=0))
        self.select_menus.addAction(self.sel_action00)
        mag = lang.Lang(en='Selection Child Highlighting : Never highlight',
                    ja=u'選択項目の子 : ハイライトしない')
        self.sel_action01 = QAction(mag.output(), self.select_menus, icon=QIcon(self.sel_highlight[1]))
        self.sel_action01.triggered.connect(lambda : self.set_sel_highlight(mode=1))
        self.select_menus.addAction(self.sel_action01)
        mag = lang.Lang(en='Selection Child Highlighting : Use object Setting',
                    ja=u'選択項目の子 : オブジェクト設定を使用')
        self.sel_action02 = QAction(mag.output(), self.select_menus, icon=QIcon(self.sel_highlight[2]))
        self.sel_action02.triggered.connect(lambda : self.set_sel_highlight(mode=2))
        self.select_menus.addAction(self.sel_action02)
        self.sch_buts = [self.sel_action00, self.sel_action01, self.sel_action02]
        self.select_menus.addSeparator()# add separator line
        #----------------------------------------------------------------------------------------------------
        cld_icon = self.check_click_drag_highlight()
        mag = lang.Lang(en='Click drag select',
                    ja=u'クリック&ドラッグで選択')
        self.sel_action03 = QAction(mag.output(), self.select_menus, icon=QIcon(cld_icon))
        self.sel_action03.triggered.connect(self.set_click_drag)
        self.select_menus.addAction(self.sel_action03)
        #----------------------------------------------------------------------------------------------------
        self.select_menus.addSeparator()# add separator line
        # load persisted mouse-gesture flags before wiring the toggle actions
        self.load_mouse_setting()
        mag = lang.Lang(en='Left Click Enable mouse gesture input',
                    ja=u'左クリック マウスジェスチャー入力有効')
        self.action31 = self.select_menus.addAction(mag.output())
        self.action31.triggered.connect(self.change_l_gesture)
        mag = lang.Lang(en='Center Click Enable mouse gesture input',
                    ja=u'中クリック マウスジェスチャー入力有効')
        self.action32 = self.select_menus.addAction(mag.output())
        self.action32.triggered.connect(self.change_c_gesture)
        mag = lang.Lang(en='Right Click Enable mouse gesture input',
                    ja=u'右クリック マウスジェスチャー入力有効')
        self.action33 = self.select_menus.addAction(mag.output())
        self.action33.triggered.connect(self.change_r_gesture)
        self.set_mouse_gesture()
        #----------------------------------------------------------------------------------------------------
        # axis-selection modes are only available from Maya 2016 on
        if maya_ver >= 2016:
            self.select_menus.addSeparator()# add separator line
            self.load_axis_select_setting()
            mag = lang.Lang(en='Maya axis selection mode (click to toggle selection state)',
                        ja=u'Maya 軸選択モード(クリックで選択状態をトグル)')
            self.action34 = self.select_menus.addAction(mag.output())
            self.action34.triggered.connect(self.change_axis_select_mode)
            mag = lang.Lang(en='SI axis selection mode (multiple selection by Shift + click)',
                        ja=u'SI 軸選択モード(Shift+クリックで複数選択)')
            self.action35 = self.select_menus.addAction(mag.output())
            self.action35.triggered.connect(self.change_axis_select_mode)
            self.set_axis_op_icon()
        return self.select_menus
def set_axis_op_icon(self):
if self.axis_select_operation == 'maya_selection':
self.action34.setIcon(QIcon(image_path+self.check_icon))
self.action35.setIcon(QIcon(None))
else:
self.action35.setIcon(QIcon(image_path+self.check_icon))
self.action34.setIcon(QIcon(None))
def change_axis_select_mode(self):
if self.axis_select_operation == 'maya_selection':
self.axis_select_operation = 'si_selection'
self.set_axis_op_icon()
else:
self.axis_select_operation = 'maya_selection'
self.set_axis_op_icon()
self.save_axis_select_setting()
def save_axis_select_setting(self):
self.axis_select_setting_path = self.dir_path+'\\sisidebar_axis_operation_'+str(maya_ver)+'.json'
if not os.path.exists(self.dir_path):
os.makedirs(self.dir_path)
save_data = {}
save_data['axis_select_mode'] = self.axis_select_operation
with open(self.axis_select_setting_path, 'w') as f:
json.dump(save_data, f)
def load_axis_select_setting(self):
self.axis_select_setting_path = self.dir_path+'\\sisidebar_axis_operation_'+str(maya_ver)+'.json'
if os.path.isfile(self.axis_select_setting_path):#保存ファイルが存在したら
with open(self.axis_select_setting_path, 'r') as f:
save_data = json.load(f)
try:
self.axis_select_operation = save_data['axis_select_mode']
except:
self.axis_select_operation = 'maya_selection'
else:
self.axis_select_operation = 'maya_selection'
#self.axis_select_operation
def set_mouse_gesture(self):
self.set_r_gesture()
self.set_c_gesture()
self.set_l_gesture()
def change_l_gesture(self):
global left_click_gesture
if left_click_gesture:
left_click_gesture = False
else:
left_click_gesture = True
self.set_l_gesture()
self.save_mouse_setting()
def change_c_gesture(self):
global center_click_gesture
if center_click_gesture:
center_click_gesture = False
else:
center_click_gesture = True
self.set_c_gesture()
self.save_mouse_setting()
def change_r_gesture(self):
global right_click_gesture
if right_click_gesture:
right_click_gesture = False
else:
right_click_gesture = True
self.set_r_gesture()
self.save_mouse_setting()
#マウスジェスチャーの有効無効を切り替え
def set_l_gesture(self):
global left_click_gesture
if left_click_gesture:
self.action31.setIcon(QIcon(image_path+self.check_icon))
else:
self.action31.setIcon(QIcon(None))
def set_c_gesture(self):
global center_click_gesture
if center_click_gesture:
self.action32.setIcon(QIcon(image_path+self.check_icon))
else:
self.action32.setIcon(QIcon(None))
def set_r_gesture(self):
global right_click_gesture
if right_click_gesture:
self.action33.setIcon(QIcon(image_path+self.check_icon))
else:
self.action33.setIcon(QIcon(None))
#print 'set_r_setting', right_click_gesture
def save_mouse_setting(self):
self.mouse_setting_path = self.dir_path+'\\sisidebar_mouse_data_'+str(maya_ver)+'.json'
global right_click_gesture
global center_click_gesture
global left_click_gesture
if not os.path.exists(self.dir_path):
os.makedirs(self.dir_path)
save_data = {}
save_data['r_mouse'] = right_click_gesture
save_data['c_mouse'] = center_click_gesture
save_data['l_mouse'] = left_click_gesture
with open(self.mouse_setting_path, 'w') as f:
json.dump(save_data, f)
def load_mouse_setting(self):
self.mouse_setting_path = self.dir_path+'\\sisidebar_mouse_data_'+str(maya_ver)+'.json'
global right_click_gesture
global center_click_gesture
global left_click_gesture
if os.path.isfile(self.mouse_setting_path):#保存ファイルが存在したら
with open(self.mouse_setting_path, 'r') as f:
save_data = json.load(f)
try:
right_click_gesture = save_data['r_mouse']
center_click_gesture = save_data['c_mouse']
left_click_gesture = save_data['l_mouse']
except:
right_click_gesture = True
center_click_gesture = True
left_click_gesture = True
else:
right_click_gesture = True
center_click_gesture = True
left_click_gesture = True
#print 'load mouse'
def check_click_drag_highlight(self):
if cmds.selectPref(q=True, cld=True):
return image_path+self.check_icon
else:
return None
#子のハイライト設定を変更
def set_click_drag(self):
if cmds.selectPref(q=True, cld=True):
self.sel_action03.setIcon(QIcon(None))
cmds.selectPref(cld=False)
else:
self.sel_action03.setIcon(QIcon(image_path+self.check_icon))
cmds.selectPref(cld=True)
#子のハイライト設定を取得
def check_sel_highlight(self):
self.sel_highlight = [None]*3
icon_path = image_path+self.check_icon
sch = cmds.selectPref(q=True, sch=True)
self.sel_highlight[sch] = icon_path
#子のハイライト設定を変更
def set_sel_highlight(self, mode=0):
cmds.selectPref(sch=mode)
self.check_sel_highlight()
for i, but in enumerate(self.sch_buts):
but.setIcon(QIcon(self.sel_highlight[i]))
    # Recursively create the context menu and the floating menu.
    def create_f_trans_menu(self):  # when detached to a window, rebuild with a separate menu instance
        """Open (or reopen) the transform menu as a detached floating window."""
        top_f_menus = self.create_trans_menu(add_float=False)
        global transform_manu_window
        try:
            # Close any previously opened floating window before replacing it.
            transform_manu_window.close()
        except:
            pass
        transform_manu_window = FloatingWindow(menus=top_f_menus, offset=transform_offset, menu_name='transform_top')
        global trs_window_flag
        trs_window_flag = True
    def create_trans_menu(self, add_float=True):
        """Build the transform QMenu (reset/freeze/round/match/center tools).

        add_float=True builds the docked version (adds the detach row and
        binds self.action19/self.action20); add_float=False builds the copy
        used by the floating window (binds self.f_action19/self.f_action20).
        Returns the populated QMenu.
        """
        self.trans_menus = QMenu(self.transform_top)
        qt.change_button_color(self.trans_menus, textColor=menu_text, bgColor=menu_bg, hiText=menu_high_text, hiBg=menu_high_bg, mode='window')
        if add_float:  # row that detaches this menu into a floating window
            action10 = self.trans_menus.addAction(u'-----------------------------------------------------✂----')
            action10.triggered.connect(self.create_f_trans_menu)
        #self.trans_menus.setTearOffEnabled(True)  # tear-off could be enabled instead
        #----------------------------------------------------------------------------------------------------
        mag = lang.Lang(en='Transform Preference...',
                        ja=u'変換設定')
        action25 = QAction(mag.output(), self.trans_menus, icon=QIcon(image_path+'setting'))
        self.trans_menus.addAction(action25)
        action25.triggered.connect(lambda : self.pop_option_window(mode='transform'))
        self.trans_menus.addSeparator()  # add separator
        #----------------------------------------------------------------------------------------------------
        mag = lang.Lang(en='Reset Actor to Bind Pose',
                        ja=u'リセットアクター/バインドポーズに戻す')
        action12 = self.trans_menus.addAction(mag.output())
        action12.triggered.connect(transform.reset_actor)
        self.trans_menus.addSeparator()  # add separator
        #----------------------------------------------------------------------------------------------------
        # Joint-orientation conversion helpers.
        mag = lang.Lang(en='Transfer Rotate to Joint Orient',
                        ja=u'回転をジョイントの方向に変換')
        action17 = self.trans_menus.addAction(mag.output())
        action17.triggered.connect(qt.Callback(lambda : transform.set_joint_orient(reset=True)))
        mag = lang.Lang(en='Transfer Joint Orient to Rotate',
                        ja=u'ジョイントの方向を回転に変換')
        action18 = self.trans_menus.addAction(mag.output())
        action18.triggered.connect(qt.Callback(lambda : transform.set_joint_orient(reset=False)))
        self.trans_menus.addSeparator()  # add separator
        #----------------------------------------------------------------------------------------------------
        # Neutral-pose node management.
        mag = lang.Lang(en='Set Neutral Pose Node (UnLock Attr)',
                        ja=u'ニュートラルポーズノードを設定 (ロックなし)')
        action26 = self.trans_menus.addAction(mag.output())
        action26.triggered.connect(qt.Callback(lambda : toggle_center_mode(mode=True, ntpose=True, lock=False)))
        mag = lang.Lang(en='Set Neutral Pose Node (Lock Attr)',
                        ja=u'ニュートラルポーズノードを設定 (ロック)')
        action28 = self.trans_menus.addAction(mag.output())
        action28.triggered.connect(qt.Callback(lambda : toggle_center_mode(mode=True, ntpose=True, lock=True)))
        mag = lang.Lang(en='Remove Neutral Pose Node',
                        ja=u'ニュートラルポーズノードを解除')
        action27 = self.trans_menus.addAction(mag.output())
        action27.triggered.connect(qt.Callback(lambda : toggle_center_mode(mode=False, ntpose=True)))
        self.trans_menus.addSeparator()  # add separator
        #----------------------------------------------------------------------------------------------------
        # Reset-transform family; c_comp forwards the "child compensate" toggle.
        mag = lang.Lang(en='Reset All Transforms(with Pivot)',
                        ja=u'すべての変換をリセット(ピボットもリセット)')
        action13 = self.trans_menus.addAction(mag.output())
        action13.triggered.connect(qt.Callback(lambda : transform.reset_transform(mode='all', c_comp=self.child_comp_but.isChecked())))
        mag = lang.Lang(en='Reset All Transforms(without Pivot)',
                        ja=u'すべての変換をリセット(ピボットはリセットしない)')
        action29 = self.trans_menus.addAction(mag.output())
        action29.triggered.connect(qt.Callback(lambda : transform.reset_transform(mode='all', c_comp=self.child_comp_but.isChecked(),
                                                                                  reset_pivot=False)))
        mag = lang.Lang(en='Reset Scaling',
                        ja=u'スケーリングのリセット')
        action14 = self.trans_menus.addAction(mag.output())
        action14.triggered.connect(qt.Callback(lambda : transform.reset_transform(mode='scale', c_comp=self.child_comp_but.isChecked())))
        mag = lang.Lang(en='Reset Rotation',
                        ja=u'回転のリセット')
        action15 = self.trans_menus.addAction(mag.output())
        action15.triggered.connect(qt.Callback(lambda : transform.reset_transform(mode='rot', c_comp=self.child_comp_but.isChecked())))
        mag = lang.Lang(en='Reset Translation(with Pivot)',
                        ja=u'移動のリセット(ピボットもリセット)')
        action16 = self.trans_menus.addAction(mag.output())
        action16.triggered.connect(qt.Callback(lambda : transform.reset_transform(mode='trans', c_comp=self.child_comp_but.isChecked())))
        mag = lang.Lang(en='Reset Translation(without Pivot)',
                        ja=u'移動のリセット(ピボットはリセットしない)')
        action30 = self.trans_menus.addAction(mag.output())
        action30.triggered.connect(qt.Callback(lambda : transform.reset_transform(mode='trans', c_comp=self.child_comp_but.isChecked(),
                                                                                  reset_pivot=False)))
        self.trans_menus.addSeparator()  # add separator
        #----------------------------------------------------------------------------------------------------
        # Freeze-transform family.
        mag = lang.Lang(en='Freeze All Transforms',
                        ja=u'すべての変換をフリーズ')
        action0 = self.trans_menus.addAction(mag.output())
        action0.triggered.connect(qt.Callback(lambda : transform.freeze_transform(mode='all', c_comp=self.child_comp_but.isChecked())))
        mag = lang.Lang(en='Freeze Scaling',
                        ja=u'スケーリングのフリーズ')
        action1 = self.trans_menus.addAction(mag.output())
        action1.triggered.connect(qt.Callback(lambda : transform.freeze_transform(mode='scale', c_comp=self.child_comp_but.isChecked())))
        mag = lang.Lang(en='Freeze Rotation',
                        ja=u'回転のフリーズ')
        action2 = self.trans_menus.addAction(mag.output())
        action2.triggered.connect(qt.Callback(lambda : transform.freeze_transform(mode='rot', c_comp=self.child_comp_but.isChecked())))
        mag = lang.Lang(en='Freeze Translation',
                        ja=u'移動のフリーズ')
        action3 = self.trans_menus.addAction(mag.output())
        action3.triggered.connect(qt.Callback(lambda : transform.freeze_transform(mode='trans', c_comp=self.child_comp_but.isChecked())))
        mag = lang.Lang(en='Freeze Joint Orientation',
                        ja=u'ジョイントの方向のフリーズ')
        action4 = self.trans_menus.addAction(mag.output())
        action4.triggered.connect(qt.Callback(lambda : transform.freeze_transform(mode='joint', c_comp=self.child_comp_but.isChecked())))
        self.trans_menus.addSeparator()  # add separator
        #----------------------------------------------------------------------------------------------------
        # Round-transform family; labels embed the current decimal setting.
        mag = lang.Lang(en='Round All Transform / Decimal'+str(round_decimal_value),
                        ja=u'すべての変換を丸める / 桁数'+str(round_decimal_value))
        self.action20 = self.trans_menus.addAction(mag.output())
        self.action20.triggered.connect(qt.Callback(lambda : transform.round_transform(mode='all', digit=round_decimal_value)))
        mag = lang.Lang(en='Round Scaling / Decimal'+str(round_decimal_value),
                        ja=u'スケーリングを丸める / 桁数'+str(round_decimal_value))
        self.action21 = self.trans_menus.addAction(mag.output())
        self.action21.triggered.connect(qt.Callback(lambda : transform.round_transform(mode='scale', digit=round_decimal_value)))
        mag = lang.Lang(en='Round Rotation / Decimal'+str(round_decimal_value),
                        ja=u'回転を丸める / 桁数'+str(round_decimal_value))
        self.action22 = self.trans_menus.addAction(mag.output())
        self.action22.triggered.connect(qt.Callback(lambda : transform.round_transform(mode='rotate', digit=round_decimal_value)))
        mag = lang.Lang(en='Round Translation / Decimal'+str(round_decimal_value),
                        ja=u'移動値を丸める / 桁数'+str(round_decimal_value))
        self.action23 = self.trans_menus.addAction(mag.output())
        self.action23.triggered.connect(qt.Callback(lambda : transform.round_transform(mode='translate', digit=round_decimal_value)))
        mag = lang.Lang(en='Round Joint Orient / Decimal'+str(round_decimal_value),
                        ja=u'ジョイントの方向を丸める / 桁数'+str(round_decimal_value))
        self.action24 = self.trans_menus.addAction(mag.output())
        self.action24.triggered.connect(qt.Callback(lambda : transform.round_transform(mode='jointOrient', digit=round_decimal_value)))
        # Kept so the labels can be refreshed when round_decimal_value changes.
        self.round_action_list = [self.action20, self.action21, self.action22, self.action23, self.action24]
        self.trans_menus.addSeparator()  # add separator
        #----------------------------------------------------------------------------------------------------
        # Match-transform family.
        mag = lang.Lang(en='Match All Transform',
                        ja=u'すべての変換の一致')
        action6 = self.trans_menus.addAction(mag.output())
        action6.triggered.connect(lambda : transform.match_transform(mode='all', child_comp=self.child_comp_but.isChecked()))
        mag = lang.Lang(en='Match Scaling',
                        ja=u'スケーリングの一致')
        action7 = self.trans_menus.addAction(mag.output())
        action7.triggered.connect(lambda : transform.match_transform(mode='scale', child_comp=self.child_comp_but.isChecked()))
        mag = lang.Lang(en='Match Rotation',
                        ja=u'回転の一致')
        action8 = self.trans_menus.addAction(mag.output())
        action8.triggered.connect(lambda : transform.match_transform(mode='rotate', child_comp=self.child_comp_but.isChecked()))
        mag = lang.Lang(en='Match Translation',
                        ja=u'移動値の一致')
        action9 = self.trans_menus.addAction(mag.output())
        action9.triggered.connect(lambda : transform.match_transform(mode='translate', child_comp=self.child_comp_but.isChecked()))
        self.trans_menus.addSeparator()  # add separator
        #----------------------------------------------------------------------------------------------------
        mag = lang.Lang(en='Move Center to Selection (All selection)',
                        ja=u'センターを選択に移動(すべての選択)')
        action5 = self.trans_menus.addAction(mag.output())
        action5.triggered.connect(qt.Callback(transform.move_center2selection))
        mag = lang.Lang(en='Move Center to Selection (Each object)',
                        ja=u'センターを選択に移動(オブジェクトごと)')
        action11 = self.trans_menus.addAction(mag.output())
        action11.triggered.connect(qt.Callback(transform.move_center_each_object))
        self.trans_menus.addSeparator()  # add separator
        #----------------------------------------------------------------------------------------------------
        # Path of the JSON file used by save/load_transform_setting.
        self.trs_setting_path = self.dir_path+'\\sisidebar_trs_data_'+str(maya_ver)+'.json'
        #print self.trs_setting_path
        self.cp_mag = lang.Lang(en=u'Collapse Point For Snapping/Absolute Translation',
                                ja=u'スナップ移動/絶対移動でポイントを集約')
        if add_float:
            self.action19 = self.trans_menus.addAction(self.cp_mag.output())
            if cp_abs_flag:
                self.action19.setIcon(QIcon(image_path+self.check_icon))
            else:
                self.action19.setIcon(QIcon(None))
            self.action19.triggered.connect(self.toggle_cp_absolute)
        else:
            self.f_action19 = self.trans_menus.addAction(self.cp_mag.output())
            if cp_abs_flag:
                self.f_action19.setIcon(QIcon(image_path+self.check_icon))
            else:
                self.f_action19.setIcon(QIcon(None))
            self.f_action19.triggered.connect(self.toggle_cp_absolute)
        # Setting that lets the side bar take priority over Maya's manipulator handle.
        self.hl_mag = lang.Lang(en=u'Force Side Bar axis selection status',
                                ja=u'サイドバーの軸選択状態を優先する')
        if add_float:
            # NOTE(review): this rebinds self.action20, clobbering the
            # 'Round All Transform' action assigned above; round_action_list
            # still holds the original object — confirm the name collision
            # is intentional.
            self.action20 = self.trans_menus.addAction(self.hl_mag.output())
            if ommit_manip_link:
                self.action20.setIcon(QIcon(image_path+self.check_icon))
            else:
                self.action20.setIcon(QIcon(None))
            self.action20.triggered.connect(self.toggle_manip_priority)
        else:
            self.f_action20 = self.trans_menus.addAction(self.hl_mag.output())
            if ommit_manip_link:
                self.f_action20.setIcon(QIcon(image_path+self.check_icon))
            else:
                self.f_action20.setIcon(QIcon(None))
            self.f_action20.triggered.connect(self.toggle_manip_priority)
        #self.trans_menus.setTearOffEnabled(True)  # tear-off could be enabled instead
        return self.trans_menus
#マニプ優先設定を切り替える
def toggle_manip_priority(self):
global ommit_manip_link
#print 'pre_cp_abs_flag', cp_abs_flag
if ommit_manip_link:
ommit_manip_link = False
else:
ommit_manip_link = True
self.save_transform_setting()
if ommit_manip_link:
set_icon = QIcon(image_path+self.check_icon)
else:
set_icon = QIcon(None)
try:
self.f_action20.setIcon(set_icon)
except Exception as e:
pass
#top_menus = self.create_trans_menu()
#self.transform_top.setMenu(top_menus)
#絶対値に移動を切り替える
def toggle_cp_absolute(self):
global cp_abs_flag
#print 'pre_cp_abs_flag', cp_abs_flag
if cp_abs_flag:
cp_abs_flag = False
else:
cp_abs_flag = True
self.save_transform_setting()
if cp_abs_flag:
set_icon = QIcon(image_path+self.check_icon)
else:
set_icon = QIcon(None)
try:
self.f_action19.setIcon(set_icon)
except Exception as e:
pass
#top_menus = self.create_trans_menu()
#self.transform_top.setMenu(top_menus)
#絶対値に移動を切り替える
def toggle_action_check(self, item_id, flags, flag_str):
global cp_abs_flag
global ommit_manip_link
try:
exec('f_item = self.f_action'+str(item_id))
except Exception as e:
print e.message
exec('m_item = self.action'+str(item_id))
#print 'pre_cp_abs_flag', cp_abs_flag
#print flags
if flags:
exec(flag_str+' = False')
else:
exec(flag_str+' = True')
exec('print '+flag_str)
self.save_transform_setting()
if flags:
try:
f_item.setIcon(QIcon(image_path+self.check_icon))
except Exception as e:
print e.message
pass
top_menus = self.create_trans_menu()
self.transform_top.setMenu(top_menus)
else:
try:
f_item.setIcon(QIcon(None))
except Exception as e:
print e.message
pass
top_menus = self.create_trans_menu()
self.transform_top.setMenu(top_menus)
#print 'cp_abs_flag', cp_abs_flag
def load_transform_setting(self):
global cp_abs_flag
global ommit_manip_link
if os.path.isfile(self.trs_setting_path):#保存ファイルが存在したら
with open(self.trs_setting_path, 'r') as f:
save_data = json.load(f)
try:
cp_abs_flag = save_data['cp_abs']
ommit_manip_link = save_data['manip_link']
except:
cp_abs_flag = False
ommit_manip_link = False
def save_transform_setting(self):
if not os.path.exists(self.dir_path):
os.makedirs(self.dir_path)
save_data = {}
save_data['cp_abs'] = cp_abs_flag
save_data['manip_link'] = ommit_manip_link
with open(self.trs_setting_path, 'w') as f:
json.dump(save_data, f)
    # Run "reset actor" (return the selected actor to its bind pose).
    def reset_actor(self):
        """Thin wrapper delegating to joint_animation.reset_actor()."""
        joint_animation.reset_actor()
def all_toggle_snapping(self):
flag = any(but.isChecked() for but in self.snap_section_but[:-1])
#print flag
if flag:
check_flag | |
# repo: triump0870/Interactive_Programming_Python
# Simple implementation of GalaxyInvanders game
# <NAME> (India) - 3 Nov 2013
# www.codeskulptor.org/#user23_fTVPDKIDhRdCfUp
VER = "1.0"
# "add various aliens"
import simplegui, math, random, time
#Global const
# Playfield geometry (pixels).
FIELD_WIDTH = 850
FIELD_HEIGHT = 500
TOP_MARGIN = 75
LEFT_MARGIN = 25
# Nominal alien sprite footprint used for fleet grid spacing.
ALIEN_WIDTH = 48
ALIEN_HEIGHT = 55
# Per-tick movement speeds.
PLAYER_SPEED = 10
BULLET_SPEED = 10
BULLET_POWER = 1
BONUS_SPEED = 10
# [max horizontal, max vertical] speed caps for flying aliens.
ALIEN_SPEED = [3, 5]
# Images:
pImage = simplegui.load_image('https://dl.dropbox.com/s/zhnjucatewcmfs4/player.png')
# aImages[tier] holds the 3 animation frames for that alien tier (7 tiers).
aImages = []
for i in range(7):
    aImages.append([])
aImages[0].append(simplegui.load_image('https://dl.dropbox.com/s/0cck7w6r0mt8pzz/alien_1_1.png'))
aImages[0].append(simplegui.load_image('https://dl.dropbox.com/s/j0kubnhzajbdngu/alien_1_2.png'))
aImages[0].append(simplegui.load_image('https://dl.dropbox.com/s/zkeu6hqh9bakj25/alien_1_3.png'))
aImages[1].append(simplegui.load_image('https://dl.dropbox.com/s/e75mkcylat70lnd/alien_2_1.png'))
aImages[1].append(simplegui.load_image('https://dl.dropbox.com/s/pgjvaxg0z6rhco9/alien_2_2.png'))
aImages[1].append(simplegui.load_image('https://dl.dropbox.com/s/en0hycfsi3cuzuo/alien_2_3.png'))
aImages[2].append(simplegui.load_image('https://dl.dropbox.com/s/fu9weoll70acs8f/alien_3_1.png'))
aImages[2].append(simplegui.load_image('https://dl.dropbox.com/s/b2rxru2nt5q2r1u/alien_3_2.png'))
aImages[2].append(simplegui.load_image('https://dl.dropbox.com/s/x66vgj9fc2jlg53/alien_3_3.png'))
aImages[3].append(simplegui.load_image('https://dl.dropbox.com/s/7o04ljg52kniyac/alien_4_1.png'))
aImages[3].append(simplegui.load_image('https://dl.dropbox.com/s/b3v6tvami0rvl6r/alien_4_2.png'))
aImages[3].append(simplegui.load_image('https://dl.dropbox.com/s/j451arcevsag36h/alien_4_3.png'))
aImages[4].append(simplegui.load_image('https://dl.dropbox.com/s/jlhdigkm79nncnm/alien_5_1.png'))
aImages[4].append(simplegui.load_image('https://dl.dropbox.com/s/wvlvjsa8yl6gka3/alien_5_2.png'))
aImages[4].append(simplegui.load_image('https://dl.dropbox.com/s/rrg4y1tnsbrh04r/alien_5_3.png'))
aImages[5].append(simplegui.load_image('https://dl.dropbox.com/s/oufyfy590tzf7cx/alien_6_1.png'))
aImages[5].append(simplegui.load_image('https://dl.dropbox.com/s/p4ehd9f6mo2xfzc/alien_6_2.png'))
aImages[5].append(simplegui.load_image('https://dl.dropbox.com/s/815gq3xyh6wmc0t/alien_6_3.png'))
aImages[6].append(simplegui.load_image('https://dl.dropbox.com/s/bv4ycocuomsvj50/alien_7_1.png'))
aImages[6].append(simplegui.load_image('https://dl.dropbox.com/s/krs2gtvdxxve79z/alien_7_2.png'))
aImages[6].append(simplegui.load_image('https://dl.dropbox.com/s/v2wczi8lxwczq87/alien_7_3.png'))
#backgrounds
bckg = []
bckg.append(simplegui.load_image("https://dl.dropbox.com/s/ibfu2t9vrh4bhxd/back01.jpg"))
bckg.append(simplegui.load_image("https://dl.dropbox.com/s/pcl8vzby25ovis8/back02.jpg"))
bckg.append(simplegui.load_image("https://dl.dropbox.com/s/g8nwo1t9s4i9usg/back03.jpg"))
bckg.append(simplegui.load_image("https://dl.dropbox.com/s/ee8oilluf7pe98h/back04.jpg"))
bckg.append(simplegui.load_image("https://dl.dropbox.com/s/7jfgjoxinzwwlx4/back05.jpg"))
bckg.append(simplegui.load_image("https://dl.dropbox.com/s/wh01g2q3607snvz/back06.jpg"))
bckg.append(simplegui.load_image("https://dl.dropbox.com/s/b72ltp2xii9utnr/back07.jpg"))
bckg.append(simplegui.load_image("https://dl.dropbox.com/s/av73jek8egezs1w/back08.jpg"))
bckg.append(simplegui.load_image("https://dl.dropbox.com/s/ik54ttfklv3x3ai/back09.jpg"))
bckg.append(simplegui.load_image("https://dl.dropbox.com/s/e9e6kpyg3yuoenc/back10.jpg"))
bckg.append(simplegui.load_image("https://dl.dropbox.com/s/zrabwnnvlwvn7it/back11.jpg"))
bckg.append(simplegui.load_image("https://dl.dropbox.com/s/a2infkx0rmn8b8m/back12.jpg"))
# sounds
sndPlayer = simplegui.load_sound('https://dl.dropbox.com/s/vl3as0o2m2wvlwu/player_shoot.wav')
sndAlien = simplegui.load_sound('https://dl.dropbox.com/s/m4x0tldpze29hcr/alien_shoot.wav')
sndPlayerExplosion = simplegui.load_sound('https://dl.dropbox.com/s/10fn2wh7kk7uoxh/explosion%2001.wav')
sndAlienHit = simplegui.load_sound('https://dl.dropbox.com/s/80qdvup27n8j6r1/alien_hit.wav')
sndAlienExplosion = simplegui.load_sound('https://dl.dropbox.com/s/qxm3je9vdlb469g/explosion_02.wav')
sndBonus = simplegui.load_sound('https://dl.dropbox.com/s/tzp7e20e5v19l01/bonus.wav')
sndPause = simplegui.load_sound('https://dl.dropbox.com/s/uzs9nixpd22asno/pause.wav')
sndTheme = simplegui.load_sound('https://dl.dropbox.com/s/52zo892uemfkuzm/theme_01.mp3')
# Grouped so volume/state can be applied to all sounds at once.
sounds = [sndPlayer, sndAlien, sndPlayerExplosion, sndAlienExplosion, \
          sndBonus, sndPause, sndTheme, sndAlienHit]
#Global variables
GameRunning = False
GameEnded = False
player_speed = 0         # current horizontal velocity applied in Player.move()
mes = ""
timer_counter = 0
lives = 0
level = 1
scores = 0
killed = 0               # total aliens destroyed (every 500 grants a life)
current_back = 0
paused = False
shoot_count = 0
level_time = []          # timestamps; alien_fly() reads the last entry
ready, go = False, False
#player = [FIELD_WIDTH //2, FIELD_HEIGHT - 30 + TOP_MARGIN]
#game objects
user_bullet = []
weapon_level = 1
weapon_speed = BULLET_SPEED
alien_bullets = []
alien_fleet = None
player = None
frame = None
aTimer = None
dTimer = None            # death-flash timer (see dHandler)
bonuses = []
dCounter = 0
back = False
bonus_count = [0, 0, 0, 0]
player_killed = False
player_killed_at = 0
# Per-level bitmask rows: each value marks which fleet slots hold an
# upgraded alien (consumed by AliensFleet via is_high_level).
# NOTE(review): the append loop below is redundant — all 7 entries are
# immediately reassigned.
level_map = []
for i in range(7):
    level_map.append([])
level_map[0] = [ 0, 0, 0, 0]
level_map[1] = [129, 0, 0, 0]
level_map[2] = [195, 129, 0, 0]
level_map[3] = [255, 195, 60, 0]
level_map[4] = [255, 231, 195, 195]
level_map[5] = [255, 255, 231, 195]
level_map[6] = [255, 255, 255, 231]
def draw_text(canvas, text, point, size, delta, color):
    """Draw *text* twice to fake a drop shadow: first at *point* in
    color[0], then shifted up-left by *delta* in color[1]."""
    shadow, face = color[0], color[1]
    canvas.draw_text(text, point, size, shadow)
    offset = [point[0] - delta[0], point[1] - delta[1]]
    canvas.draw_text(text, offset, size, face)
class Bonus:
    """A falling power-up dropped by a destroyed alien.

    kind: 0 = weapon (bullet) speed, 1 = weapon level, 2 = extra life,
    3 = weapon power.
    """
    def __init__ (self, kind, point):
        self.kind = kind
        self.x = point[0]
        self.y = point[1]
        self.v = BONUS_SPEED  # downward velocity per tick
        self.width = 36
        self.height = 36
        # BUGFIX: the original ended with 'return self', which raises
        # "TypeError: __init__() should return None" on every construction.
    def move(self):
        """Advance the bonus down the screen; returns self for chaining."""
        self.y += self.v
        return self
    def draw(self, canvas):
        """Draw the bonus as a ringed two-letter badge colour-coded by kind."""
        if self.kind == 0: #speed of bullet
            canvas.draw_circle([self.x, self.y], 15, 3, "LightBlue")
            canvas.draw_text("WS", [self.x-12, self.y+5], self.width //2, "LightBlue")
        elif self.kind == 1: #weapon level
            canvas.draw_circle([self.x, self.y], 15, 3, "Red")
            canvas.draw_text("WL", [self.x-12, self.y+5], self.width //2, "Red")
        elif self.kind == 2: #life
            canvas.draw_circle([self.x, self.y], 15, 3, "LightGreen")
            canvas.draw_text("LF", [self.x-12, self.y+5], self.width //2, "LightGreen")
        elif self.kind == 3: #weapon power
            # NOTE(review): "8010df" looks like a hex colour missing its
            # leading '#' — confirm against the rendering library.
            canvas.draw_circle([self.x, self.y], 15, 3, "8010df")
            canvas.draw_text("WP", [self.x-12, self.y+5], self.width //2, "8010df")
        return self
    def execute(self):
        """Apply this bonus's effect and award a level-scaled score."""
        global weapon_speed, weapon_level, player, scores, bonus_count
        bonus_count[self.kind] += 1
        if self.kind == 0: #speed of bullet
            weapon_speed += 1
            delta = round(math.pow(20, (1 + (1.0*level-1)/32))*5)
            scores = scores + delta
        elif self.kind == 1: #weapon level
            weapon_level += 1
            delta = round(math.pow(30, (1 + (1.0*level-1)/32))*5)
            scores = scores + delta
        elif self.kind == 2: #life
            player.lives += 1
            delta = round(math.pow(100, (1 + (1.0*level-1)/32))*5)
            scores = scores + delta
        elif self.kind == 3: #weapon power
            player.power += 0.1
            delta = round(math.pow(100, (1 + (1.0*level-1)/32))*5)
            scores = scores + delta
        sndBonus.play()
        return self
def dHandler():
    # Death-flash timer callback: alternates the canvas background between
    # red and black while the respawn pause runs, then restores black and
    # clears the player_killed flag.
    global dCounter, back, player_killed
    dCounter += 1
    if dCounter % 10 == 0:
        # Every 10 ticks swap the flash colour (timer created at 25 ms).
        if back:
            frame.set_canvas_background("Red")
        else:
            frame.set_canvas_background("black")
        back = not back;
    if dCounter > 50:
        # Flash sequence over: reset state and stop this one-shot timer.
        dCounter = 0
        player_killed = False
        dTimer.stop()
        frame.set_canvas_background("black")
class Bullet:
    """A single shot travelling vertically across the playfield."""
    def __init__ (self, point, color, velocity):
        # Spawn position and straight-line vertical velocity
        # (positive moves down-screen, negative up).
        self.x, self.y = point[0], point[1]
        self.color = color
        self.v = velocity
        # Nominal collision extent (bullets are treated as tiny boxes).
        self.width = 1
        self.height = 1
    def draw(self, canvas):
        # Rendered as a short vertical streak centred on (x, y).
        canvas.draw_line([self.x, self.y - 5], [self.x, self.y + 5], 3, self.color)
    def move(self):
        self.y += self.v
class Alien:
    """One invader: sits in the fleet grid or breaks formation to dive
    ('fly') at the player. kind is [image_variant, tier]."""
    def __init__(self, point, kind):
        self.x, self.y = point[0], point[1]
        self.kind = kind
        self.flying = False      # True once this alien leaves the fleet grid
        self.vy = 0
        self.vx = 0
        self.health = self.get_max_health()
        # Placeholder hit-box; draw() overwrites it with the sprite size.
        self.width = 20
        self.height = 20
    def get_max_health(self):
        """Health scales linearly with the alien's tier (kind[1])."""
        return 1 + 0.6 * self.kind[1]
    def shoot(self):
        """Fire downward, capped at level*2 simultaneous alien bullets."""
        if len(alien_bullets) < level * 2:
            alien_bullets.append(Bullet([self.x, self.y], "LightRed", BULLET_SPEED))
            sndAlien.play()
    def move(self, point):
        """Snap to the fleet slot *point*, or home in on the player when flying."""
        if not self.flying:
            self.x, self.y = point[0], point[1]
            return
        damping = 1.5
        self.y += self.vy / damping
        # Drift horizontally toward the player's current x position.
        step = self.vx / damping
        if self.x > player.x:
            self.x -= step
        else:
            self.x += step
        # Accelerate up to the per-axis speed caps.
        if self.vx < ALIEN_SPEED[0]:
            self.vx += 1
        if self.vy < ALIEN_SPEED[1]:
            self.vy += 1
    def draw(self, canvas):
        """Render the sprite (red circle until the image loads) plus a
        health bar when damaged."""
        img = aImages[self.kind[1]][self.kind[0]]
        if img.get_width() == 0:
            # Image not loaded yet: placeholder circle with nominal extent.
            w = h = 15
            canvas.draw_circle([self.x, self.y], 15, 5, "Red")
        else:
            self.width = w = img.get_width()
            self.height = h = img.get_height()
            canvas.draw_image(img, (w // 2, h // 2), (w, h), (self.x, self.y), (w, h))
        if self.health != self.get_max_health():
            # Red background bar with a green fill proportional to health.
            ratio = w * (self.health * 1.0) / self.get_max_health()
            canvas.draw_line([self.x - w // 2, self.y - h // 2 - 3], [self.x + w // 2, self.y - h // 2 - 3], 4, "red")
            canvas.draw_line([self.x - w // 2, self.y - h // 2 - 3], [self.x - w // 2 + ratio, self.y - h // 2 - 3], 4, "green")
        return canvas
class AliensFleet:
    # The alien formation: spawn grid, collective movement, shooting,
    # fly-out behaviour and player-bullet collision bookkeeping.
    def __init__ (self, point):
        # Anchor the fleet at *point* and spawn one alien per set bit in
        # self.pattern (four 8-bit row masks, initially 255 = full rows).
        def is_high_level(place):
            # True when level_map marks this grid slot as an upgraded alien.
            map_ = (level-1)%7
            row = level_map[map_][place[1]] #255 - 0
            return (row & (1 << place[0]))<>0
        self.x = point[0]
        self.y = point[1]
        self.aliens = []
        self.pattern = [255, 255, 255, 255]
        self.y_velocity = ALIEN_HEIGHT//3 + 1
        self.x_velocity = - ALIEN_WIDTH//3 + 1
        for i in range(self.get_aliens_count()):
            point = self.get_alien_position(i)
            place = self.get_alien_place(i)
            alien_level = (level-1)//7 + is_high_level(place)
            alien = Alien(point, [random.randrange(3), alien_level])
            self.aliens.append(alien)
    def get_aliens_count(self):
        # Number of live aliens = set bits across the four row masks.
        c = 0
        for i in range(4):
            for j in range(8):
                if (self.pattern[i] & (1 << j))<>0:
                    c+=1
        return c
    def get_alien_position(self, n):
        #returns a screen x, y of alien with number n
        point = self.get_alien_place(n)
        x = point[0]*(ALIEN_WIDTH + 3) + self.x
        y = point[1]*(ALIEN_HEIGHT + 3) +self.y
        point = [x, y]
        return point
    def get_alien_place(self, n):
        #returns a fleet x, y of alien with number n
        # Scans the pattern bits row-major; the n-th set bit gives the
        # alien's (column, row) grid slot.
        x, y, c = 0, 0, 0
        for i in range(4):
            for j in range(8):
                if (self.pattern[i] & (1 << j))<>0:
                    if c==n:
                        x, y = j, i
                    c+=1
        point = [x, y]
        return point
    def move_aliens(self):
        # Re-sync every alien to its grid slot (flying aliens ignore the
        # slot and steer themselves inside Alien.move).
        i = 0
        for alien in self.aliens:
            point = self.get_alien_position(i)
            alien.move(point)
            i += 1
        return self
    def move_down(self):
        # Step the whole fleet down; crossing y>400 costs the player a
        # life and resets the fleet height.
        self.y += self.y_velocity
        if self.y>400:
            player.explode()
            self.y = 100
        self.move_aliens()
    def move_side(self):
        # Slide the fleet horizontally, bouncing off the playfield margins.
        self.x -= self.x_velocity
        # check borders of fleet:
        left = 8
        right = -1
        for i in range(len(self.aliens)):
            point = self.get_alien_place(i)
            if point[0]<left:
                left = point[0]
            if point[0]>right:
                right = point[0]
        if (self.x+(left+1)*60 < LEFT_MARGIN + 10) or (self.x + (right+1)*45>FIELD_WIDTH-LEFT_MARGIN-60):
            self.x_velocity = -self.x_velocity
        self.move_aliens()
    def draw(self, canvas):
        for alien in self.aliens:
            alien.draw(canvas)
    def make_shoot(self):
        # Each alien gets a ~2% chance to fire, capped by the level-scaled
        # simultaneous-bullet budget.
        for alien in self.aliens:
            if len(alien_bullets) < level * 3 + 1:
                if random.randrange(101)<2: #
                    alien.shoot()
        return self
    def alien_fly(self):
        # Occasionally release an alien to dive at the player; capped at
        # 1+level flyers and only after 60 s into the current level.
        i = 0
        for alien in self.aliens:
            if alien.flying:
                i += 1
            if (i<1+level) and (random.randrange(1000)<3) and (time.time()-level_time[len(level_time)-1]>60):
                alien.flying=True
    def check_death(self):
        # Resolve player-bullet hits: kill or damage aliens, clear the
        # pattern bit, award score, and occasionally drop a bonus.
        # NOTE(review): both self.aliens and user_bullet are mutated while
        # being iterated; 'break' guards the kill path, but the damage path
        # removes 'bullet' and keeps scanning aliens, which can raise
        # ValueError if the same bullet overlaps two aliens — confirm.
        global scores, killed, player
        i = 0
        for bullet in user_bullet:
            for i in range(len(self.aliens)):
                alien = self.aliens[i]
                if isBulletHit(bullet, alien):
                    if alien.health-player.power<=0:
                        point = self.get_alien_place(i)
                        sndAlienExplosion.play()
                        self.aliens.remove(alien)
                        # Clear this alien's bit from its row mask.
                        x = ~int((1 << point[0]))
                        self.pattern[point[1]] = self.pattern[point[1]] & x
                        user_bullet.remove(bullet)
                        delta = round(math.pow(5, (1 + (1.0*level-1)/32))*5)
                        scores = scores + delta
                        killed += 1
                        # Weighted bonus drop: rarer kinds use lower ranges.
                        x = random.randrange(1000)
                        if x<5:
                            bonus = Bonus(3, [alien.x, alien.y])
                            bonuses.append(bonus)
                        elif x<50:
                            bonus = Bonus(2, [alien.x, alien.y])
                            bonuses.append(bonus)
                        elif x<120:
                            bonus = Bonus(1, [alien.x, alien.y])
                            bonuses.append(bonus)
                        elif x<200:
                            bonus = Bonus(0, [alien.x, alien.y])
                            bonuses.append(bonus)
                        if killed % 500 == 0:
                            # Every 500 kills grants a bonus life.
                            player.lives += 1
                            sndBonus.play()
                        break
                    else:
                        # Non-lethal hit: absorb the bullet and apply damage.
                        user_bullet.remove(bullet)
                        alien.health -= player.power
                        sndAlienHit.play()
            i += 1
class Player:
    """The player's ship: position, lives, weapon power and death handling."""
    def __init__(self, point, lives):
        self.x = point[0]
        self.y = point[1]
        # BUGFIX: honour the caller-supplied lives count; the parameter was
        # previously ignored in favour of a hard-coded 3.
        self.lives = lives
        self.speed = player_speed   # snapshot of the global horizontal speed
        self.power = BULLET_POWER   # damage dealt per bullet hit
        # Nominal hit-box; draw_user_image() updates it from the sprite.
        self.width = 20
        self.height = 20
    def draw(self, canvas):
        """Render the ship at its current position."""
        draw_user_image(canvas, [self.x, self.y])
    def move(self):
        """Apply the global player_speed and clamp to the playfield edges."""
        self.x += player_speed
        if self.x < LEFT_MARGIN*2:
            self.x = LEFT_MARGIN*2
        if self.x > FIELD_WIDTH:
            self.x = FIELD_WIDTH
    def draw_lives_counter(self, canvas):
        """Draw one ship icon per life (up to 4), else one icon plus 'x N'."""
        if self.lives < 5:
            for i in range(self.lives):
                draw_user_image(canvas, [150+i*35, 15])
        else:
            draw_user_image(canvas, [150, 15])
            canvas.draw_text(" x "+str(int(self.lives)), [170, 25], 25, "Yellow")
    def explode(self):
        """Handle the player being hit: clear bullets/bonuses, recall flying
        aliens, downgrade the weapon, lose a life and start the death flash."""
        global dTimer, alien_bullets, user_bullet, weapon_level, weapon_speed
        global alien_fleet, player_killed_at, player_killed, player_speed
        # BUGFIX: 'bonuses' was assigned below without a global declaration,
        # so falling bonuses were never actually cleared on death.
        global bonuses
        player_speed = 0
        player_killed_at = time.time()
        sndPlayerExplosion.play()
        # Any alien in free flight returns to the fleet formation.
        for alien in alien_fleet.aliens:
            alien.flying = False
        player_killed = True
        alien_bullets = []
        user_bullet = []
        bonuses = []
        # Weapon level partially resets, scaling with the current level.
        weapon_level = level // 10 + 1
        weapon_speed = BULLET_SPEED
        self.lives -= 1
        if self.lives < 0:
            stop_game()
        dTimer = simplegui.create_timer(25, dHandler)
        dTimer.start()
#helper functions
def dummy(key):
    """No-op input handler: echoes its argument back unchanged."""
    return key
def pause():
    """Toggle the global pause flag and play the pause jingle."""
    global paused
    paused = not paused
    sndPause.play()
def draw_user_image(canvas, point):
    # draw a image of user ship
    # Falls back to a yellow circle until the player image has loaded.
    global player
    if pImage.get_width()==0:
        canvas.draw_circle(point, 12, 5, "Yellow")
    else:
        canvas.draw_image(pImage, (25, 36), (49, 72), point, (34, 50))
        # NOTE(review): overwrites the shared player hit-box with the raw
        # image size (49x72) even though the ship is drawn scaled to 34x50 —
        # confirm which size collision detection should use.
        player.width = pImage.get_width()
        player.height = pImage.get_height()
    return canvas
def draw_lives(canvas):
# draw lives counter
canvas.draw_text("Lives : | |
# scripts/analytics_tests.py
# Need to import path to test/fixtures and test/scripts/
# Ex : export PYTHONPATH='$PATH:/root/test/fixtures/:/root/test/scripts/'
#
# To run tests, you can do 'python -m testtools.run tests'. To run specific tests,
# You can do 'python -m testtools.run -l tests'
# Set the env variable PARAMS_FILE to point to your ini file. Else it will try to pick params.ini in PWD
#
from netaddr import IPNetwork
import fixtures
from util import *
from netaddr import *
import logging as LOG
import re
import json
import urllib2
import requests
import time
import datetime
import threading
import Queue
from subprocess import Popen, PIPE
import shlex
from netaddr import *
import random
# Month-name <-> month-number lookup tables used when parsing timestamps.
months = {'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5, 'Jun':
          6, 'Jul': 7, 'Aug': 8, 'Sep': 9, 'Oct': 10, 'Nov': 11, 'Dec': 12}
months_number_to_name = {
    '01': 'JAN', '02': 'FEB', '03': 'MAR', '04': 'APR', '05': 'MAY',
    '06': 'JUN', '07': 'JUL', '08': 'AUG', '09': 'SEP', '10': 'OCT', '11': 'NOV', '12': 'DEC'}
# Expected attributes per UVE type, keyed by the UVE URL fragment.
uve_dict = {
    'xmpp-peer/': ['state_info', 'peer_stats_info', 'event_info', 'send_state', 'identifier'],
    'config-node/': ['module_cpu_info', 'module_id', 'cpu_info', 'build_info', 'config_node_ip', 'process_state_list'],
    'control-node/': ['uptime', 'build_info', 'cpu_info', 'ifmap_info', 'process_state_list'],
    'analytics-node/': ['cpu_info', 'ModuleCpuState', 'module_cpu_info', 'process_state_list', 'redis-query', 'contrail-qe',
                        'contrail-collector', 'contrail-analytics-nodemgr', 'redis-uve', 'contrail-opserver', 'build_info',
                        'generator_infos'],
    'generator/': ['client_info', 'ModuleServerState', 'session_stats', 'generator_info'],
    'bgp-peer/': ['state_info', 'peer_stats_info', 'families', 'peer_type', 'local_asn',
                  'configured_families', 'event_info', 'peer_address', 'peer_asn', 'send_state'],
    'vrouter/': ['exception_packets', 'cpu_info', 'uptime', 'total_flows', 'drop_stats', 'xmpp_stats_list', 'vhost_stats', 'process_state_list',
                 'control_ip', 'dns_servers', 'build_info', 'vhost_cfg', 'tunnel_type', 'xmpp_peer_list', 'self_ip_list'],
    'dns-node/': ['start_time', 'build_info', 'self_ip_list']}
# All UVE types exercised by the verification routines.
uve_list = ['xmpp-peer/', 'config-node/', 'control-node/',
            'analytics-node/', 'generator/', 'bgp-peer/', 'dns-node/', 'vrouter/']
class AnalyticsVerification(fixtures.Fixture):
    def __init__(self, inputs, api_server_inspect, cn_inspect, agent_inspect, ops_inspect, logger=LOG):
        """Cache the introspect handles and pre-compute the generator host lists.

        inputs             -- test-bed topology fixture (host_data, *_ips).
        api_server_inspect -- API-server introspect client.
        cn_inspect         -- control-node introspect client.
        agent_inspect      -- vrouter-agent introspect client.
        ops_inspect        -- opserver introspect clients indexed by
                              collector IP (see get_connection_dict).
        logger             -- defaults to the logging module imported as LOG.
        """
        self.inputs = inputs
        self.ops_inspect = ops_inspect
        self.api_s_inspect = api_server_inspect
        self.agent_inspect = agent_inspect
        self.cn_inspect = cn_inspect
        self.logger = logger
        self.get_all_generators()
def get_all_generators(self):
self.generator_hosts = []
self.bgp_hosts = []
self.compute_hosts = []
self.collector_hosts = []
self.cfgm_host = self.inputs.host_data[self.inputs.cfgm_ip]['name']
if (self.cfgm_host not in self.generator_hosts):
self.generator_hosts.append(self.cfgm_host)
# collector_ip=self.inputs.collector_ip
# self.collector_host=self.inputs.host_data[collector_ip]['name']
for collector_ip in self.inputs.collector_ips:
c_host = self.inputs.host_data[collector_ip]['name']
self.collector_hosts.append(c_host)
if (c_host not in self.generator_hosts):
self.generator_hosts.append(c_host)
for ip in self.inputs.bgp_ips:
bgp_host = self.inputs.host_data[ip]['name']
self.bgp_hosts.append(bgp_host)
if (bgp_host not in self.generator_hosts):
self.generator_hosts.append(bgp_host)
for ip in self.inputs.compute_ips:
compute_host = self.inputs.host_data[ip]['name']
self.compute_hosts.append(compute_host)
if (compute_host not in self.generator_hosts):
self.generator_hosts.append(compute_host)
def get_connection_status(self, collector, generator, moduleid, node_type, instanceid='0'):
'''Getting connection status with generator:node_type:moduleid:instanceid with collector
'''
connobj = self.get_connection_dict(
collector, generator, moduleid, node_type, instanceid)
if connobj:
return connobj['status']
else:
return None
def get_primary_collector(self, opserver, generator, moduleid, node_type, instanceid='0'):
'''Get primary collector for a generator'''
connobj = self.get_connection_dict(
opserver, generator, moduleid, node_type, instanceid)
if connobj:
return connobj['primary']
else:
return None
def get_secondary_collector(self, opserver, generator, moduleid, node_type, instanceid='0'):
'''Get secondary collector for a generator'''
connobj = self.get_connection_dict(
opserver, generator, moduleid, node_type, instanceid)
if connobj:
return connobj['secondary']
else:
return None
    def get_connection_dict(self, collector, generator, moduleid, node_type, instanceid):
        '''Getting connection dict with generator:moduleid with collector

        Queries the generator UVE through the opserver on *collector* and
        returns its 'client_info' attribute, or None when either the UVE or
        the attribute is missing.
        '''
        #import pdb;pdb.set_trace()
        self.opsobj = self.ops_inspect[collector].get_ops_generator(
            generator=generator, moduleid=moduleid, node_type=node_type, instanceid=instanceid)
        if not self.opsobj:
            self.logger.warn("query returned none")
            return None
        self.conoutput = self.opsobj.get_attr('Client', 'client_info')
        if not self.conoutput:
            # NOTE(review): logged at info here but warn above for the same
            # kind of failure — confirm which level is intended.
            self.logger.info("query returned none")
            return None
        return self.conoutput
@retry(delay=5, tries=4)
def verify_connection_status(self, generator, moduleid, node_type, instanceid='0'):
    '''Verify that generator:node_type:moduleid:instanceid has an
    Established connection to every configured collector.

    Returns:
        bool: True only if the connection is Established on all collectors.
    '''
    # Kept for backward compatibility: callers/tests may read these.
    self.g = generator
    self.m = moduleid
    result = True
    for collector_ip in self.inputs.collector_ips:
        self.logger.info("Verifying through opserver in %s" %
                         (collector_ip))
        status = self.get_connection_status(
            collector_ip, generator, moduleid, node_type, instanceid)
        if status == 'Established':
            self.logger.info("%s:%s:%s:%s is connected to collector %s" %
                             (generator, node_type, moduleid, instanceid, collector_ip))
        else:
            self.logger.warn(
                "%s:%s:%s:%s is NOT connected to collector %s" %
                (generator, node_type, moduleid, instanceid, collector_ip))
            # One disconnected collector fails the whole check (the old
            # `result = result & False` accumulation, simplified).
            result = False
    return result
def get_collector_of_gen(self, collector, gen, module, node_type, instance='0'):
    '''Gets the collector node of a generator.

    Returns:
        The 'collector_name' the generator is connected to, or None when
        the connection information is unavailable (previously this raised
        TypeError by subscripting None).
    '''
    connobj = self.get_connection_dict(
        collector, gen, module, node_type, instance)
    if not connobj:
        return None
    return connobj['collector_name']
def get_all_generator_links(self, module=None):
    '''Get all analytics/generators links whose text matches *module*.'''
    matched = []
    try:
        first_collector = self.inputs.collector_ips[0]
        links = self.ops_inspect[first_collector].get_hrefs_to_all_UVEs_of_a_given_UVE_type(
            uveType='generators')
        pattern = re.compile('%s(.*)' % module)
        for elem in links or []:
            if pattern.search(str(elem)):
                matched.append(elem)
    except Exception as e:
        self.logger.warn("Got exception as %s" % (e))
    finally:
        # Best-effort: an empty list is returned on any failure.
        return matched
def get_module_instances(self, module):
    '''Return the module instance ids from the analytics/generators url.'''
    instances = []
    try:
        for elem in self.get_all_generator_links(module=module) or []:
            # Generator names look like host:node_type:module:instance;
            # the instance id is the last colon-separated field.
            instances.append(str(elem['name']).split(":")[-1])
    except Exception as e:
        self.logger.warn("Got exception as %s" % (e))
    finally:
        # Best-effort: an empty list is returned on any failure.
        return instances
def get_uve_key(self, uve=None):
    '''Return the 'name' field of every UVE of the given type.

    Each link element looks like:
    {href: "http://.../analytics/uves/virtual-machine/<uuid>?flat",
     name: "<uuid>"}
    '''
    names = []
    try:
        first_collector = self.inputs.collector_ips[0]
        links = self.ops_inspect[first_collector].get_hrefs_to_all_UVEs_of_a_given_UVE_type(
            uveType=uve)
        for elem in links or []:
            names.append(elem['name'])
    except Exception as e:
        self.logger.warn("Got exception as %s" % (e))
    finally:
        # Best-effort: an empty list is returned on any failure.
        return names
# def get_gen_by_collector(self):
# '''Test module nodea29:ControlNode'''
# self.opsobj=self.ops_inspect.get_ops_generator(generator='nodea29',moduleid='ControlNode',node_type='Control',instanceid='0')
# self.g=self.opsobj.get_attr('Server', 'generator_info',match= ('status','0'))
# import pdb;pdb.set_trace()
# return self.g
# self.f=self.opsobj.get_attr('Client', 'client_info',match='Established')
# self.a=self.opsobj.get_attr('Server', 'generator_info')
# self.b=self.opsobj.get_attr('Client', 'client_info')
# self.ops=self.ops_inspect.get_ops_vroutern(vrouter='nodea19')
# self.c=self.ops.get_attr('Agent', 'xmpp_peer_list')
# self.op=self.ops_inspect.get_ops_bgprouter(bgprouter='nodea29')
# self.d=self.op.get_attr('Control', 'num_xmpp_peer')
# self.o1=self.ops_inspect.get_ops_vn(vn='default-domain:admin:vn1')
# self.d1=self.o1.get_attr('Agent', 'virtualmachine_list')
# self.o2=self.ops_inspect.get_ops_vm(vm='2c41bd1e-8104-4a9b-abde-5ccd0183d544')
# self.d2=self.o2.get_attr('Agent', 'interface_list')
# self.o3=[]
# self.o3=self.ops_inspect.get_hrefs_to_all_UVEs_of_a_given_UVE_type(uveType='bgp-routers')
# gen_list=[]
# for elem in self.o3:
# name=elem.get_attr('Name')
# gen_list.append(name)
# import pdb;pdb.set_trace()
# return self.g
# Collector uve functions#
# ------------------------#
# @retry(delay=5, tries=1)
def verify_collector_uve(self):
    '''Verify that all generators are connected to the collectors.

    Through every opserver, checks that control, compute, config and
    analytics daemons report an Established connection for the expected
    (module-id, node-type, instance-id) combinations.

    Returns:
        bool: True only when every expected generator is connected.
    '''
    result = True
    # Discover instances once; they do not change per collector.
    apiserver_instances = self.get_module_instances('ApiServer')
    opserver_instances = self.get_module_instances('OpServer')
    for ip in self.inputs.collector_ips:
        self.logger.info("Verifying through opserver in %s" % (ip))
        # Control nodes run both ControlNode and DnsAgent generators.
        for bgp_host in self.bgp_hosts:
            for module in ('ControlNode', 'DnsAgent'):
                result = self.verify_connection_status(
                    bgp_host, module, 'Control', '0') and result
        # Compute nodes run VRouterAgent.
        for compute_host in self.compute_hosts:
            result = self.verify_connection_status(
                compute_host, 'VRouterAgent', 'Compute', '0') and result
        # Schema and ServiceMonitor run active/standby across config nodes,
        # so a connection from ANY one config node is sufficient.  (This
        # replaces the original loop whose flag was reset every iteration
        # and which contained the `resulti1` typo.)
        result = self._any_cfgm_module_connected('Schema') and result
        result = self._any_cfgm_module_connected('ServiceMonitor') and result
        # Every ApiServer instance must be connected on every config node.
        for cfgm_node in self.inputs.cfgm_names:
            for inst in apiserver_instances:
                result = self.verify_connection_status(
                    cfgm_node, 'ApiServer', 'Config', inst) and result
        # Analytics nodes: every OpServer instance, plus Collector and
        # QueryEngine, on every collector host.
        for c_host in self.collector_hosts:
            for inst in opserver_instances:
                result = self.verify_connection_status(
                    c_host, 'OpServer', 'Analytics', inst) and result
            for module in ('Collector', 'QueryEngine'):
                result = self.verify_connection_status(
                    c_host, module, 'Analytics', '0') and result
    return result

def _any_cfgm_module_connected(self, module, node_type='Config', instance_id='0'):
    '''Return True if *module* is Established from at least one config node.'''
    for cfgm_node in self.inputs.cfgm_names:
        if self.verify_connection_status(cfgm_node, module,
                                         node_type, instance_id):
            return True
    return False
@retry(delay=3, tries=15)
def verify_hrefs_to_all_uves_of_a_given_uve_type(self):
'''Verify all analytics links
'''
result = True
for ip in self.inputs.collector_ips:
self.logger.info(
"Verifying the bgp-routers links through opserver %s" % (ip))
self.links = self.ops_inspect[
ip].get_hrefs_to_all_UVEs_of_a_given_UVE_type(uveType='control-nodes')
gen_list = []
for elem in self.links:
name = elem.get_attr('Name')
gen_list.append(name)
missing_nodes = set(gen_list) ^ set(self.inputs.bgp_names)
if not missing_nodes:
self.logger.info("%s is present in the link" %
(self.inputs.bgp_names))
result = result and True
else:
self.logger.info(
"%s is not present in the in the bgp-routers" %
(missing_nodes))
result = result and False
self.logger.info(
"Verifying the vrouters links through opserver %s" % (ip))
self.links = self.ops_inspect[
ip].get_hrefs_to_all_UVEs_of_a_given_UVE_type(uveType='vrouters')
gen_list = []
for elem in self.links:
name = elem.get_attr('Name')
gen_list.append(name)
for name in self.inputs.compute_names:
if (name in gen_list):
# import pdb;pdb.set_trace()
# missing_nodes=set(gen_list)^set(self.inputs.compute_names)
# if not missing_nodes:
self.logger.info("%s is present in the link" % (name))
result = result and True
else:
self.logger.info(
"%s is not present in the in the vrouters" % (name))
result = result and False
self.logger.info(
"Verifying the collector links through opserver %s" % (ip))
self.links = self.ops_inspect[
ip].get_hrefs_to_all_UVEs_of_a_given_UVE_type(uveType='analytics-nodes')
gen_list = []
for elem in self.links:
name = elem.get_attr('Name')
gen_list.append(name)
missing_nodes = set(gen_list) ^ set(self.inputs.collector_names)
if not missing_nodes:
self.logger.info("%s is present in the link" | |
<gh_stars>1-10
# Copyright 2020 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Stores the environment changes necessary for Pigweed."""
import contextlib
import json
import os
import re
# The order here is important. On Python 2 we want StringIO.StringIO and not
# io.StringIO. On Python 3 there is no StringIO module so we want io.StringIO.
# Not using six because six is not a standard package we can expect to have
# installed in the system Python.
try:
from StringIO import StringIO # type: ignore
except ImportError:
from io import StringIO
# Disable super() warnings since this file must be Python 2 compatible.
# pylint: disable=super-with-arguments
# goto label written to the end of Windows batch files for exiting a script.
_SCRIPT_END_LABEL = '_pw_end'
class BadNameType(TypeError):
    """Raised when a variable name passed to an action is not a string."""
    pass
class BadValueType(TypeError):
    """Raised when a variable value passed to an action is not a string."""
    pass
class EmptyValue(ValueError):
    """Raised when a variable value is the empty string (not allowed)."""
    pass
class NewlineInValue(TypeError):
    """Raised when a variable value contains a newline (not allowed)."""
    pass
class BadVariableName(ValueError):
    """Raised when a variable name is not a valid identifier."""
    pass
class UnexpectedAction(ValueError):
    """Raised when an unknown action type is encountered."""
    pass
class _Action(object):  # pylint: disable=useless-object-inheritance
    """Base class for environment actions; methods here are no-op defaults."""

    def unapply(self, env, orig_env):
        # Revert this action's effect on env, using orig_env as reference.
        pass

    def json(self, data):
        # Record this action into a JSON-serializable dict.
        pass

    def write_deactivate(self,
                         outs,
                         windows=(os.name == 'nt'),
                         replacements=()):
        # Emit shell commands that undo this action.
        pass
class _VariableAction(_Action):
    """Base for actions that target a single environment variable.

    Validates the variable name and value on construction (see _check()).
    """
    # pylint: disable=keyword-arg-before-vararg
    def __init__(self, name, value, allow_empty_values=False, *args, **kwargs):
        super(_VariableAction, self).__init__(*args, **kwargs)
        self.name = name
        self.value = value
        self.allow_empty_values = allow_empty_values
        self._check()

    def _check(self):
        """Validate name/value, raising a specific TypeError/ValueError subclass."""
        try:
            # In python2, unicode is a distinct type.
            valid_types = (str, unicode)
        except NameError:
            valid_types = (str, )
        if not isinstance(self.name, valid_types):
            raise BadNameType('variable name {!r} not of type str'.format(
                self.name))
        if not isinstance(self.value, valid_types):
            raise BadValueType('{!r} value {!r} not of type str'.format(
                self.name, self.value))
        # Empty strings as environment variable values have different behavior
        # on different operating systems. Just don't allow them.
        if not self.allow_empty_values and self.value == '':
            raise EmptyValue('{!r} value {!r} is the empty string'.format(
                self.name, self.value))
        # Many tools have issues with newlines in environment variable values.
        # Just don't allow them.
        if '\n' in self.value:
            raise NewlineInValue('{!r} value {!r} contains a newline'.format(
                self.name, self.value))
        # Variable names must look like identifiers (case-insensitive).
        if not re.match(r'^[A-Z_][A-Z0-9_]*$', self.name, re.IGNORECASE):
            raise BadVariableName('bad variable name {!r}'.format(self.name))

    def unapply(self, env, orig_env):
        # Restore the original value, or drop the variable entirely if it
        # was not present in the original environment.
        if self.name in orig_env:
            env[self.name] = orig_env[self.name]
        else:
            env.pop(self.name, None)
def _var_form(variable, windows=(os.name == 'nt')):
if windows:
return '%{}%'.format(variable)
return '${}'.format(variable)
class Set(_VariableAction):
    """Set a variable."""

    def write(self, outs, windows=(os.name == 'nt'), replacements=()):
        # Substitute back-references to other variables before emitting.
        expanded = self.value
        for var, replacement in replacements:
            if var == self.name:
                continue
            expanded = expanded.replace(replacement, _var_form(var, windows))
        if windows:
            outs.write('set {name}={value}\n'.format(name=self.name,
                                                     value=expanded))
        else:
            outs.write('{name}="{value}"\nexport {name}\n'.format(
                name=self.name, value=expanded))

    def write_deactivate(self,
                         outs,
                         windows=(os.name == 'nt'),
                         replacements=()):
        del replacements  # Unused.
        template = 'set {name}=\n' if windows else 'unset {name}\n'
        outs.write(template.format(name=self.name))

    def apply(self, env):
        env[self.name] = self.value

    def json(self, data):
        data['set'][self.name] = self.value
class Clear(_VariableAction):
    """Remove a variable from the environment."""

    def __init__(self, *args, **kwargs):
        # A Clear carries no value; empty values are explicitly permitted.
        kwargs['value'] = ''
        kwargs['allow_empty_values'] = True
        super(Clear, self).__init__(*args, **kwargs)

    def write(self, outs, windows=(os.name == 'nt'), replacements=()):
        del replacements  # Unused.
        template = 'set {name}=\n' if windows else 'unset {name}\n'
        outs.write(template.format(name=self.name))

    def apply(self, env):
        env.pop(self.name, None)

    def json(self, data):
        data['set'][self.name] = None
def _initialize_path_like_variable(data, name):
default = {'append': [], 'prepend': [], 'remove': []}
data['modify'].setdefault(name, default)
def _remove_value_from_path(variable, value, pathsep):
return ('{variable}="$(echo "${variable}"'
' | sed "s|{pathsep}{value}{pathsep}|{pathsep}|g;"'
' | sed "s|^{value}{pathsep}||g;"'
' | sed "s|{pathsep}{value}$||g;"'
')"\nexport {variable}\n'.format(variable=variable,
value=value,
pathsep=pathsep))
class Remove(_VariableAction):
    """Remove a value from a PATH-like variable."""

    def __init__(self, name, value, pathsep, *args, **kwargs):
        super(Remove, self).__init__(name, value, *args, **kwargs)
        self._pathsep = pathsep

    def write(self, outs, windows=(os.name == 'nt'), replacements=()):
        value = self.value
        for var, replacement in replacements:
            if var != self.name:
                value = value.replace(replacement, _var_form(var, windows))
        if windows:
            pass
            # TODO(pwbug/231) This does not seem to be supported when value
            # contains a %variable%. Disabling for now.
            # outs.write(':: Remove\n:: {value}\n:: from\n:: {name}\n'
            #            ':: before adding it back.\n'
            #            'set {name}=%{name}:{value}{pathsep}=%\n'.format(
            #                name=self.name, value=value, pathsep=self._pathsep))
        else:
            # Fix: this comment template was previously written without
            # .format(), so the generated script contained literal '{value}'
            # placeholders; it also repeated the value where the variable
            # name was intended (compare the Windows template above).
            outs.write('# Remove \n# {value}\n# from\n# {name}\n# before '
                       'adding it back.\n'.format(value=value,
                                                  name=self.name))
            outs.write(_remove_value_from_path(self.name, value,
                                               self._pathsep))

    def apply(self, env):
        # Strip the value (with its separator, in either order) from the
        # live environment.
        env[self.name] = env[self.name].replace(
            '{}{}'.format(self.value, self._pathsep), '')
        env[self.name] = env[self.name].replace(
            '{}{}'.format(self._pathsep, self.value), '')

    def json(self, data):
        _initialize_path_like_variable(data, self.name)
        data['modify'][self.name]['remove'].append(self.value)
        # A remove cancels any earlier append/prepend of the same value.
        if self.value in data['modify'][self.name]['append']:
            data['modify'][self.name]['append'].remove(self.value)
        if self.value in data['modify'][self.name]['prepend']:
            data['modify'][self.name]['prepend'].remove(self.value)
class BadVariableValue(ValueError):
    """Raised when a value is unsuitable for a PATH-like append/prepend."""
    pass
def _append_prepend_check(action):
if '=' in action.value:
raise BadVariableValue('"{}" contains "="'.format(action.value))
class Prepend(_VariableAction):
    """Prepend a value to a PATH-like variable."""
    def __init__(self, name, value, join, *args, **kwargs):
        super(Prepend, self).__init__(name, value, *args, **kwargs)
        # join: callable combining two path fragments; write_deactivate also
        # reads a .pathsep attribute from it — assumed provided by the
        # caller, TODO confirm at the construction site.
        self._join = join

    def write(self, outs, windows=(os.name == 'nt'), replacements=()):
        # Substitute back-references to other variables before emitting.
        value = self.value
        for var, replacement in replacements:
            if var != self.name:
                value = value.replace(replacement, _var_form(var, windows))
        # NAME = join(value, $NAME): new value goes in front.
        value = self._join(value, _var_form(self.name, windows))
        if windows:
            outs.write('set {name}={value}\n'.format(name=self.name,
                                                     value=value))
        else:
            outs.write('{name}="{value}"\nexport {name}\n'.format(
                name=self.name, value=value))

    def write_deactivate(self,
                         outs,
                         windows=(os.name == 'nt'),
                         replacements=()):
        value = self.value
        for var, replacement in replacements:
            if var != self.name:
                value = value.replace(replacement, _var_form(var, windows))
        # Deactivation strips the prepended value back out of the variable.
        outs.write(
            _remove_value_from_path(self.name, value, self._join.pathsep))

    def apply(self, env):
        env[self.name] = self._join(self.value, env.get(self.name, ''))

    def _check(self):
        super(Prepend, self)._check()
        # '=' in the value would break 'set NAME=VALUE' parsing on Windows.
        _append_prepend_check(self)

    def json(self, data):
        _initialize_path_like_variable(data, self.name)
        data['modify'][self.name]['prepend'].append(self.value)
        # A prepend cancels any earlier remove of the same value.
        if self.value in data['modify'][self.name]['remove']:
            data['modify'][self.name]['remove'].remove(self.value)
class Append(_VariableAction):
    """Append a value to a PATH-like variable. (Uncommon, see Prepend.)"""
    def __init__(self, name, value, join, *args, **kwargs):
        super(Append, self).__init__(name, value, *args, **kwargs)
        # join: callable combining two path fragments; write_deactivate also
        # reads a .pathsep attribute from it — assumed provided by the
        # caller, TODO confirm at the construction site.
        self._join = join

    def write(self, outs, windows=(os.name == 'nt'), replacements=()):
        # Substitute back-references to other variables before emitting.
        value = self.value
        for var, repl_value in replacements:
            if var != self.name:
                value = value.replace(repl_value, _var_form(var, windows))
        # NAME = join($NAME, value): new value goes at the end.
        value = self._join(_var_form(self.name, windows), value)
        if windows:
            outs.write('set {name}={value}\n'.format(name=self.name,
                                                     value=value))
        else:
            outs.write('{name}="{value}"\nexport {name}\n'.format(
                name=self.name, value=value))

    def write_deactivate(self,
                         outs,
                         windows=(os.name == 'nt'),
                         replacements=()):
        value = self.value
        for var, replacement in replacements:
            if var != self.name:
                value = value.replace(replacement, _var_form(var, windows))
        # Deactivation strips the appended value back out of the variable.
        outs.write(
            _remove_value_from_path(self.name, value, self._join.pathsep))

    def apply(self, env):
        env[self.name] = self._join(env.get(self.name, ''), self.value)

    def _check(self):
        super(Append, self)._check()
        # '=' in the value would break 'set NAME=VALUE' parsing on Windows.
        _append_prepend_check(self)

    def json(self, data):
        _initialize_path_like_variable(data, self.name)
        data['modify'][self.name]['append'].append(self.value)
        # An append cancels any earlier remove of the same value.
        if self.value in data['modify'][self.name]['remove']:
            data['modify'][self.name]['remove'].remove(self.value)
class BadEchoValue(ValueError):
    """Raised for echo values ('on'/'off') that misbehave on Windows."""
    pass
class Echo(_Action):
    """Echo a value to the terminal."""
    def __init__(self, value, newline, *args, **kwargs):
        # These values act funny on Windows ('echo on'/'echo off' toggle
        # command echoing instead of printing).
        if value.lower() in ('off', 'on'):
            raise BadEchoValue(value)
        super(Echo, self).__init__(*args, **kwargs)
        self.value = value
        self._newline = newline

    def write(self, outs, windows=(os.name == 'nt'), replacements=()):
        del replacements  # Unused.
        # POSIX shells parse arguments and pass to echo, but Windows seems to
        # pass the command line as is without parsing, so quoting is wrong.
        if windows:
            if self._newline:
                if not self.value:
                    # 'echo.' prints an empty line; a bare 'echo' would print
                    # the echo state instead.
                    outs.write('echo.\n')
                else:
                    outs.write('echo {}\n'.format(self.value))
            else:
                # '<nul set /p=' is the cmd.exe idiom for printing without a
                # trailing newline.
                outs.write('<nul set /p="{}"\n'.format(self.value))
        else:
            # Only echo when not silenced via PW_ENVSETUP_QUIET.
            # TODO(mohrr) use shlex.quote().
            outs.write('if [ -z "${PW_ENVSETUP_QUIET:-}" ]; then\n')
            if self._newline:
                outs.write(' echo "{}"\n'.format(self.value))
            else:
                outs.write(' echo -n "{}"\n'.format(self.value))
            outs.write('fi\n')

    def apply(self, env):
        # Echo has no effect on the environment itself.
        pass
class Comment(_Action):
    """Add a comment to the init script."""

    def __init__(self, value, *args, **kwargs):
        super(Comment, self).__init__(*args, **kwargs)
        self.value = value

    def write(self, outs, windows=(os.name == 'nt'), replacements=()):
        del replacements  # Unused.
        # '::' is the conventional cmd.exe comment marker; '#' elsewhere.
        prefix = '::' if windows else '#'
        outs.write(''.join('{} {}\n'.format(prefix, line)
                           for line in self.value.splitlines()))

    def apply(self, env):
        # Comments have no effect on the environment itself.
        pass
class Command(_Action):
    """Run a command."""

    def __init__(self, command, *args, **kwargs):
        exit_on_error = kwargs.pop('exit_on_error', True)
        super(Command, self).__init__(*args, **kwargs)
        assert isinstance(command, (list, tuple))
        self.command = command
        self.exit_on_error = exit_on_error

    def write(self, outs, windows=(os.name == 'nt'), replacements=()):
        del replacements  # Unused.
        # TODO(mohrr) use shlex.quote here?
        outs.write(' '.join(self.command) + '\n')
        if self.exit_on_error:
            if windows:
                outs.write('if %ERRORLEVEL% neq 0 goto {}\n'.format(
                    _SCRIPT_END_LABEL))
            else:
                # Assume failing command produced relevant output.
                outs.write('if [ "$?" -ne 0 ]; then\n return 1\nfi\n')

    def apply(self, env):
        # Script-only action; no effect on the in-process environment.
        pass
class BlankLine(_Action):
    """Write a blank line to the init script."""

    def write(  # pylint: disable=no-self-use
            self,
            outs,
            windows=(os.name == 'nt'),
            replacements=()):
        # Neither argument affects a blank line.
        del windows, replacements  # Unused.
        outs.write('\n')

    def apply(self, env):
        # Cosmetic only; no effect on the environment.
        pass
class Function(_Action):
    """Define a shell function in the init script (POSIX shells only)."""

    def __init__(self, name, body, *args, **kwargs):
        super(Function, self).__init__(*args, **kwargs)
        self._name = name
        self._body = body

    def write(self, outs, windows=(os.name == 'nt'), replacements=()):
        del replacements  # Unused.
        if windows:
            # cmd.exe has no function syntax; skip on Windows.
            return
        outs.write('{name}() {{\n{body}\n}}'.format(name=self._name,
                                                    body=self._body))

    def apply(self, env):
        # Script-only action; no effect on the in-process environment.
        pass
class Hash(_Action):
def write( # pylint: disable=no-self-use
self,
outs,
windows=(os.name == 'nt'),
replacements=()):
del replacements # Unused.
if windows:
return
outs.write('''
# This should detect bash and zsh, which have a hash command that must be
# called to get it to forget past commands. Without forgetting past
# commands the $PATH changes we made may not be respected.
if | |
from __future__ import division
import torch
import math
import random
from PIL import Image
import cv2
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
import numpy as np
import numbers
import types
import collections
import collections.abc
import warnings
import matplotlib.pyplot as plt
from torchvision.transforms import functional
import PIL
INTER_MODE = {'NEAREST': cv2.INTER_NEAREST, 'BILINEAR': cv2.INTER_LINEAR, 'BICUBIC': cv2.INTER_CUBIC}
PAD_MOD = {'constant': cv2.BORDER_CONSTANT,
'edge': cv2.BORDER_REPLICATE,
'reflect': cv2.BORDER_DEFAULT,
'symmetric': cv2.BORDER_REFLECT
}
def imshow(inps, title=None):
    """Imshow for Tensor.

    Displays each C x H x W tensor in *inps* as a subplot in one row,
    titled with the matching entry of *title*.

    Args:
        inps: iterable of 3-D tensors (C x H x W).
        title: iterable of subplot titles, one per tensor (zipped with inps,
            so it must not be None in practice — TODO confirm callers).
    """
    subwindows = len(inps)
    for idx, (inp, name) in enumerate(zip(inps, title)):
        # Tensor layout is C x H x W; matplotlib expects H x W x C.
        inp = inp.numpy().transpose((1, 2, 0))
        ax = plt.subplot(1, subwindows, idx+1)
        ax.axis('off')
        plt.imshow(inp)
        ax.set_title(name)
    # plt.pause(0.001)
    plt.show()
    # plt.waitforbuttonpress(-1)
def _is_tensor_image(img):
return torch.is_tensor(img) and img.ndimension() == 3
def _is_numpy_image(img):
return isinstance(img, np.ndarray) and (img.ndim in {2, 3})
def to_tensor(pic):
    """Converts a numpy.ndarray (H x W x C) in the range
    [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0].
    Args:
        pic (np.ndarray, torch.Tensor): Image to be converted to tensor, (H x W x C[RGB]).
    Returns:
        Tensor: Converted image.
    """
    # 3-D tensors are passed through unchanged.
    if torch.is_tensor(pic) and pic.ndimension() == 3:
        return pic
    if isinstance(pic, np.ndarray) and pic.ndim in (2, 3):
        arr = pic
        if arr.ndim == 2:
            # Grayscale: expand to three identical RGB channels.
            arr = cv2.cvtColor(arr, cv2.COLOR_GRAY2RGB)
        img = torch.from_numpy(arr.transpose((2, 0, 1)))
        # Backward compatibility: byte images are scaled into [0, 1].
        if isinstance(img, torch.ByteTensor) or img.max() > 1:
            return img.float().div(255)
        return img
    # Anything else: try converting through numpy first.
    try:
        return to_tensor(np.array(pic))
    except Exception:
        raise TypeError('pic should be ndarray. Got {}'.format(type(pic)))
def to_cv_image(pic, mode=None):
    """Convert a tensor or ndarray to a numpy (H x W x C) image.

    Args:
        pic (Tensor or numpy.ndarray): Image to be converted.
        mode (int, optional): cv2 color-conversion code applied to the
            result, for example: cv2.COLOR_RGB2BGR.
    Returns:
        np.array: converted image.
    """
    is_numpy = isinstance(pic, np.ndarray) and pic.ndim in (2, 3)
    is_tensor = torch.is_tensor(pic) and pic.ndimension() == 3
    if not (is_numpy or is_tensor):
        raise TypeError('pic should be Tensor or ndarray. Got {}.'.format(type(pic)))
    npimg = pic
    if isinstance(pic, torch.FloatTensor):
        # Float tensors are assumed to be in [0, 1]; rescale to bytes.
        pic = pic.mul(255).byte()
    if torch.is_tensor(pic):
        # C x H x W -> H x W x C, dropping singleton channel dimensions.
        npimg = np.squeeze(np.transpose(pic.numpy(), (1, 2, 0)))
    if mode is None:
        return npimg
    return cv2.cvtColor(npimg, mode)
def normalize(tensor, mean, std):
    """Normalize an image channel-wise with mean and standard deviation.

    Tensors are normalized IN PLACE as (x - mean) / std; numpy images are
    returned as a new float32 array computed as (x - 255*mean) / std.

    Args:
        tensor (Tensor or np.ndarray): image of size (C, H, W) / (H, W, C).
        mean (sequence): per-channel means.
        std (sequence): per-channel standard deviations.
    Returns:
        the normalized image.
    """
    if torch.is_tensor(tensor) and tensor.ndimension() == 3:
        for channel, m, s in zip(tensor, mean, std):
            channel.sub_(m).div_(s)
        return tensor
    if isinstance(tensor, np.ndarray) and tensor.ndim in (2, 3):
        return (tensor.astype(np.float32) - 255.0 * np.array(mean)) / np.array(std)
    raise RuntimeError('Undefined type')
def resize(img, size, interpolation='BILINEAR'):
    """Resize the input CV Image to the given size.
    Args:
        img (np.ndarray): Image to be resized.
        size (tuple or int): Desired output size. If size is a sequence like
            (h, w), the output size will be matched to this. If size is an int,
            the smaller edge of the image will be matched to this number
            maintaining the aspect ratio. i.e, if height > width, then image
            will be rescaled to (size * height / width, size)
        interpolation (str, optional): Desired interpolation. Default is ``BILINEAR``
    Returns:
        cv Image: Resized image.
    """
    if not (isinstance(img, np.ndarray) and img.ndim in (2, 3)):
        raise TypeError('img should be CV Image. Got {}'.format(type(img)))
    # Fix: collections.Iterable was removed in Python 3.10; use the
    # collections.abc alias instead.
    if not (isinstance(size, int) or
            (isinstance(size, collections.abc.Iterable) and len(size) == 2)):
        raise TypeError('Got inappropriate size arg: {}'.format(size))
    if isinstance(size, int):
        h, w, c = img.shape
        # Short edge already matches the requested size: nothing to do.
        if (w <= h and w == size) or (h <= w and h == size):
            return img
        if w < h:
            ow = size
            oh = int(size * h / w)
        else:
            oh = size
            ow = int(size * w / h)
        return cv2.resize(img, dsize=(ow, oh), interpolation=INTER_MODE[interpolation])
    oh, ow = size
    return cv2.resize(img, dsize=(int(ow), int(oh)), interpolation=INTER_MODE[interpolation])
def to_rgb_bgr(pic):
    """Swap a (H x W x 3) image between BGR and RGB channel order.

    Args:
        pic (np.ndarray, torch.Tensor): image to convert.
    Returns:
        the image with the first and third channels exchanged.
    """
    is_numpy = isinstance(pic, np.ndarray) and pic.ndim in (2, 3)
    is_tensor = torch.is_tensor(pic) and pic.ndimension() == 3
    if is_numpy or is_tensor:
        # Fancy-index the channel axis to reverse it.
        return pic[:, :, [2, 1, 0]]
    try:
        return to_rgb_bgr(np.array(pic))
    except Exception:
        raise TypeError('pic should be numpy.ndarray or torch.Tensor. Got {}'.format(type(pic)))
def pad(img, padding, fill=(0, 0, 0), padding_mode='constant'):
    """Pad the given CV Image on all sides with the specified mode and fill.

    Args:
        img (np.ndarray): Image to be padded.
        padding (int or tuple): Padding on each border. A single int pads all
            borders; a 2-tuple is (left/right, top/bottom); a 4-tuple is
            (left, top, right, bottom).
        fill (int, tuple): Pixel fill value for constant fill. A 3-tuple
            fills R, G, B channels respectively; only used when
            padding_mode is 'constant'.
        padding_mode: 'constant', 'edge', 'reflect' or 'symmetric'.
            reflect: pads without repeating the edge value,
                e.g. [1, 2, 3, 4] + 2 -> [3, 2, 1, 2, 3, 4, 3, 2]
            symmetric: pads repeating the edge value,
                e.g. [1, 2, 3, 4] + 2 -> [2, 1, 1, 2, 3, 4, 4, 3]
    Returns:
        CV Image: Padded image.
    """
    if not (isinstance(img, np.ndarray) and img.ndim in (2, 3)):
        raise TypeError('img should be CV Image. Got {}'.format(type(img)))
    if not isinstance(padding, (numbers.Number, tuple)):
        raise TypeError('Got inappropriate padding arg')
    if not isinstance(fill, (numbers.Number, str, tuple)):
        raise TypeError('Got inappropriate fill arg')
    if not isinstance(padding_mode, str):
        raise TypeError('Got inappropriate padding_mode arg')
    # Fix: collections.Sequence was removed in Python 3.10; use the
    # collections.abc alias instead.
    if isinstance(padding, collections.abc.Sequence) and len(padding) not in [2, 4]:
        raise ValueError("Padding must be an int or a 2, or 4 element tuple, not a " +
                         "{} element tuple".format(len(padding)))
    assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric'], \
        'Padding mode should be either constant, edge, reflect or symmetric'
    # Explicit branch chain so every accepted padding shape binds all four
    # pad_* names (previously a float Number fell through unbound).
    if isinstance(padding, numbers.Number):
        pad_left = pad_right = pad_top = pad_bottom = int(padding)
    elif len(padding) == 2:
        pad_left = pad_right = padding[0]
        pad_top = pad_bottom = padding[1]
    else:
        pad_left, pad_top, pad_right, pad_bottom = padding
    if isinstance(fill, numbers.Number):
        # Expand a scalar fill to one value per channel (3 color / 1 gray).
        fill = (fill,) * (2 * len(img.shape) - 3)
    if padding_mode == 'constant':
        assert (len(fill) == 3 and len(img.shape) == 3) or (len(fill) == 1 and len(img.shape) == 2), \
            'channel of image is {} but length of fill is {}'.format(img.shape[-1], len(fill))
    img = cv2.copyMakeBorder(src=img, top=pad_top, bottom=pad_bottom, left=pad_left, right=pad_right,
                             borderType=PAD_MOD[padding_mode], value=fill)
    return img
def crop(img, x, y, h, w):
    """Crop the given CV image; out-of-bounds regions are zero-padded.

    Args:
        img (np.ndarray): image to crop.
        x: upper pixel coordinate.
        y: left pixel coordinate.
        h: height of the cropped image.
        w: width of the cropped image.
    Returns:
        np.ndarray: the cropped region (always a copy).
    """
    assert isinstance(img, np.ndarray) and img.ndim in (2, 3), \
        'img should be CV Image. Got {}'.format(type(img))
    assert h > 0 and w > 0, 'h={} and w={} should greater than 0'.format(h, w)
    x1, y1 = round(x), round(y)
    x2, y2 = round(x + h), round(y + w)
    try:
        # Probe both corners; IndexError means the crop leaves the image.
        img[x1, y1, ...]
        img[x2 - 1, y2 - 1, ...]
    except IndexError:
        # Zero-pad the image so the requested window fits, then shift the
        # crop coordinates into the padded frame.
        img = cv2.copyMakeBorder(img, -min(0, x1), max(x2 - img.shape[0], 0),
                                 -min(0, y1), max(y2 - img.shape[1], 0),
                                 cv2.BORDER_CONSTANT, value=[0, 0, 0])
        shift_y = -min(0, y1)
        y1, y2 = y1 + shift_y, y2 + shift_y
        shift_x = -min(0, x1)
        x1, x2 = x1 + shift_x, x2 + shift_x
    finally:
        return img[x1:x2, y1:y2, ...].copy()
def center_crop(img, output_size):
    """Crop the central region of *img* to *output_size* (int or (h, w))."""
    if isinstance(output_size, numbers.Number):
        side = int(output_size)
        output_size = (side, side)
    h, w, _ = img.shape
    th, tw = output_size
    top = int(round((h - th) * 0.5))
    left = int(round((w - tw) * 0.5))
    return crop(img, top, left, th, tw)
def resized_crop(img, i, j, h, w, size, interpolation='BILINEAR'):
"""Crop the given CV Image and resize it to desired size. Notably used in RandomResizedCrop.
Args:
img (np.ndarray): Image to be cropped.
i: Upper pixel coordinate.
j: Left pixel coordinate.
| |
<reponame>rimmartin/cctbx_project
# -*- mode: python; coding: utf-8; indent-tabs-mode: nil; python-indent: 2 -*-
#
# $Id$
"""
A 'slippy map' widget for wxPython.
So why is this widget called 'pySlip'?
Well, in the OpenStreetMap world[1], a 'slippy map' is a browser map view
served by a tile server that can be panned and zoomed in the same way as
popularised by Google maps. Such a map feels 'slippery', I guess.
Rather than 'slippy' I went for the slightly more formal 'pySlip' since the
thing is written in Python and therefore must have the obligatory 'py' prefix.
Even though this was originally written for a geographical application, the
underlying system only assumes a cartesian 2D coordinate system. So pySlip
could be used to present a game map, 2D CAD view, or whatever. The major
difficulty for most uses is to generate the map tiles.
[1] http://wiki.openstreetmap.org/index.php/Slippy_Map
"""
from __future__ import division
from six.moves import range
# Copyright (c) 2010, <NAME> (<EMAIL>). All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import glob
from six.moves import cPickle as pickle
import wx
from scitbx.matrix import col
import math
# Optional logging: use the project's log.py when available; otherwise fall
# back to a no-op stand-in so later calls to log(...) never crash.
try:
    import log
    log = log.Log('pyslip.log', log.Log.DEBUG)
except Exception:
    # log.py missing or failed to initialise - install a do-nothing stub
    def log(*args, **kwargs):
        pass
__version__ = '2.2'
__all__ = ['PySlip']
# True when running under wxPython 3.x (several drawing/size APIs differ in 4.x)
WX3 = wx.VERSION[0] == 3
# type of SELECT events
EventPointSelect = 0
EventBoxSelect = 1
EventRightPointSelect = 2
EventRightBoxSelect = 3
######
# utility routines.
######
def point_inside_polygon(x, y, poly):
    """Decide if point is inside polygon.

    x     x coord of point in question
    y     y coord of point in question
    poly  polygon in form [(x1,y1), (x2,y2), ...]

    Returns True if point is properly inside polygon.
    May return True or False if point on edge of polygon.

    Classic ray-casting: walk the polygon edges pairwise (the polygon is
    closed by appending the first vertex) and toggle 'inside' every time a
    horizontal ray from the point crosses an edge.
    """
    vertices = list(poly)
    closed = vertices + [vertices[0]]
    inside = False
    for (ax, ay), (bx, by) in zip(closed, closed[1:]):
        # only edges that straddle the ray's height can be crossed
        if min(ay, by) < y <= max(ay, by) and x <= max(ax, bx):
            if ay != by:
                # x coordinate where this edge crosses the horizontal ray
                crossing = (y - ay) * (bx - ax) / (by - ay) + ax
            if ax == bx or x <= crossing:
                inside = not inside
    return inside
######
# Base class for the widget canvas - buffered and flicker-free.
######
class _BufferedCanvas(wx.Panel):
    """Implements a buffered, flicker-free canvas widget.

    Drawing is done into an off-screen bitmap (self.buffer) which is then
    blitted to the screen on paint events, avoiding flicker.

    This class is based on:
    http://wiki.wxpython.org/index.cgi/BufferedCanvas
    """
    # The backing buffer (a wx bitmap); (re)created in OnSize()
    buffer = None
    def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition,
                 size=wx.DefaultSize, style=wx.NO_FULL_REPAINT_ON_RESIZE):
        """Initialize the canvas.
        parent  reference to 'parent' widget
        id      the unique widget ID
        pos     canvas position
        size    canvas size
        style   wxPython style
        """
        wx.Panel.__init__(self, parent, id, pos, size, style)
        # Bind events
        self.Bind(wx.EVT_PAINT, self.OnPaint)
        self.Bind(wx.EVT_SIZE, self.OnSize)
        # Disable background erasing (flicker-licious)
        def disable_event(*pargs, **kwargs):
            pass # the sauce, please
        self.Bind(wx.EVT_ERASE_BACKGROUND, disable_event)
        # set callback upon onSize event; a subclass/user may assign a
        # callable here to be notified after the buffer is resized
        self.onSizeCallback = None
    def Draw(self, dc):
        """Stub: called when the canvas needs to be re-drawn."""
        pass
    def Update(self):
        """Causes the canvas to be updated.

        Renders via Draw() into the back buffer; the BufferedDC flushes to
        the screen when it goes out of scope.
        """
        dc = wx.BufferedDC(wx.ClientDC(self), self.buffer)
        if WX3:
            # Begin/EndDrawing only exist on wxPython 3.x
            dc.BeginDrawing()
        dc.Clear()
        self.Draw(dc)
        if WX3:
            dc.EndDrawing()
    def OnPaint(self, event):
        """Paint the canvas to the screen."""
        # Blit the front buffer to the screen
        wx.BufferedPaintDC(self, self.buffer)
    def OnSize(self, event=None):
        """Create a new off-screen buffer to hold drawn data."""
        (width, height) = self.GetClientSizeTuple() if WX3 else self.GetClientSize()
        # wx bitmaps cannot be zero-sized; clamp each dimension to >= 1
        if width == 0:
            width = 1
        if height == 0:
            height = 1
        self.buffer = wx.EmptyBitmap(width, height)
        self.view_width = width
        self.view_height = height
        # call onSize callback, if registered
        if self.onSizeCallback:
            self.onSizeCallback()
        # Now update the screen
        self.Update()
######
# Base class for a tile object - handles access to tiles.
######
class _Tiles(object):
"""An object to handle a pyslip tiles directory.
Uses 'elephant' caching - it never forgets!
TODO: Add more sophisticated limit + 'drop LRU' caching.
"""
# the name of the tile info file (under the main tile dir)
TileInfoFilename = 'tile.info'
# expected form of individual tile level directories (2 decimal digits)
TileFilenameTemplate = '[0-9][0-9]'
# name of picture file to use if tile missing (under the main tile dir)
MissingTileFilename = 'missing_tile.png'
# maximum number of tiles held in each level cache
MaxTileList = 4096
def __init__(self, tile_dir):
"""Initialise a Tiles instance.
tile_dir root directory of tiles
"""
# open top-level info file
self.tile_dir = tile_dir
info_file = os.path.join(tile_dir, self.TileInfoFilename)
try:
fd = open(info_file, 'rb')
(self.extent, self.tile_size,
self.sea_colour, self.land_colour) = pickle.load(fd)
fd.close()
except IOError:
msg = "'%s' doesn't appear to be a tile directory" % tile_dir
raise Exception(msg)
(self.tile_size_x, self.tile_size_y) = self.tile_size
# get list of tile levels
tile_mask = os.path.join(tile_dir, self.TileFilenameTemplate)
self.levels = [int(os.path.basename(l))
for l in glob.glob(os.path.join(tile_mask))]
# setup the tile caches and LRU lists
self.cache = {}
self.lru = {}
for l in self.levels:
self.cache[l] = {}
self.lru[l] = []
self.missing_tile = None
# set min and max tile levels
self.min_level = min(self.levels)
self.max_level = max(self.levels)
def UseLevel(self, n):
"""Prepare to serve tiles from the required level.
n The required level
Returns a tuple (map_width, map_height, ppd_x, ppd_y) if succesful,
else None. The width/height values are pixels. The ppd_? values are
pixels-per-degree values for X and Y direction.
"""
# try to get cache for this level, no cache means no level
try:
self.tile_cache = self.cache[n]
self.tile_list = self.lru[n]
except KeyError:
return None
# get tile info
info = self.GetInfo(n)
if info is None:
return None
(self.num_tiles_x, self.num_tiles_y, self.ppd_x, self.ppd_y) = info
# store partial path to level dir
self.tile_level_dir = os.path.join(self.tile_dir, '%02d' % n)
return (self.tile_size_x * self.num_tiles_x,
self.tile_size_y * self.num_tiles_y,
self.ppd_x, self.ppd_y)
def GetInfo(self, level):
"""Get tile info for a particular level.
level the level to get tile info for
Returns (num_tiles_x, num_tiles_y, ppd_x, ppd_y).
"""
# see if we can open the tile info file.
info_file = os.path.join(self.tile_dir, '%02d' % level,
self.TileInfoFilename)
try:
fd = open(info_file, 'rb')
except IOError:
return None
# OK, looks like we actually do have this level!
info = pickle.load(fd)
fd.close()
return info
def GetTile(self, x, y):
"""Get bitmap for tile at tile coords (x, y).
x X coord of tile required (tile coordinates)
y Y coord of tile required (tile coordinates)
Returns bitmap object containing the tile image.
Tile coordinates are measured from map top-left.
If tile is in cache, read from there, else read from file & put
into cache. Use LRU cache algorithm to limit memory usage.
"""
try:
# if tile in cache, return it from there
pic = self.tile_cache[(x, y)]
index = self.tile_list.index((x, y))
del self.tile_list[index]
except KeyError:
# tile *not* in cache: get image, cache and return it
img_name = os.path.join(self.tile_level_dir,
'tile_%d_%d.png' % (x, y))
# load tile as not in cache
if not os.path.exists(img_name):
# tile not there, use 'missing tile'
if not self.missing_tile:
# load missing tile
img_name = os.path.join(self.tile_dir,
self.MissingTileFilename)
img = wx.Image(img_name, wx.BITMAP_TYPE_ANY)
self.missing_tile = img.ConvertToBitmap()
pic = self.missing_tile
else:
# new tile, check if we must drop old tiles - | |
"""
Remove old build files.
"""
distfiles = self.packagename.replace("-", "_")
distdir = self._settings.DISTRIBUTABLE_DIR
self.clean()
for f in glob.glob(str(distdir / distfiles) + "*.*"):
remove_if_exists(f)
for f in glob.glob(str(distdir / self.packagename) + "*.*"):
remove_if_exists(f)
remove_if_empty(distdir)
def clean(self) -> None:
"""
Remove intermediate artifacts and folders.
"""
for f in glob.glob(str(self._settings.SRC_DIR / "*.egg-info")):
remove_if_exists(f)
remove_if_exists(self._settings.BUILD_DIR)
    def ispassed(self) -> bool:
        """
        Returns, whether the last run() call was successful and did not return issues.

        Returns:
            True, if the last call of run() was successful and did not return issues.
            False, otherwise.
        """
        # _passed is set by run(); its initial value comes from __init__
        # (not visible here) - presumably falsy before the first run.
        return self._passed
def run(self) -> bool:
"""
Create a new source and binary (wheel) distribution.
Returns:
True, if operation was successful. False, otherwise.
"""
self.remove()
builddir = str(self._settings.DISTRIBUTABLE_DIR)
self._passed = not bool(pyexecute(["build", "-s", "-w", "-o", builddir]))
return self._passed
class DocInspector:
"""
Class for inspecting doc strings that have already been parsed by sphinx.
This class is useful for finding documented and undocumented code as well as
computing the documentation coverage. This way, you always know whether there is
documentation missing.
"""
REGEX_PARAMETERS = re.compile(r":param[ ]*([^:]+):")
REGEX_FIELD = re.compile(r":[^:]+:")
REGEX_DOC = re.compile(
r"(\"\"\".*?\"\"\"|#[^\n]*|\".*?\"|\'.*?\')", re.MULTILINE | re.DOTALL
)
REGEX_RETURN_NONE = re.compile(r"return([ ]*None|[ ]*[$\n])")
REGEX_RETURN = re.compile(r"return[ ]+(\w|\d|[\[{\(])")
REGEX_DOCRETURN = re.compile(r":return(s)?:[ ]*(\w|\d)+")
REGEX_NESTED = re.compile(
r"([ \t]+)(def|class)[ ]+[^:]+:\n(\1[ \t]+[^ ][^\n]+\n|[ ]*\n|\1[ \t]+[^ ])+"
)
UNUSED = "Unused"
DOCUMENTED = "Documented"
UNDOCUMENTED = "Undocumented"
KEY_TYPE = "type"
KEY_ISSUE = "issue"
KEY_WHAT = "what"
KEY_OBJNAME = "object_name"
KEY_FILE = "file"
KEY_LINES = "line_range"
KEY_TEXT = "text"
SECTION_ISSUES = "undocumented_elements"
SECTION_DOCUMENTED = "documented_elements"
ISSUE_UNDOC_PARAM = "undocumented parameter"
ISSUE_UNDOC_RETURN = "undocumented return value"
ISSUE_UNDOC_DESCRIPTION = "missing description"
ISSUE_UNUSED_PARAM = "documented but unused parameter"
JSON_INDENT = 4
def __init__(self, settings: Settings) -> None:
"""
Initializes the class with settings.
Args:
settings: Instance of a settings class to get settings from.
"""
self._settings = settings
self.undocumented = list()
self.missing = list()
self.log = list()
self.documented = {} # Number of documented elements in each file.
self.files = set()
def _getcleandoc(self, doc: List[str]):
"""
Applies Python's strip() function to each documentation line.
Applying strip() avoids errors during parsing.
Args:
doc: The documentation string to apply strip() to.
"""
return [line.strip() for line in doc if len(line.strip()) > 0]
def _fromsignature(self, subject: object, type: str) -> list:
"""
Returns all parameters that have been defined for the given subject object.
This method is used to obtain the list of parameters the given subject
was defined with. The list can then be used for comparing the documentation to.
Note that this does only work for functions and methods.
Args:
subject: The object for which a list of parameters shall be obtained.
type: The type of object as given by sphinx.
"""
if not callable(subject):
raise TypeError("Given subject is not callable.")
signature = list(inspect.signature(subject).parameters.keys())
# Filter the "self" - parameter.
if type == "method" and signature[0].lower() == "self":
signature.pop(0)
return signature
def _fromdocstring(self, lines: list) -> list:
"""
Returns all parameters that have been documented in the given docstring.
Args:
lines: The docstring as a list of strings as given by sphinx.
Returns:
A list of strings with all parameter names that have been mentioned
in the docstring by :param ... : .
"""
params = list()
doc = self._getcleandoc(lines)
for line in doc:
params += self.REGEX_PARAMETERS.findall(line)
return params
def save(self):
"""
Saves information about documentation coverage to a file.
The file is given in the settings as DOCUMENTATION_COVERAGE_FILE. A file is
always created, even if there is no data to write. If the file already exists,
it is overwritten.
"""
mkdirs_if_not_exists(Path(self._settings.TMP_DIR))
with open(self._settings.DOCUMENTATION_COVERAGE_FILE, "w") as f:
data = {
self.SECTION_DOCUMENTED: self.documented,
self.SECTION_ISSUES: self.log,
}
json.dump(data, f, indent=self.JSON_INDENT)
def load(self):
"""
Loads information about documentation coverage from a file.
The file is given in the settings as DOCUMENTATION_COVERAGE_FILE.
"""
covfile = self._settings.DOCUMENTATION_COVERAGE_FILE
if not os.path.isfile(covfile):
raise Exception(f"Documentation coverage file {covfile} does not exist.")
with open(self._settings.DOCUMENTATION_COVERAGE_FILE, "r") as f:
data = json.load(f)
self.documented = data[self.SECTION_DOCUMENTED]
self.log = data[self.SECTION_ISSUES]
def _getParameter(
self, subject: object, lines: list, type: str, check: str
) -> list:
"""
Returns a list of parameters that match a given check.
One can obtain a list of parameter names that match a given check. For example,
calling this method with check = DocInspector.UNDOCUMENTED returns a list of
undocumented parameter names. Note that this method only works for functions
and methods.
Args:
subject: The object to get the matching parameter list for.
lines: The docstring as given by sphinx.
type: The type of the subject as given by sphinx.
check: The type of match to perform. Allowed values are
UNDEFINED - Parameters that have been documented but are not part of the
actual signature.
DOCUMENTED - Parameters that have been documented.
UNDOCUMENTED - Parameters that have not been documented.
Returns:
A list of parameter names that match the given type of check.
"""
# These types are callable but do not have parameters.
if type in ["exception", "class"] or not callable(subject):
return list()
signature = self._fromsignature(subject, type)
documented = self._fromdocstring(lines)
if check == self.UNDOCUMENTED:
return [p for p in signature if p not in documented]
elif check == self.DOCUMENTED:
return [p for p in signature if p in documented]
elif check == self.UNUSED:
return [p for p in documented if p not in signature]
else:
raise Exception("Unknown check type.")
def _getDescription(self, lines: list):
"""
Returns the first descriptive line of the docstring, if present.
Typically, the first descriptive line is the brief. This method is used to
determine if there is any description present at all excluding parameter
documentation and other fields documented using :fieldname ... : .
Args:
lines: The docstring as given by sphinx.
Returns:
The first descriptive line of the docstring. Typically, the brief.
"""
doc = self._getcleandoc(lines)
if not doc:
return None
# A brief typically does not contain fieldnames.
if self.REGEX_FIELD.match(doc[0]) is not None:
return None
if len(doc[0]) == 0:
return None
return doc[0]
    def _missingReturn(self, subject: object, lines: list, what: str, name):
        """
        Determines, whether a return value documentation is missing.

        If there is a non-None return value, it should be documented. Functions and
        methods that return None can still document return values, e.g. to explicitly
        state None or document why None is returned. That is deemed optional.

        Args:
            subject: The object whose source code is inspected.
            lines: The docstring as given by sphinx.
            what: The type of the object as given by sphinx.
            name: The fully qualified name of the object (unused here).

        Returns:
            True, if a value-returning statement exists but no return value is
            documented. False, if documentation matches the code. None, if the
            subject is neither a method nor a function.
        """
        if what not in ["method", "function"]:
            return None
        source = inspect.getsource(subject).strip()  # type: ignore
        # Find returns that are not None.
        # First, remove docstrings, comments and strings (so 'return' inside
        # string literals is not counted).
        cleaned = self.REGEX_DOC.sub("", source)
        # Second, remove "return None" or return statements without value.
        cleaned = self.REGEX_RETURN_NONE.sub("", cleaned)
        # Third, remove nested functions and classes (their returns belong to
        # the nested scope, not to this subject).
        cleaned = self.REGEX_NESTED.sub("", cleaned)
        # Find remaining return statements which mention a value or variable.
        notNone = self.REGEX_RETURN.search(cleaned)
        # Detect, whether return values are part of the docstring.
        docret = self.REGEX_DOCRETURN.search("\n".join(lines))
        return notNone is not None and docret is None
def process(self, app, what, name, obj, options, lines) -> None:
"""
Determines code that has not been properly documented.
This method is meant to be connected to the "autodoc-process-docstring" event of
sphinx. It parsed already parsed docstrings and compares the documentation
with the actual definition of the given object. Since the docstring has already
been parsed by sphinx, it does not matter which style was used for
documentation. The results are stored in files as given in the settings from
which the documentation coverage can be computed.
Args:
app: The Sphinx application object.
what: The type of the object which the docstring belongs to.
name: The fully qualified name of the object.
obj: The object itself.
options: The options given by sphinx.
lines: Docstring as given by sphinx.
"""
try:
file = inspect.getsourcefile(obj)
if not file:
raise Exception("No source file to process.")
with open(file) as f:
content = f.read().strip()
except Exception:
# Only evaluate objects for which a file can be determined.
# For example, properties are not | |
import inspect
import math
import re
import json
import sys
#ipython = get_ipython()
#def hide_traceback(exc_tuple=None, filename=None, tb_offset=None,
# exception_only=False, running_compiled_code=False):
# etype, value, tb = sys.exc_info()
# return ipython._showtraceback(etype, value, ipython.InteractiveTB.get_exception_only(etype, value))
# Uncomment after tests
#ipython.showtraceback = hide_traceback
def generate_data():
    """Return raw sample rows (share of women per education area, by year).

    Values are strings with decimal commas; several carry a "(R)" revision
    marker and one row has a deliberately malformed year ("201") so the
    cleaning exercises have something to fix.
    """
    return [{"Year": "2013", "Total": "53,2", "Education": "80,4", "Arts and Humanities": "56,9", "Social Sciences, Management and Law": "58,1", "Sciences, Maths and Computer Science": "(R) 47,2", "Engineering": "(R) 27,4", "Agriculture": "56,9", "Health and Social Security": "76,8", "Services": "(R) 41,4"},
            {"Year": "201", "Total": "22,2", "Education": "80,4", "Arts and Humanities": "16,9", "Social Sciences, Management and Law": "58,1", "Sciences, Maths and Computer Science": "(R) 47,2", "Engineering": "(R) 27,4", "Agriculture": "56,9", "Health and Social Security": "76,8", "Services": "(R) 41,4"},
            {"Year": "2014", "Total": "53,5", "Education": "80,7", "Arts and Humanities": "58,0", "Social Sciences, Management and Law": "58,5", "Sciences, Maths and Computer Science": "(R) 47,5", "Engineering": "(R) 27,6", "Agriculture": "56,9", "Health and Social Security": "76,6", "Services": "(R) 41,1"},
            {"Year": "2015", "Total": "53,6", "Education": "80,7", "Arts and Humanities": "58,7", "Social Sciences, Management and Law": "58,6", "Sciences, Maths and Computer Science": "(R) 47,8", "Engineering": "(R) 27,0", "Agriculture": "56,3", "Health and Social Security": "76,7", "Services": "(R) 41,0"},
            {"Year": "2016", "Total": "53,4", "Education": "80,3", "Arts and Humanities": "58,6", "Social Sciences, Management and Law": "58,9", "Sciences, Maths and Computer Science": "(R) 45,7", "Engineering": "(R) 27,3", "Agriculture": "56,4", "Health and Social Security": "76,8", "Services": "(R) 41,7"},
            {"Year": "2017", "Total": "53,6", "Education": "79,3", "Arts and Humanities": "59,0", "Social Sciences, Management and Law": "59,5", "Sciences, Maths and Computer Science": "44,2", "Engineering": "27,5", "Agriculture": "57,3", "Health and Social Security": "77,0", "Services": "42,1"},
            {"Year": "2013", "Total": "52,2", "Education": "80,4", "Arts and Humanities": "57,9", "Social Sciences, Management and Law": "58,1", "Sciences, Maths and Computer Science": "(R) 47,2", "Engineering": "(R) 27,4", "Agriculture": "56,9", "Health and Social Security": "76,8", "Services": "(R) 41,4"},
            {"Year": "2014", "Total": "53,5", "Education": "80,7", "Arts and Humanities": "58,0", "Social Sciences, Management and Law": "58,5", "Sciences, Maths and Computer Science": "(R) 47,5", "Engineering": "(R) 27,6", "Agriculture": "56,9", "Health and Social Security": "76,6", "Services": "(R) 41,1"},
            {"Year": "2015", "Total": "53,6", "Education": "80,7", "Arts and Humanities": "58,7", "Social Sciences, Management and Law": "58,6", "Sciences, Maths and Computer Science": "(R) 47,8", "Engineering": "(R) 27,0", "Agriculture": "56,3", "Health and Social Security": "76,7", "Services": "(R) 41,0"},
            {"Year": "2016", "Total": "53,4", "Education": "80,3", "Arts and Humanities": "58,6", "Social Sciences, Management and Law": "58,9", "Sciences, Maths and Computer Science": "(R) 45,7", "Engineering": "(R) 27,3", "Agriculture": "56,4", "Health and Social Security": "76,8", "Services": "(R) 41,7"}
            ]
def generate_data_comma():
    """Return sample rows with snake_case keys but decimal-comma string values.

    Note: the "Agriculture" key deliberately keeps its capital letter, and one
    row has a malformed year ("201"); both are inputs for later exercises.
    """
    return [{"year": "2013", "total": "53,2", "education": "80,4", "arts_and_humanities": "56,9", "social_sciences_management_and_law": "58,1", "sciences_maths_and_computer_science": "47,2", "engineering": "27,4", "Agriculture": "56,9", "health_and_social_security": "76,8", "services": "41,4"},
            {"year": "201", "total": "22,2", "education": "80,4", "arts_and_humanities": "16,9", "social_sciences_management_and_law": "58,1", "sciences_maths_and_computer_science": "47,2", "engineering": "27,4", "Agriculture": "56,9", "health_and_social_security": "76,8", "services": "41,4"},
            {"year": "2014", "total": "53,5", "education": "80,7", "arts_and_humanities": "58,0", "social_sciences_management_and_law": "58,5", "sciences_maths_and_computer_science": "47,5", "engineering": "27,6", "Agriculture": "56,9", "health_and_social_security": "76,6", "services": "41,1"},
            {"year": "2015", "total": "53,6", "education": "80,7", "arts_and_humanities": "58,7", "social_sciences_management_and_law": "58,6", "sciences_maths_and_computer_science": "47,8", "engineering": "27,0", "Agriculture": "56,3", "health_and_social_security": "76,7", "services": "41,0"},
            {"year": "2016", "total": "53,4", "education": "80,3", "arts_and_humanities": "58,6", "social_sciences_management_and_law": "58,9", "sciences_maths_and_computer_science": "45,7", "engineering": "27,3", "Agriculture": "56,4", "health_and_social_security": "76,8", "services": "41,7"},
            {"year": "2017", "total": "53,6", "education": "79,3", "arts_and_humanities": "59,0", "social_sciences_management_and_law": "59,5", "sciences_maths_and_computer_science": "44,2", "engineering": "27,5", "Agriculture": "57,3", "health_and_social_security": "77,0", "services": "42,1"},
            {"year": "2013", "total": "52,2", "education": "80,4", "arts_and_humanities": "57,9", "social_sciences_management_and_law": "58,1", "sciences_maths_and_computer_science": "47,2", "engineering": "27,4", "Agriculture": "56,9", "health_and_social_security": "76,8", "services": "41,4"},
            {"year": "2014", "total": "53,5", "education": "80,7", "arts_and_humanities": "58,0", "social_sciences_management_and_law": "58,5", "sciences_maths_and_computer_science": "47,5", "engineering": "27,6", "Agriculture": "56,9", "health_and_social_security": "76,6", "services": "41,1"},
            {"year": "2015", "total": "53,6", "education": "80,7", "arts_and_humanities": "58,7", "social_sciences_management_and_law": "58,6", "sciences_maths_and_computer_science": "47,8", "engineering": "27,0", "Agriculture": "56,3", "health_and_social_security": "76,7", "services": "41,0"},
            {"year": "2016", "total": "53,4", "education": "80,3", "arts_and_humanities": "58,6", "social_sciences_management_and_law": "58,9", "sciences_maths_and_computer_science": "45,7", "engineering": "27,3", "Agriculture": "56,4", "health_and_social_security": "76,8", "services": "41,7"}
            ]
def generate_data_clean():
    """Return fully cleaned sample rows: snake_case keys and numeric values
    (floats everywhere, int years).

    There are 12 rows with some years repeated; exercise 4 relies on that
    exact length and ordering.
    """
    return [{"education": 80.4, "health_and_social_security": 76.8, "social_sciences_management_and_law": 58.1, "sciences_maths_and_computer_science": 47.2, "engineering": 27.4, "arts_and_humanities": 56.9, "year": 2013, "services": 41.4, "total": 53.2, "agriculture": 56.9},
            {"education": 80.7, "health_and_social_security": 76.6, "social_sciences_management_and_law": 58.5, "sciences_maths_and_computer_science": 47.5, "engineering": 27.6, "arts_and_humanities": 58.0, "year": 2014, "services": 41.1, "total": 53.5, "agriculture": 56.9},
            {"education": 80.4, "health_and_social_security": 76.8, "social_sciences_management_and_law": 58.1, "sciences_maths_and_computer_science": 47.2, "engineering": 27.4, "arts_and_humanities": 56.9, "year": 2013, "services": 41.4, "total": 53.2, "agriculture": 56.9},
            {"education": 80.7, "health_and_social_security": 76.6, "social_sciences_management_and_law": 58.5, "sciences_maths_and_computer_science": 47.5, "engineering": 27.6, "arts_and_humanities": 58.0, "year": 2014, "services": 41.1, "total": 53.5, "agriculture": 56.9},
            {"education": 80.7, "health_and_social_security": 76.7, "social_sciences_management_and_law": 58.6, "sciences_maths_and_computer_science": 47.8, "engineering": 27.0, "arts_and_humanities": 58.7, "year": 2015, "services": 41.0, "total": 53.6, "agriculture": 56.3},
            {"education": 80.3, "health_and_social_security": 76.8, "social_sciences_management_and_law": 58.9, "sciences_maths_and_computer_science": 45.7, "engineering": 27.3, "arts_and_humanities": 58.6, "year": 2016, "services": 41.7, "total": 53.4, "agriculture": 56.4},
            {"education": 79.3, "health_and_social_security": 77.0, "social_sciences_management_and_law": 59.5, "sciences_maths_and_computer_science": 44.2, "engineering": 27.5, "arts_and_humanities": 59.0, "year": 2017, "services": 42.1, "total": 53.6, "agriculture": 57.3},
            {"education": 80.4, "health_and_social_security": 76.8, "social_sciences_management_and_law": 58.1, "sciences_maths_and_computer_science": 47.2, "engineering": 27.4, "arts_and_humanities": 56.9, "year": 2013, "services": 41.4, "total": 53.2, "agriculture": 56.9},
            {"education": 80.7, "health_and_social_security": 76.6, "social_sciences_management_and_law": 58.5, "sciences_maths_and_computer_science": 47.5, "engineering": 27.6, "arts_and_humanities": 58.0, "year": 2014, "services": 41.1, "total": 53.5, "agriculture": 56.9},
            {"education": 80.7, "health_and_social_security": 76.7, "social_sciences_management_and_law": 58.6, "sciences_maths_and_computer_science": 47.8, "engineering": 27.0, "arts_and_humanities": 58.7, "year": 2015, "services": 41.0, "total": 53.6, "agriculture": 56.3},
            {"education": 80.3, "health_and_social_security": 76.8, "social_sciences_management_and_law": 58.9, "sciences_maths_and_computer_science": 45.7, "engineering": 27.3, "arts_and_humanities": 58.6, "year": 2016, "services": 41.7, "total": 53.4, "agriculture": 56.4},
            {"education": 79.3, "health_and_social_security": 77.0, "social_sciences_management_and_law": 59.5, "sciences_maths_and_computer_science": 44.2, "engineering": 27.5, "arts_and_humanities": 59.0, "year": 2017, "services": 42.1, "total": 53.6, "agriculture": 57.3}
            ]
def generate_data_enrolled():
    """Return (year, percentage) sample tuples for the enrollment exercise."""
    years = [2013, 2014, 2015, 2016, 2017]
    percentages = [56.9, 58.0, 58.7, 58.6, 59.0]
    return list(zip(years, percentages))
def b2w1_exerc_1_grading(clean_percentage):
    """Grade exercise 1: clean_percentage() must strip the "(R)" revision
    markers (and any other letters/parentheses) from every value.

    Args:
        clean_percentage: Student-supplied function taking the raw data and
            returning the cleaned rows.

    Raises:
        AssertionError: If the cleaned data has the wrong structure or still
            contains revision markers / letters.
    """
    data = generate_data()
    data_fixed = clean_percentage(data)
    assert isinstance(data_fixed, list), "Not correct. Keep trying."
    assert len(data_fixed) == 10, "Not correct. Keep trying."
    for r in range(len(data_fixed)):
        # Validate the *cleaned* rows; the original code checked the raw
        # input rows here, which made these assertions vacuous.
        assert isinstance(data_fixed[r], dict), "Not correct. Keep trying."
        assert len(data_fixed[r]) == 10, "Not correct. Keep trying."
        for k in data_fixed[r].keys():
            assert "(R)" not in data_fixed[r][k], "Not correct. Keep trying."
            assert re.search(r"[a-zA-Z\(\)]", data_fixed[r][k]) is None, \
                "Not correct. Keep trying."
def b2w1_exerc_2_grading(clean_header_string, clean_header):
    """Grade exercise 2: the header-cleaning functions must produce string
    keys without commas, whitespace or uppercase letters."""
    data = generate_data()
    for row in data:
        for key in row.keys():
            area_clean = clean_header_string(key)
            assert isinstance(area_clean, str), "Not correct. Keep trying."
            assert re.search(r"[\,\sA-Z]", area_clean) is None, \
                "Not correct. Keep trying."
    data_fixed = clean_header(data)
    assert isinstance(data_fixed, list), "Not correct. Keep trying."
    for row in data_fixed:
        assert isinstance(row, dict), "Not correct. Keep trying."
        for key in row.keys():
            assert isinstance(key, str), "Not correct. Keep trying."
            assert re.search(r"[\,\sA-Z]", key) is None, \
                "Not correct. Keep trying."
def b2w1_exerc_3_grading(commas, data_types):
    """Grade exercise 3: commas() must replace decimal commas with dots and
    data_types() must convert values to int (year) / float (everything else)."""
    data_comma = generate_data_comma()
    data_dot = commas(data_comma)
    assert isinstance(data_dot, list), "Not correct. Keep trying."
    for row in data_dot:
        assert isinstance(row, dict), "Not correct. Keep trying."
        for value in row.values():
            assert isinstance(value, str), "Not correct. Keep trying."
            assert re.search(r"[\,]", value) is None, \
                "Not correct. Keep trying."
    data_dot_type = data_types(data_dot)
    assert isinstance(data_dot_type, list), "Not correct. Keep trying."
    for row in data_dot_type:
        assert isinstance(row, dict), "Not correct. Keep trying."
        for key, value in row.items():
            if key == "year":
                assert isinstance(value, int), "Not correct. Keep trying."
            else:
                assert isinstance(value, float), "Not correct. Keep trying."
def b2w1_exerc_4_grading(education_years):
    """Grade exercise 4: education_years() must return 12 (year, percentage)
    tuples for a given area and be implemented with map() and a lambda."""
    data_clean = generate_data_clean()
    education_areas = [key for key in data_clean[0].keys() if key != "year"]
    for area in education_areas:
        percent_women_year = education_years(data_clean, area)
        assert isinstance(percent_women_year, list), "Not correct. Keep trying."
        assert len(percent_women_year) == 12, "Not correct. Keep trying."
        for entry in percent_women_year:
            assert isinstance(entry, tuple), "Not correct. Keep trying."
            assert len(entry) == 2, "Not correct. Keep trying."
            assert isinstance(entry[0], int), "Not correct. Keep trying."
            assert isinstance(entry[1], float), "Not correct. Keep trying."
    # Spot-check a few known values and orderings.
    test1 = education_years(data_clean, "total")
    assert math.isclose(test1[3][1],
                        53.5,
                        abs_tol=0.1), "Not correct. Keep trying."
    assert test1[10][0] == 2016, "Not correct. Keep trying."
    test2 = education_years(data_clean, "services")
    assert math.isclose(test2[5][1],
                        41.7,
                        abs_tol=0.1), "Not correct. Keep trying."
    assert test2[8][0] == 2014, "Not correct. Keep trying."
    # The exercise explicitly requires a functional-style implementation.
    source = inspect.getsource(education_years)
    assert "map" in source, "Not correct. Keep trying."
    assert "lambda" in source, "Not correct. Keep trying."
def b2w1_exerc_5_grading(female_enrolled, threshold):
data_enrolled = generate_data_enrolled()
data_filtered = female_enrolled(data_enrolled, 0)
assert isinstance(data_filtered, list), "Not correct. Keep trying."
assert len(data_filtered) == 5, "Not correct. Keep trying."
for year in data_filtered:
assert isinstance(year, tuple), "Not correct. Keep trying."
assert len(year) == 2, "Not correct. Keep trying."
assert isinstance(year[0], int), "Not correct. Keep trying."
assert isinstance(year[1], float), "Not correct. Keep trying."
test1 = female_enrolled(data_enrolled, 58.1)
assert len(test1) == 3, "Not correct. Keep trying."
assert math.isclose(test1[1][1],
58.7,
| |
<reponame>arthurlogilab/pyramid_session_redis
# -*- coding: utf-8 -*-
# stdlib
import hashlib
# pypi
from pyramid.decorator import reify
from pyramid.exceptions import ConfigurationError
from pyramid.interfaces import ISession
from zope.interface import implementer
# local
from .compat import (
pickle,
token_hex,
to_unicode,
)
from .exceptions import (
InvalidSession,
InvalidSession_DeserializationError,
InvalidSession_Lazycreate,
InvalidSession_PayloadLegacy,
InvalidSession_NotInBackend,
InvalidSession_PayloadTimeout,
RawDeserializationError,
)
from .util import (
empty_session_payload,
int_time,
LAZYCREATE_SESSION,
NotSpecified,
persist,
recookie,
refresh,
SESSION_API_VERSION,
)
from .util import encode_session_payload as encode_session_payload_func
from .util import decode_session_payload as decode_session_payload_func
# ==============================================================================
def hashed_value(serialized):
    """
    Compute a quick MD5 digest of already-serialized session data.

    The digest is used only to detect whether the payload changed since it
    was last persisted; it is a comparison aid, not a security measure.

    :param serialized: bytes. serialized session data to hash.
    :returns: hex digest string.
    """
    digest = hashlib.md5(serialized)
    return digest.hexdigest()
class _SessionState(object):
    """
    Lightweight holder for the per-request state of a ``RedisSession``.

    Instances carry the deserialized payload plus bookkeeping markers that
    the decorators and cleanup routines consult.
    """

    # update markers, toggled during the request
    please_persist = None
    please_recookie = None
    please_refresh = None

    # suppression markers consulted by cleanup routines
    dont_persist = None
    dont_refresh = None

    # optional cookie overrides; absent unless explicitly set
    cookie_expires = NotSpecified
    cookie_max_age = NotSpecified

    def __init__(
        self,
        session_id,
        managed_dict,
        created,
        timeout,
        expires,
        version,
        new,
        persisted_hash,
    ):
        """
        Every argument is always supplied by the caller, so no default
        attributes need to be created here.
        """
        self.session_id = session_id
        self.managed_dict = managed_dict
        self.created = created
        self.timeout = timeout
        self.expires = expires
        self.version = version
        self.new = new
        self.persisted_hash = persisted_hash

    def should_persist(self, session):
        """
        Backup check deciding whether the session must be written out.

        Compares a hash of the current payload against the hash captured at
        load time.  Returns ``False`` when no write is needed, otherwise the
        serialized session payload.

        :param session: the owning ``RedisSession``.
        :returns: ``False`` or the serialized session.
        """
        if self.dont_persist:
            return False
        # nearing the python-managed expiry? force a persist via the trigger
        if self.expires and session._timeout_trigger:
            if session.timestamp >= (self.expires - session._timeout_trigger):
                self.please_persist = True
        if self.please_persist:
            return session.to_redis()
        if not session._detect_changes:
            return False
        payload = session.to_redis()
        if hashed_value(payload) == self.persisted_hash:
            # unchanged since load; skip the write
            return False
        return payload
@implementer(ISession)
class RedisSession(object):
"""
Implements the Pyramid ISession and IDict interfaces and is returned by
the ``RedisSessionFactory``.
Methods that modify the ``dict`` (get, set, update, etc.) are decorated
with ``@persist`` to update the persisted copy in Redis and reset the
timeout.
Methods that are read-only (items, keys, values, etc.) are decorated
with ``@refresh`` to reset the session's expire time in Redis.
Methods that request the SetCookie headers are updated are decorated
with ``@recookie``.
Session methods make use of the dict methods that already communicate with
Redis, so they are not decorated.
Parameters:
``redis``
A Redis connection object.
``session_id``
A unique string associated with the session. Used as a prefix for keys
and hashes associated with the session.
``new``
Boolean. Whether this session is new (whether it was created in this
request).
``new_session``
A function that takes no arguments. It should insert a new session into
Redis under a new session_id, and return that session_id.
``new_session_payload``
UNDER DEVELOPMENT
A function that takes no arguments. It is used to to generate a new session
payload without creating the id (as new_session might)
``serialize``
A function to serialize pickleable Python objects. Default:
``cPickle.dumps``.
``deserialize``
The dual of ``serialize``, to convert serialized strings back to Python
objects. Default: ``cPickle.loads``.
``set_redis_ttl``
If ``True`` sets TTL data in Redis. If ``False`` assumes Redis is
configured as a LRU and does not update the expiry data via SETEX.
Default: ``True``
``set_redis_ttl_readheavy``
If ``True``, sets TTL data in Redis within a PIPELINE via GET+EXPIRE and
supresses automatic TTL refresh during the deferred cleanup phase. If not
``True``, an EXPIRE is sent as a separate action during the deferred
cleanup phase. The optimized behavior improves performance on read-heavy
operations, but may degrade performance on write-heavy operations. This
requires a ``timeout`` and ``set_redis_ttl`` to be True; it is not
compatible with ``timeout_trigger`` or ``python_expires``.
Default: ``None``
``_set_redis_ttl_onexit`` If ``True``, automatically queues a TTL Redis set
during the cleanup phase. This should be calculated based on the following
criteria:
* self._timeout
* self._set_redis_ttl
* not self._timeout_trigger
* not self._python_expires
* not self._set_redis_ttl_readheavy
This is handled as a config option and not a realtime calcluation to save
some processing. Unit Tests will want to pre-calculate this, otherwise the
main factory API of this package handles it.
Default: ``None``
``detect_changes``
If ``True``, supports change detection Default: ``True``
``deserialized_fails_new``
If ``True`` will handle deserializtion errors by creating a new session.
``new_payload_func``
Default ``None``. Function used to create a new session.
``timeout_trigger``
Default ``None``. If an int, used to trigger timeouts.
``python_expires``
Default ``None``. If True, Python is used to manage timeout data. setting
``timeout_trigger`` will enable this.
"""
def __init__(
    self,
    redis,
    session_id,  # could be ``LAZYCREATE_SESSION``
    new,
    new_session,
    new_payload_func=None,
    serialize=pickle.dumps,
    deserialize=pickle.loads,
    set_redis_ttl=True,
    detect_changes=True,
    deserialized_fails_new=None,
    encode_session_payload_func=None,
    decode_session_payload_func=None,
    timeout=None,
    timeout_trigger=None,
    python_expires=None,
    set_redis_ttl_readheavy=None,
    _set_redis_ttl_onexit=None,
):
    """
    Bind the Redis connection, serialization hooks and TTL/expiry
    configuration onto this session, then load (or lazily create) the
    backing ``_SessionState``.

    May raise ``InvalidSession`` variants via ``_make_session_state`` when
    the stored payload is missing, timed out, or in a legacy format.
    """
    # a timeout trigger only works with python-managed expiry bookkeeping,
    # so it implicitly enables ``python_expires``
    if timeout_trigger and not python_expires:
        python_expires = True
    self.redis = redis
    self.serialize = serialize
    self.deserialize = deserialize
    self.new_session = new_session
    # optional overrides for the payload/codec hooks; only bound when
    # supplied so the class-level defaults remain in effect otherwise
    if new_payload_func is not None:
        self.new_payload = new_payload_func
    if encode_session_payload_func is not None:
        self.encode_session_payload = encode_session_payload_func
    if decode_session_payload_func is not None:
        self.decode_session_payload = decode_session_payload_func
    self._set_redis_ttl = set_redis_ttl
    self._set_redis_ttl_readheavy = set_redis_ttl_readheavy
    self._detect_changes = detect_changes
    self._deserialized_fails_new = deserialized_fails_new
    self._timeout = timeout
    self._timeout_trigger = timeout_trigger
    self._python_expires = python_expires
    self._new = new
    # loads the payload from Redis (or builds a lazy one); must come after
    # the hook/config attributes above, which _make_session_state reads
    self._session_state = self._make_session_state(session_id=session_id, new=new)
    if _set_redis_ttl_onexit:
        # pre-calculated by the factory; queues a TTL refresh at cleanup
        self._session_state.please_refresh = True
def _resync(self):
    """resyncs the session. this is really only needed for testing."""
    # rebuild the session state from the backend, keeping the current id
    # and the original ``new`` flag
    self._session_state = self._make_session_state(
        session_id=self.session_id, new=self._new
    )
def new_session(self):
    # placeholder; the real callable is injected via ``__init__``
    raise NotImplementedError()

def new_payload(self):
    # default payload factory; may be replaced via ``__init__``
    return empty_session_payload()

def encode_session_payload(self, *args, **kwargs):
    """
    used to recode the session data for serialization;
    this default delegates to the module-level helper and
    can be overridden via ``__init__``
    """
    return encode_session_payload_func(*args, **kwargs)

def decode_session_payload(self, payload):
    """
    used to recode a serialized payload back into session data;
    this default delegates to the module-level helper and
    can be overridden via ``__init__``
    :param payload:
    :returns decoded payload:
    """
    return decode_session_payload_func(payload)

def serialize(self):
    # placeholder; replaced by the ``serialize`` callable in ``__init__``
    raise NotImplementedError()

def deserialize(self):
    # placeholder; replaced by the ``deserialize`` callable in ``__init__``
    raise NotImplementedError()
@reify
def _session_state(self):
    """this should only be executed after an `invalidate()`
    The `invalidate()` will "del self._session_state", which will remove the
    '_session_state' entry from the object dict (as created by __init__ or by
    this function which reify's the return value). Removing that function
    allows this method to execute, and `reify` puts the new result in the
    object's dict.
    """
    # a lazily-created state: no backend id is allocated until needed
    return self._make_session_state(session_id=LAZYCREATE_SESSION, new=True)

@reify
def timestamp(self):
    # request-stable "now"; cached by ``reify`` on first access
    return int_time()
def _make_session_state(self, session_id, new):
    """
    Build a ``_SessionState`` for ``session_id``.

    Loads the payload via ``from_redis`` (or builds a fresh one for a lazy
    session) and validates it, raising ``InvalidSession`` variants when the
    payload has timed out or uses a legacy format.

    :param session_id: backend key, or ``LAZYCREATE_SESSION``.
    :param new: bool. is this a newly-created session?
    :returns: ``_SessionState``
    """
    if session_id == LAZYCREATE_SESSION:
        # nothing in Redis yet; start from a fresh payload with no hash
        payload = self.new_payload()
        payload_hash = None
    else:
        # ``from_redis`` must receive the id explicitly: ``self.session_id``
        # is unusable here because the session state does not exist yet.
        (payload, payload_hash) = self.from_redis(
            session_id=session_id,
            persisted_hash=bool(self._detect_changes),
        )
        timed_out_at = payload.get("x")
        if timed_out_at and (self.timestamp > timed_out_at):
            raise InvalidSession_PayloadTimeout(
                "`session_id` (%s) timed out in python" % session_id
            )
        api_version = payload.get("v")
        if (not api_version) or (api_version < SESSION_API_VERSION):
            raise InvalidSession_PayloadLegacy(
                "`session_id` (%s) is a legacy format" % session_id
            )
    return _SessionState(
        session_id=session_id,
        managed_dict=payload["m"],  # managed_dict
        created=payload["c"],  # created
        timeout=payload.get("t"),  # timeout
        expires=payload.get("x"),  # expires
        version=payload.get("v"),  # session api version
        new=new,
        persisted_hash=payload_hash,
    )
# -- read-only views onto the underlying ``_SessionState`` ------------------

@property
def session_id(self):
    # the backend key; may still be ``LAZYCREATE_SESSION`` for lazy sessions
    return self._session_state.session_id

@property
def managed_dict(self):
    # the user-visible dict of session data
    return self._session_state.managed_dict

@property
def created(self):
    # creation time as recorded in the stored payload ("c")
    return self._session_state.created

@property
def timeout(self):
    # configured timeout from the stored payload ("t"), if any
    return self._session_state.timeout

@property
def expires(self):
    # python-managed expiry from the stored payload ("x"), if any
    return self._session_state.expires

@property
def version(self):
    # session payload API version ("v")
    return self._session_state.version

@property
def new(self):
    # True when the session was created during this request
    return self._session_state.new
def to_redis(self):
    """Serialize a dict of the data that needs to be persisted for this
    session, for storage in Redis.
    Primarily used by the ``@persist`` decorator to save the current
    session state to Redis.
    """
    # re-encode the live attributes into the compact payload format first,
    # then serialize with the configured serializer (default: pickle)
    data = self.encode_session_payload(
        self.managed_dict,
        self.created,
        self.timeout,
        self.expires,
        timeout_trigger=self._timeout_trigger,
        python_expires=self._python_expires,
    )
    return self.serialize(data)
def from_redis(self, session_id=None, persisted_hash=None):
"""
Get and deserialize the persisted data for this session from Redis.
If ``persisted_hash`` is ``None`` (default), returns a single
variable `deserialized`.
If set to ``True`` or ``False``, returns a tuple.
"""
_session_id = session_id or self.session_id
if _session_id == LAZYCREATE_SESSION:
raise InvalidSession_Lazycreate("`session_id` is LAZYCREATE_SESSION")
# optimize a `TTL refresh` under certain conditions
persisted = None
if self._set_redis_ttl_readheavy:
with self.redis.pipeline() as pipe:
persisted = pipe.get(_session_id)
_updated = pipe.expire(_session_id, self._timeout)
# mark that we shouldn't refresh
self._session_state.dont_refresh = True
else:
persisted = self.redis.get(_session_id)
if persisted is None:
raise InvalidSession_NotInBackend(
"`session_id` (%s) not | |
if hasattr(inv, 'vSwitchId'):
self.vSwitchId = inv.vSwitchId
else:
self.vSwitchId = None
if hasattr(inv, 'status'):
self.status = inv.status
else:
self.status = None
if hasattr(inv, 'cidrBlock'):
self.cidrBlock = inv.cidrBlock
else:
self.cidrBlock = None
if hasattr(inv, 'availableIpAddressCount'):
self.availableIpAddressCount = inv.availableIpAddressCount
else:
self.availableIpAddressCount = None
if hasattr(inv, 'description'):
self.description = inv.description
else:
self.description = None
if hasattr(inv, 'name'):
self.name = inv.name
else:
self.name = None
if hasattr(inv, 'ecsVpcUuid'):
self.ecsVpcUuid = inv.ecsVpcUuid
else:
self.ecsVpcUuid = None
if hasattr(inv, 'identityZoneUuid'):
self.identityZoneUuid = inv.identityZoneUuid
else:
self.identityZoneUuid = None
if hasattr(inv, 'createDate'):
self.createDate = inv.createDate
else:
self.createDate = None
if hasattr(inv, 'lastOpDate'):
self.lastOpDate = inv.lastOpDate
else:
self.lastOpDate = None
class EcsVpcInventory(object):
    """Inventory record mirroring an ECS VPC object."""

    # attribute names copied verbatim from the source inventory object
    _FIELDS = (
        'uuid', 'ecsVpcId', 'dataCenterUuid', 'status', 'deleted', 'name',
        'cidrBlock', 'vRouterId', 'description', 'createDate', 'lastOpDate',
    )

    def __init__(self):
        # every field starts as None so the attribute set is always stable
        for field in self._FIELDS:
            setattr(self, field, None)

    def evaluate(self, inv):
        """Copy each known field from ``inv``; missing attributes become None.

        ``getattr(..., None)`` is behaviorally identical to the previous
        hasattr/else ladder, without the duplication.
        """
        for field in self._FIELDS:
            setattr(self, field, getattr(inv, field, None))
class VpcVirtualRouteEntryInventory(object):
    """Inventory record mirroring a VPC virtual-route entry."""

    # attribute names copied verbatim from the source inventory object
    _FIELDS = (
        'uuid', 'type', 'vRouterType', 'status', 'destinationCidrBlock',
        'nextHopId', 'virtualRouterUuid', 'nextHopType', 'createDate',
        'lastOpDate',
    )

    def __init__(self):
        # every field starts as None so the attribute set is always stable
        for field in self._FIELDS:
            setattr(self, field, None)

    def evaluate(self, inv):
        """Copy each known field from ``inv``; missing attributes become None."""
        for field in self._FIELDS:
            setattr(self, field, getattr(inv, field, None))
class VpcVirtualRouterInventory(object):
    """Inventory record mirroring a VPC virtual router."""

    # attribute names copied verbatim from the source inventory object
    _FIELDS = (
        'uuid', 'vrId', 'vpcUuid', 'name', 'description', 'createDate',
        'lastOpDate',
    )

    def __init__(self):
        # every field starts as None so the attribute set is always stable
        for field in self._FIELDS:
            setattr(self, field, None)

    def evaluate(self, inv):
        """Copy each known field from ``inv``; missing attributes become None."""
        for field in self._FIELDS:
            setattr(self, field, getattr(inv, field, None))
class OssBucketInventory(object):
    """Inventory record mirroring an OSS bucket."""

    # attribute names copied verbatim from the source inventory object
    _FIELDS = (
        'uuid', 'bucketName', 'dataCenterUuid', 'current', 'regionName',
        'description', 'createDate', 'lastOpDate',
    )

    def __init__(self):
        # every field starts as None so the attribute set is always stable
        for field in self._FIELDS:
            setattr(self, field, None)

    def evaluate(self, inv):
        """Copy each known field from ``inv``; missing attributes become None."""
        for field in self._FIELDS:
            setattr(self, field, getattr(inv, field, None))
class OssUploadPartsInventory(object):
    """Inventory record mirroring an OSS multipart-upload part."""

    # attribute names copied verbatim from the source inventory object
    _FIELDS = (
        'id', 'uploadId', 'partNumber', 'total', 'eTag', 'partSize',
        'partCRC', 'ossBucketUuid', 'fileKey', 'createDate', 'lastOpDate',
    )

    def __init__(self):
        # every field starts as None so the attribute set is always stable
        for field in self._FIELDS:
            setattr(self, field, None)

    def evaluate(self, inv):
        """Copy each known field from ``inv``; missing attributes become None."""
        for field in self._FIELDS:
            setattr(self, field, getattr(inv, field, None))
class AliyunDiskInventory(object):
    """Inventory record mirroring an Aliyun disk."""

    # attribute names copied verbatim from the source inventory object
    _FIELDS = (
        'uuid', 'diskId', 'name', 'description', 'identityZoneUuid',
        'ecsInstanceUuid', 'diskCategory', 'diskType', 'diskChargeType',
        'status', 'sizeWithGB', 'deviceInfo', 'createDate', 'lastOpDate',
    )

    def __init__(self):
        # every field starts as None so the attribute set is always stable
        for field in self._FIELDS:
            setattr(self, field, None)

    def evaluate(self, inv):
        """Copy each known field from ``inv``; missing attributes become None."""
        for field in self._FIELDS:
            setattr(self, field, getattr(inv, field, None))
class AliyunSnapshotInventory(object):
    """Inventory record mirroring an Aliyun snapshot."""

    # attribute names copied verbatim from the source inventory object
    _FIELDS = (
        'uuid', 'snapshotId', 'name', 'description', 'dataCenterUuid',
        'diskUuid', 'status', 'aliyunSnapshotUsage', 'createDate',
        'lastOpDate',
    )

    def __init__(self):
        # every field starts as None so the attribute set is always stable
        for field in self._FIELDS:
            setattr(self, field, None)

    def evaluate(self, inv):
        """Copy each known field from ``inv``; missing attributes become None."""
        for field in self._FIELDS:
            setattr(self, field, getattr(inv, field, None))
class BaremetalConsoleProxyInventory(object):
    """Inventory record mirroring a baremetal console proxy."""

    # attribute names copied verbatim from the source inventory object
    _FIELDS = ('uuid', 'chassisUuid', 'token')

    def __init__(self):
        # every field starts as None so the attribute set is always stable
        for field in self._FIELDS:
            setattr(self, field, None)

    def evaluate(self, inv):
        """Copy each known field from ``inv``; missing attributes become None."""
        for field in self._FIELDS:
            setattr(self, field, getattr(inv, field, None))
class BaremetalHardwareInfoInventory(object):
    """Inventory record mirroring baremetal hardware info."""

    # attribute names copied verbatim from the source inventory object
    _FIELDS = (
        'uuid', 'chassisUuid', 'type', 'content', 'createDate', 'lastOpDate',
    )

    def __init__(self):
        # every field starts as None so the attribute set is always stable
        for field in self._FIELDS:
            setattr(self, field, None)

    def evaluate(self, inv):
        """Copy each known field from ``inv``; missing attributes become None."""
        for field in self._FIELDS:
            setattr(self, field, getattr(inv, field, None))
class BaremetalHostCfgInventory(object):
def __init__(self):
self.uuid = None
self.chassisUuid = None
self.password = None
self.vnc = None
self.unattended = None
self.cloneIso = None
self.createDate = None
self.lastOpDate = None
self.nicCfgs = None
self.bondings = None
def evaluate(self, inv):
if hasattr(inv, 'uuid'):
self.uuid = inv.uuid
else:
self.uuid = None
if hasattr(inv, 'chassisUuid'):
self.chassisUuid = inv.chassisUuid
else:
self.chassisUuid = None
if hasattr(inv, 'password'):
self.password = inv.password
else:
self.password = None
if hasattr(inv, 'vnc'):
self.vnc = inv.vnc
else:
self.vnc = None
if hasattr(inv, 'unattended'):
self.unattended = inv.unattended
else:
self.unattended = None
| |
import datetime
import os
import uuid
from os.path import join as opjoin
from pathlib import Path
import numpy as np
import requests
import yaml
from celery.result import AsyncResult
from django.db.models import Q
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from rest_framework import mixins, status, views, viewsets
from rest_framework.response import Response
from backend import celery_app, settings
from backend_app import mixins as BAMixins, models, serializers, swagger
from backend_app import utils
from deeplearning.tasks import classification, segmentation
from deeplearning.utils import nn_settings
class AllowedPropViewSet(BAMixins.ParamListModelMixin,
                         mixins.CreateModelMixin,
                         viewsets.GenericViewSet):
    # Exposes the allowed/default values a property may assume per model.
    queryset = models.AllowedProperty.objects.all()
    serializer_class = serializers.AllowedPropertySerializer
    # query parameters required by ParamListModelMixin for ``list``
    params = ['model_id', 'property_id']

    def get_queryset(self):
        # Narrow to the (model_id, property_id) pair from the query string.
        model_id = self.request.query_params.get('model_id')
        property_id = self.request.query_params.get('property_id')
        self.queryset = models.AllowedProperty.objects.filter(model_id=model_id, property_id=property_id)
        return self.queryset

    @swagger_auto_schema(
        manual_parameters=[openapi.Parameter('model_id', openapi.IN_QUERY, "Integer representing a model",
                                             required=True, type=openapi.TYPE_INTEGER),
                           openapi.Parameter('property_id', openapi.IN_QUERY, "Integer representing a property",
                                             required=True, type=openapi.TYPE_INTEGER)]
    )
    def list(self, request, *args, **kwargs):
        """Return the allowed and default values of a property

        This method returns the values that a property can assume depending on the model employed. \
        It provides a default value and a comma separated list of values to choose from.
        When this api returns an empty list, the property allowed values and default should be retrieved \
        using the `/properties/{id}` API.
        """
        return super().list(request, *args, **kwargs)

    def create(self, request, *args, **kwargs):
        """Create a new AllowedProperty

        This method create a new AllowedProperty
        """
        return super().create(request, *args, **kwargs)
class DatasetViewSet(mixins.ListModelMixin,
                     mixins.RetrieveModelMixin,
                     mixins.CreateModelMixin,
                     viewsets.GenericViewSet):
    """List/retrieve/create datasets; single-image datasets are hidden."""
    queryset = models.Dataset.objects.filter(is_single_image=False)
    serializer_class = serializers.DatasetSerializer

    def get_queryset(self):
        # Optionally narrow the queryset to one task.
        task_id = self.request.query_params.get('task_id')
        if task_id:
            self.queryset = models.Dataset.objects.filter(task_id=task_id, is_single_image=False)
        return self.queryset

    @swagger_auto_schema(
        manual_parameters=[openapi.Parameter('task_id', openapi.IN_QUERY, type=openapi.TYPE_INTEGER, required=False)]
    )
    def list(self, request, *args, **kwargs):
        """Get the list datasets to use for training or finetuning

        This method returns all the datasets in the backend.
        """
        return super().list(request, *args, **kwargs)

    def retrieve(self, request, *args, **kwargs):
        """Retrieve a single dataset

        This method returns the `{id}` dataset.
        """
        return super().retrieve(request, *args, **kwargs)

    @swagger_auto_schema(responses=swagger.DatasetViewSet_create_response)
    def create(self, request, *args, **kwargs):
        """Upload a new dataset downloading it from a URL

        This API uploads a dataset YAML file and stores it in the backend.
        The `path` field must contain the URL of a dataset, e.g. \
        [`dropbox.com/s/ul1yc8owj0hxpu6/isic_segmentation.yml`](https://www.dropbox.com/s/ul1yc8owj0hxpu6/isic_segmentation.yml?dl=1).
        """
        serializer = self.get_serializer(data=request.data)
        if not serializer.is_valid():
            return Response({'error': 'Validation error. Request data is malformed.'},
                            status=status.HTTP_400_BAD_REQUEST)
        url = serializer.validated_data['path']
        dataset_name = serializer.validated_data['name']
        # Single source of truth for the on-disk location (was built twice).
        dataset_out_path = f'{settings.DATASETS_DIR}/{dataset_name}.yml'
        if Path(dataset_out_path).exists():
            return Response({'error': f'The dataset `{dataset_name}` already exists'},
                            status=status.HTTP_400_BAD_REQUEST)
        # Only the HTTP call can raise RequestException; keep the try minimal.
        try:
            r = requests.get(url, allow_redirects=True)
        except requests.exceptions.RequestException:
            return Response({'error': 'URL malformed'}, status=status.HTTP_400_BAD_REQUEST)
        if r.status_code != 200:
            # Same response the old fall-through produced for non-200 replies.
            return Response({'error': 'URL malformed'}, status=status.HTTP_400_BAD_REQUEST)
        yaml_content = yaml.load(r.content, Loader=yaml.FullLoader)
        with open(dataset_out_path, 'w') as f:
            yaml.dump(yaml_content, f, Dumper=utils.MyDumper, sort_keys=False)
        # Store the local path (not the URL) in the database row.
        serializer.save(path=dataset_out_path)
        headers = self.get_success_headers(serializer.data)
        return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
class InferenceViewSet(views.APIView):
    @swagger_auto_schema(request_body=serializers.InferenceSerializer,
                         responses=swagger.inferences_post_responses)
    def post(self, request):
        """Start an inference process using a pre-trained model on a dataset

        This is the main entry point to start the inference. \
        It is mandatory to specify a pre-trained model and a dataset.
        """
        serializer = serializers.InferenceSerializer(data=request.data)
        # guard clause: reject invalid payloads up front
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        return utils.do_inference(serializer)
class InferenceSingleViewSet(views.APIView):
    @swagger_auto_schema(request_body=serializers.InferenceSingleSerializer,
                         responses=swagger.inferences_post_responses)
    def post(self, request):
        """Starts the inference providing an image URL

        This API allows the inference of a single image.
        It is mandatory to specify the same fields of `/inference` API, but for dataset_id which is replaced by \
        the url of the image to process.
        """
        serializer = serializers.InferenceSingleSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        image_url = serializer.validated_data['image_url']
        project_id = serializer.validated_data['project_id']
        task_id = models.Project.objects.get(id=project_id).task_id
        # Build an on-the-fly, one-image dataset description for the URL.
        dummy_dataset = f'name: "{image_url}"\n' \
                        f'description: "{image_url} auto-generated dataset"\n' \
                        f'images: ["{image_url}"]\n' \
                        f'split:\n' \
                        f'  test: [0]'
        # Save the row first so its id can be used to name the YAML file.
        # (was f'single-image-dataset': needless f-prefix, same literal)
        d = models.Dataset(name='single-image-dataset', task_id=task_id, path='', is_single_image=True)
        d.save()
        try:
            yaml_content = yaml.load(dummy_dataset, Loader=yaml.FullLoader)
        except yaml.YAMLError as e:
            # Roll back the placeholder row if the YAML is invalid.
            d.delete()
            print(e)
            return Response({'error': 'Error in YAML parsing'}, status=status.HTTP_400_BAD_REQUEST)
        # Single source of truth for the on-disk location (was built twice).
        dataset_path = f'{settings.DATASETS_DIR}/single_image_dataset_{d.id}.yml'
        with open(dataset_path, 'w') as f:
            yaml.dump(yaml_content, f, Dumper=utils.MyDumper, sort_keys=False)
        # Update the row with the real path.
        d.path = dataset_path
        d.save()
        serializer.validated_data['dataset_id'] = d
        return utils.do_inference(serializer)
class ModelViewSet(mixins.ListModelMixin,
                   viewsets.GenericViewSet):
    # Read-only listing of the available neural-network model definitions.
    queryset = models.Model.objects.all()
    serializer_class = serializers.ModelSerializer

    def get_queryset(self):
        # Optionally narrow to a single task; note this mutates
        # ``self.queryset`` rather than returning a fresh queryset.
        task_id = self.request.query_params.get('task_id')
        if task_id:
            self.queryset = models.Model.objects.filter(task_id=task_id)
        return self.queryset

    @swagger_auto_schema(
        manual_parameters=[openapi.Parameter('task_id', openapi.IN_QUERY,
                                             "Integer for filtering the models based on task.",
                                             type=openapi.TYPE_INTEGER, required=False)]
    )
    def list(self, request):
        """Returns the available Neural Network models

        This API allows the client to know which Neural Network models are available in the system in order to allow \
        their selection.
        The optional `task_id` parameter is used to filter them based on the task the models are used for.
        """
        return super().list(request)
class ModelWeightsViewSet(BAMixins.ParamListModelMixin,
                          mixins.RetrieveModelMixin,
                          mixins.UpdateModelMixin,
                          viewsets.GenericViewSet):
    """List/retrieve/update trained model weights."""
    queryset = models.ModelWeights.objects.all()
    serializer_class = serializers.ModelWeightsSerializer
    # query parameters required by ParamListModelMixin for ``list``
    params = ['model_id']

    def get_queryset(self):
        if self.action == 'list':
            # `list` filters on the model the weights were obtained from.
            model_id = self.request.query_params.get('model_id')
            self.queryset = models.ModelWeights.objects.filter(model_id=model_id)
            return self.queryset
        else:
            return super(ModelWeightsViewSet, self).get_queryset()

    @swagger_auto_schema(
        manual_parameters=[openapi.Parameter('model_id', openapi.IN_QUERY,
                                             "Return the modelweights obtained on `model_id` model.",
                                             type=openapi.TYPE_INTEGER, required=False)]
    )
    def list(self, request):
        """Returns the available Neural Network models

        When 'use pre-trained' is selected, it is possible to query the backend passing a `model_id` to obtain a list
        of dataset on which it was pretrained.
        """
        return super().list(request)

    def retrieve(self, request, *args, **kwargs):
        """Retrieve a single modelweight

        This API returns the modelweight with the requested`{id}`.
        """
        return super().retrieve(request, *args, **kwargs)

    def get_obj(self, id):
        # Fetch a ModelWeights row by primary key, or None when absent.
        try:
            return models.ModelWeights.objects.get(id=id)
        except models.ModelWeights.DoesNotExist:
            return None

    def put(self, request, *args, **kwargs):
        """Update an existing weight

        This method updates an existing model weight (e.g. change the name).
        """
        # Previously ``request.data['id']`` raised KeyError (HTTP 500) when
        # the field was missing; answer with an explicit 400 instead.
        weight_id = request.data.get('id')
        if weight_id is None:
            error = {"Error": "Missing required field `id`"}
            return Response(data=error, status=status.HTTP_400_BAD_REQUEST)
        weight = self.get_obj(weight_id)
        if not weight:
            error = {"Error": f"Weight {weight_id} does not exist"}
            return Response(data=error, status=status.HTTP_400_BAD_REQUEST)
        serializer = self.serializer_class(weight, data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        # Return every weight belonging to the updated weight's model.
        queryset = models.ModelWeights.objects.filter(model_id=weight.model_id)
        serializer = self.get_serializer(queryset, many=True)
        return Response(serializer.data)

    def update(self, request, *args, **kwargs):
        """Update an existing weight

        This method updates an existing model weight (e.g. change the name).
        """
        return super().update(request, *args, **kwargs)

    @swagger_auto_schema(auto_schema=None)
    def partial_update(self, request, *args, **kwargs):
        return super().partial_update(request, *args, **kwargs)
class OutputViewSet(views.APIView):
    """Expose the results produced by a finished inference process."""

    @staticmethod
    def trunc(values, decs=0):
        """Truncate `values` to `decs` decimal places (no rounding)."""
        return np.trunc(values * 10 ** decs) / (10 ** decs)

    @swagger_auto_schema(
        manual_parameters=[openapi.Parameter('process_id', openapi.IN_QUERY,
                                             "Pass a required UUID representing a finished process.",
                                             type=openapi.TYPE_STRING, format=openapi.FORMAT_UUID,
                                             required=False)],
        responses=swagger.OutputViewSet_get_responses
    )
    def get(self, request, *args, **kwargs):
        """Retrieve results about an inference process

        This API provides information about an `inference` process. In classification task it returns the list \
        of images and an array composed of the classes prediction scores.
        In segmentation task it returns the URLs of the segmented images.
        """
        # Fetch the query parameter once (the original read it twice).
        process_id = self.request.query_params.get('process_id')
        if not process_id:
            error = {'Error': 'Missing required parameter `process_id`'}
            return Response(data=error, status=status.HTTP_400_BAD_REQUEST)
        infer = models.Inference.objects.filter(celery_id=process_id)
        if not infer:
            # already deleted weight/training or inference
            return Response({"result": "Process stopped before finishing or non existing."},
                            status=status.HTTP_404_NOT_FOUND)
        if AsyncResult(process_id).status == 'PENDING':
            return Response({"result": "Process in execution. Try later for output results."},
                            status=status.HTTP_200_OK)
        infer = infer.first()
        if not os.path.exists(opjoin(settings.OUTPUTS_DIR, infer.outputfile)):
            return Response({"result": "Output file not found"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
        # `with` guarantees the file handle is closed; the original left it
        # open, leaking a descriptor on every request.
        with open(opjoin(settings.OUTPUTS_DIR, infer.outputfile), 'r') as outputs:
            lines = outputs.read().splitlines()
        # Differentiate classification and segmentation
        if infer.modelweights_id.model_id.task_id.name.lower() == 'classification':
            # Each line holds semicolon-separated per-class prediction scores.
            lines = [line.split(';') for line in lines]
        else:
            # Segmentation: the output file contains paths of result images;
            # rewrite them as URLs under MEDIA_URL.
            uri = request.build_absolute_uri(settings.MEDIA_URL)
            lines = [l.replace(settings.OUTPUTS_DIR, uri) for l in lines]
        return Response({'outputs': lines}, status=status.HTTP_200_OK)
class ProjectViewSet(mixins.ListModelMixin,
                     mixins.RetrieveModelMixin,
                     mixins.CreateModelMixin,
                     mixins.UpdateModelMixin,
                     viewsets.GenericViewSet):
    """List/retrieve/create/update endpoints for Project instances."""

    queryset = models.Project.objects.all()
    serializer_class = serializers.ProjectSerializer

    def get_obj(self, id):
        """Return the Project with primary key `id`, or None if absent."""
        try:
            return models.Project.objects.get(id=id)
        except models.Project.DoesNotExist:
            return None

    def list(self, request, *args, **kwargs):
        """Loads all the projects

        This method lists all the available projects.
        """
        return super().list(request, *args, **kwargs)

    def retrieve(self, request, *args, **kwargs):
        """Retrieve a single project

        Returns a project by `{id}`.
        """
        return super().retrieve(request, *args, **kwargs)

    @swagger_auto_schema(responses=swagger.ProjectViewSet_create_response)
    def create(self, request, *args, **kwargs):
        """Create a new project

        Create a new project.
        """
        return super().create(request, *args, **kwargs)

    def put(self, request, *args, **kwargs):
        """Update an existing project and return the refreshed project list."""
        # Guard against a missing `id` key: the original `request.data['id']`
        # raised KeyError, surfacing as an HTTP 500 instead of a client error.
        project_id = request.data.get('id')
        if project_id is None:
            error = {"Error": "Missing required field `id`"}
            return Response(data=error, status=status.HTTP_400_BAD_REQUEST)
        project = self.get_obj(project_id)
        if not project:
            error = {"Error": f"Project {project_id} does not exist"}
            return Response(data=error, status=status.HTTP_400_BAD_REQUEST)
        serializer = serializers.ProjectSerializer(project, data=request.data)
        if serializer.is_valid():
            serializer.save()
            # Returns all the elements so the client can refresh its listing.
            return self.list(request)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def update(self, request, *args, **kwargs):
        """Update an existing project

        Update a project instance by providing its `{id}`.
        """
        return super().update(request, *args, **kwargs)

    @swagger_auto_schema(auto_schema=None)
    def partial_update(self, request, *args, **kwargs):
        # Partial updates supported but hidden from the generated API docs.
        return super().partial_update(request, *args, **kwargs)
class PropertyViewSet(mixins.ListModelMixin,
mixins.RetrieveModelMixin,
viewsets.GenericViewSet):
queryset = models.Property.objects.all()
serializer_class = | |
** 2 == 0:
self.polygons.append(
numpy.array(paths[ii][0] + paths[ii][1][-2::-1])
)
else:
self.polygons.append(
numpy.array(paths[ii][0] + paths[ii][1][::-1])
)
paths[ii][0] = paths[ii][0][-1:]
paths[ii][1] = paths[ii][1][-1:]
self.layers.append(layer[ii % len(layer)])
self.datatypes.append(datatype[ii % len(datatype)])
for ii in range(number_of_paths):
diff = paths[ii][0][0] - paths[ii][1][0]
if diff[0] ** 2 + diff[1] ** 2 == 0:
paths[ii][1] = paths[ii][1][1:]
diff = p1[ii][0] - p1[ii][1]
if diff[0] ** 2 + diff[1] ** 2 != 0:
paths[ii][0].append(p1[ii][0])
paths[ii][1].append(p1[ii][1])
self.polygons.extend(numpy.array(pol[0] + pol[1][::-1]) for pol in paths)
self.layers.extend(
(layer * (number_of_paths // len(layer) + 1))[:number_of_paths]
)
self.datatypes.extend(
(datatype * (number_of_paths // len(datatype) + 1))[:number_of_paths]
)
def __str__(self):
    """Return a concise human-readable summary of this PolyPath."""
    vertex_count = sum(len(polygon) for polygon in self.polygons)
    return "PolyPath ({} polygons, {} vertices, layers {}, datatypes {})".format(
        len(self.polygons),
        vertex_count,
        list(set(self.layers)),
        list(set(self.datatypes)),
    )
class _SubPath(object):
    """
    Single path component.

    Wraps callables describing a parametric curve (position, derivative,
    offset and width as functions of u in [0, 1]) and provides adaptive
    sampling of the offset/edge curves.
    """

    __slots__ = "x", "dx", "off", "wid", "h", "err", "max_evals"

    def __init__(self, x, dx, off, wid, tolerance, max_evals):
        # x(u): curve position; dx(u, h): derivative estimate with
        # half-step h; off(u)/wid(u): offset and width along the curve.
        self.x = x
        self.dx = dx
        self.off = off
        self.wid = wid
        # Squared tolerance: refinement compares squared distances and
        # avoids the square root.
        self.err = tolerance ** 2
        # Finite-difference half-step, scaled by the evaluation budget.
        self.h = 0.5 / max_evals
        self.max_evals = max_evals

    def __str__(self):
        # BUG FIX: __call__ takes (u, arm); the previous code passed a stale
        # third argument — self(0, 1e-6, 0) — which raised TypeError.
        return "SubPath ({} - {})".format(self(0, 0), self(1, 0))

    def __call__(self, u, arm):
        """Evaluate the path at parameter `u`.

        arm == 0 returns the (offset) centerline point; arm < 0 / > 0
        return the point shifted by half the width to either side.
        """
        # Rotate the tangent to get a unit normal direction
        # (assumes module-level _pmone flips one component — defined
        # elsewhere in this file).
        v = self.dx(u, self.h)[::-1] * _pmone
        v /= (v[0] ** 2 + v[1] ** 2) ** 0.5
        x = self.x(u) + self.off(u) * v
        if arm == 0:
            return x
        # Estimate the local edge direction from neighbouring centerline
        # points, clamping u to [0, 1].
        u0 = max(0, u - self.h)
        u1 = min(1, u + self.h)
        w = (self(u1, 0) - self(u0, 0))[::-1] * _pmone
        w /= (w[0] ** 2 + w[1] ** 2) ** 0.5
        if arm < 0:
            return x - 0.5 * self.wid(u) * w
        return x + 0.5 * self.wid(u) * w

    def grad(self, u, arm):
        """Central-difference gradient of the selected arm curve at `u`."""
        u0 = max(0, u - self.h)
        u1 = min(1, u + self.h)
        return (self(u1, arm) - self(u0, arm)) / (u1 - u0)

    def points(self, u0, u1, arm):
        """Adaptively sample the arm curve between parameters u0 and u1.

        Midpoints are inserted wherever linear interpolation deviates from
        the curve by more than the tolerance, up to max_evals points.
        """
        u = [u0, u1]
        pts = [numpy.array(self(u[0], arm)), numpy.array(self(u[1], arm))]
        i = 1
        while i < len(pts) < self.max_evals:
            f = 0.2
            while f < 1:
                test_u = u[i - 1] * (1 - f) + u[i] * f
                test_pt = numpy.array(self(test_u, arm))
                # Deviation between the interpolated point and the true curve.
                test_err = pts[i - 1] * (1 - f) + pts[i] * f - test_pt
                if test_err[0] ** 2 + test_err[1] ** 2 > self.err:
                    u.insert(i, test_u)
                    pts.insert(i, test_pt)
                    # Restart refinement from the newly inserted segment.
                    f = 1
                    i -= 1
                else:
                    f += 0.3
            i += 1
        return pts
class FlexPath(object):
"""
Path object.
This class keeps information about the constructive parameters of
the path and calculates its boundaries only upon request.
It can be stored as a proper path element in the GDSII format,
unlike `Path`. In this case, the width must be constant along the
whole path.
Parameters
----------
points : array-like[N][2]
Points along the center of the path.
width : number, list
Width of each parallel path being created. The number of
parallel paths being created is defined by the length of this
list.
offset : number, list
Offsets of each parallel path from the center. If `width` is
not a list, the length of this list is used to determine the
number of parallel paths being created. Otherwise, offset must
be a list with the same length as width, or a number, which is
used as distance between adjacent paths.
corners : 'natural', 'miter', 'bevel', 'round', 'smooth', 'circular bend', callable, list
Type of joins. A callable must receive 6 arguments (vertex and
direction vector from both segments being joined, the center
and width of the path) and return a list of vertices that make
the join. A list can be used to define the join for each
parallel path.
ends : 'flush', 'extended', 'round', 'smooth', 2-tuple, callable, list
Type of end caps for the paths. A 2-element tuple represents
the start and end extensions to the paths. A callable must
receive 4 arguments (vertex and direction vectors from both
sides of the path) and return a list of vertices that make the
end cap. A list can be used to define the end type for each
parallel path.
bend_radius : number, list
Bend radii for each path when `corners` is 'circular bend'.
It has no effect for other corner types.
tolerance : number
Tolerance used to draw the paths and calculate joins.
precision : number
Precision for rounding the coordinates of vertices when
fracturing the final polygonal boundary.
max_points : integer
If the number of points in the polygonal path boundary is
greater than `max_points`, it will be fractured in smaller
polygons with at most `max_points` each. If `max_points` is
zero no fracture will occur.
gdsii_path : bool
If True, treat this object as a GDSII path element.
Otherwise, it will be converted into polygonal boundaries when
required.
width_transform : bool
If `gdsii_path` is True, this flag indicates whether the width
of the path should transform when scaling this object. It has
no effect when `gdsii_path` is False.
layer : integer, list
The GDSII layer numbers for the elements of each path. If the
number of layers in the list is less than the number of paths,
the list is repeated.
datatype : integer, list
The GDSII datatype for the elements of each path (between 0 and
255). If the number of datatypes in the list is less than the
number of paths, the list is repeated.
Notes
-----
The value of `tolerance` should not be smaller than `precision`,
otherwise there would be wasted computational effort in calculating
the paths.
"""
__slots__ = (
"n",
"ends",
"corners",
"points",
"offsets",
"widths",
"layers",
"datatypes",
"tolerance",
"precision",
"max_points",
"gdsii_path",
"width_transform",
"bend_radius",
"_polygon_dict",
)
_pathtype_dict = {"flush": 0, "round": 1, "extended": 2, "smooth": 1}
def __init__(
    self,
    points,
    width,
    offset=0,
    corners="natural",
    ends="flush",
    bend_radius=None,
    tolerance=0.01,
    precision=1e-3,
    max_points=199,
    gdsii_path=False,
    width_transform=True,
    layer=0,
    datatype=0,
):
    # Cached polygonal representation; stays None until boundaries are
    # requested (see the class docstring for parameter semantics).
    self._polygon_dict = None
    # Determine the number of parallel paths `n` and per-path widths/offsets.
    if isinstance(width, list):
        self.n = len(width)
        self.widths = width
        if isinstance(offset, list):
            self.offsets = offset
        else:
            # Scalar offset: interpreted as the distance between adjacent
            # paths, centered symmetrically around zero.
            self.offsets = [
                (i - 0.5 * (self.n - 1)) * offset for i in range(self.n)
            ]
    else:
        if isinstance(offset, list):
            # Number of paths comes from the offset list when width is scalar.
            self.n = len(offset)
            self.offsets = offset
        else:
            self.n = 1
            self.offsets = [offset]
        self.widths = [width] * self.n
    # Replicate widths/offsets so there is one row per path vertex.
    self.widths = numpy.tile(self.widths, (len(points), 1))
    self.offsets = numpy.tile(self.offsets, (len(points), 1))
    self.points = numpy.array(points)
    # Normalize per-path attributes; list entries repeat cyclically when
    # shorter than the number of paths.
    if isinstance(ends, list):
        self.ends = [ends[i % len(ends)] for i in range(self.n)]
    else:
        self.ends = [ends for _ in range(self.n)]
    if isinstance(corners, list):
        self.corners = [corners[i % len(corners)] for i in range(self.n)]
    else:
        self.corners = [corners for _ in range(self.n)]
    if isinstance(bend_radius, list):
        self.bend_radius = [
            bend_radius[i % len(bend_radius)] for i in range(self.n)
        ]
    else:
        self.bend_radius = [bend_radius for _ in range(self.n)]
    if isinstance(layer, list):
        self.layers = [layer[i % len(layer)] for i in range(self.n)]
    else:
        self.layers = [layer] * self.n
    if isinstance(datatype, list):
        self.datatypes = [datatype[i % len(datatype)] for i in range(self.n)]
    else:
        self.datatypes = [datatype] * self.n
    self.tolerance = tolerance
    self.precision = precision
    self.max_points = max_points
    self.gdsii_path = gdsii_path
    self.width_transform = width_transform
    # GDSII path elements only support a subset of end/corner styles;
    # warn (but do not fail) on unsupported combinations.
    if self.gdsii_path:
        if any(end == "smooth" or callable(end) for end in self.ends):
            warnings.warn(
                "[GDSPY] Smooth and custom end caps are not supported in `FlexPath` with `gdsii_path == True`.",
                stacklevel=3,
            )
        if any(
            corner != "natural" and corner != "circular bend"
            for corner in self.corners
        ):
            warnings.warn(
                "[GDSPY] Corner specification not supported in `FlexPath` with `gdsii_path == True`.",
                stacklevel=3,
            )
def __str__(self):
    """Return a concise human-readable summary of this FlexPath."""
    if self.n > 1:
        return "FlexPath (x{}, {} segments, layers {}, datatypes {})".format(
            self.n, self.points.shape[0], self.layers, self.datatypes
        )
    return "FlexPath ({} segments, layer {}, datatype {})".format(
        self.points.shape[0], self.layers[0], self.datatypes[0]
    )
def get_polygons(self, by_spec=False):
"""
Calculate the polygonal boundaries described by this path.
Parameters
----------
by_spec : bool
If True, the return value is a dictionary with the
polygons of each individual pair (layer, datatype).
Returns
-------
out : list of array-like[N][2] or dictionary
List containing the coordinates of the vertices of each
polygon, or dictionary with the list of polygons (if
`by_spec` is True).
"""
if self._polygon_dict is None:
self._polygon_dict = {}
if self.points.shape[0] == 2:
# | |
= 4
data_len = len(self.data_3d[0])
msg = "Cannot generate {} realizations with data of length {}".format(
realizations_len, data_len
)
with self.assertRaisesRegex(ValueError, msg):
_ = set_up_variable_cube(
self.data_3d, realizations=np.arange(realizations_len)
)
def test_error_unmatched_height_levels(self):
    """Test error is raised if the heights provided do not match the
    data dimensions"""
    n_heights = 4
    n_data = len(self.data_3d[0])
    msg = "Cannot generate {} heights with data of length {}".format(
        n_heights, n_data
    )
    with self.assertRaisesRegex(ValueError, msg):
        _ = set_up_variable_cube(
            self.data_3d, height_levels=np.arange(n_heights)
        )
def test_realizations_from_data_height_levels(self):
    """Tests realizations from data and height coordinates added"""
    height_levels = [1.5, 3.0, 4.5]
    data_4d = np.array([self.data_3d, self.data_3d])
    result = set_up_variable_cube(data_4d, height_levels=height_levels)
    self.assertArrayAlmostEqual(result.data, data_4d)
    # Expected dimension ordering: realization, height, then spatial axes.
    for name, dims in (
        ("realization", (0,)),
        ("height", (1,)),
        ("latitude", (2,)),
        ("longitude", (3,)),
    ):
        self.assertEqual(result.coord_dims(name), dims)
    self.assertArrayEqual(result.coord("realization").points, np.array([0, 1]))
    self.assertArrayEqual(result.coord("height").points, np.array(height_levels))
def test_realizations_height_levels(self):
    """Tests realizations and height coordinates added"""
    realizations = [0, 3]
    height_levels = [1.5, 3.0, 4.5]
    data_4d = np.array([self.data_3d, self.data_3d])
    result = set_up_variable_cube(
        data_4d, realizations=realizations, height_levels=height_levels
    )
    self.assertArrayAlmostEqual(result.data, data_4d)
    # Expected dimension ordering: realization, height, then spatial axes.
    for name, dims in (
        ("realization", (0,)),
        ("height", (1,)),
        ("latitude", (2,)),
        ("longitude", (3,)),
    ):
        self.assertEqual(result.coord_dims(name), dims)
    self.assertArrayEqual(
        result.coord("realization").points, np.array(realizations)
    )
    self.assertArrayEqual(result.coord("height").points, np.array(height_levels))
def test_error_no_height_levels_4d_data(self):
    """Tests error is raised if 4d data provided but not height_levels"""
    stacked = np.array([self.data_3d, self.data_3d])
    with self.assertRaisesRegex(
        ValueError, "Height levels must be provided if data has 4 dimensions."
    ):
        _ = set_up_variable_cube(stacked)
def test_error_too_many_dimensions(self):
    """Test error is raised if input cube has more than 4 dimensions"""
    pair = [self.data_3d, self.data_3d]
    data_5d = np.array([pair, pair])
    with self.assertRaisesRegex(
        ValueError, "Expected 2 to 4 dimensions on input data: got 5"
    ):
        _ = set_up_variable_cube(data_5d)
def test_error_not_enough_dimensions(self):
    """Test error is raised if 3D input cube and both realizations and heights provided"""
    msg = (
        "Input data must have 4 dimensions to add both realization "
        "and height coordinates: got 3"
    )
    with self.assertRaisesRegex(ValueError, msg):
        _ = set_up_variable_cube(
            self.data_3d, realizations=[0, 3, 4], height_levels=[1.5, 3.0, 4.5]
        )
def test_standard_grid_metadata_uk(self):
    """Test standard grid metadata is added if specified"""
    result = set_up_variable_cube(self.data, standard_grid_metadata="uk_det")
    expected = {
        "mosg__grid_type": "standard",
        "mosg__grid_version": "1.3.0",
        "mosg__grid_domain": "uk_extended",
        "mosg__model_configuration": "uk_det",
    }
    for attr, value in expected.items():
        self.assertEqual(result.attributes[attr], value)
def test_standard_grid_metadata_global(self):
    """Test standard grid metadata is added if specified"""
    result = set_up_variable_cube(self.data_3d, standard_grid_metadata="gl_ens")
    expected = {
        "mosg__grid_type": "standard",
        "mosg__grid_version": "1.3.0",
        "mosg__grid_domain": "global",
        "mosg__model_configuration": "gl_ens",
    }
    for attr, value in expected.items():
        self.assertEqual(result.attributes[attr], value)
def test_latlon_grid_spacing(self):
    """Test ability to set up lat-lon grid around 0,0 with specified grid spacing"""
    spacing = 1
    result = set_up_variable_cube(
        self.data, spatial_grid="latlon", grid_spacing=spacing
    )
    self.assertEqual(result.coord_dims("latitude"), (0,))
    self.assertEqual(result.coord_dims("longitude"), (1,))
    for axis in ("latitude", "longitude"):
        points = result.coord(axis).points
        # Adjacent points are separated by the requested spacing ...
        self.assertEqual(abs(points[0] - points[1]), spacing)
        # ... and the grid is symmetric about zero.
        self.assertEqual(abs(points[0]), abs(points[-1]))
def test_equalarea_grid_spacing(self):
    """Test ability to set up equalarea grid around 0,0 with specified grid spacing"""
    spacing = 1
    result = set_up_variable_cube(
        self.data, spatial_grid="equalarea", grid_spacing=spacing
    )
    self.assertEqual(result.coord_dims("projection_y_coordinate"), (0,))
    self.assertEqual(result.coord_dims("projection_x_coordinate"), (1,))
    for axis in ("projection_y_coordinate", "projection_x_coordinate"):
        points = result.coord(axis).points
        # Adjacent points are separated by the requested spacing ...
        self.assertEqual(abs(points[0] - points[1]), spacing)
        # ... and the grid is symmetric about zero.
        self.assertEqual(abs(points[0]), abs(points[-1]))
def test_latlon_domain_corner_grid_spacing(self):
    """Test ability to set up lat-lon grid from domain corner with grid spacing"""
    spacing = 1
    corner = (-17, -10)
    result = set_up_variable_cube(
        self.data,
        spatial_grid="latlon",
        grid_spacing=spacing,
        domain_corner=corner,
    )
    self.assertEqual(result.coord_dims("latitude"), (0,))
    self.assertEqual(result.coord_dims("longitude"), (1,))
    for axis, origin in zip(("latitude", "longitude"), corner):
        points = result.coord(axis).points
        # Adjacent points separated by the requested spacing, anchored at
        # the supplied domain corner.
        self.assertEqual(abs(points[0] - points[1]), spacing)
        self.assertEqual(points[0], origin)
def test_equalarea_domain_corner_grid_spacing(self):
    """Test ability to set up equalarea grid from domain corner with grid spacing"""
    spacing = 1
    corner = (1100, 300)
    result = set_up_variable_cube(
        self.data,
        spatial_grid="equalarea",
        grid_spacing=spacing,
        domain_corner=corner,
    )
    self.assertEqual(result.coord_dims("projection_y_coordinate"), (0,))
    self.assertEqual(result.coord_dims("projection_x_coordinate"), (1,))
    axes = ("projection_y_coordinate", "projection_x_coordinate")
    for axis, origin in zip(axes, corner):
        points = result.coord(axis).points
        # Adjacent points separated by the requested spacing, anchored at
        # the supplied domain corner.
        self.assertEqual(abs(points[0] - points[1]), spacing)
        self.assertEqual(points[0], origin)
def test_latlon_domain_corner(self):
    """Test grid points generated with default grid spacing if domain corner provided and grid spacing
    not provided"""
    result = set_up_variable_cube(
        self.data, spatial_grid="latlon", domain_corner=(-17, -10)
    )
    self.assertArrayEqual(result.coord("latitude").points, [-17.0, -7.0, 3.0])
    self.assertArrayEqual(
        result.coord("longitude").points, [-10.0, 0.0, 10.0, 20.0]
    )
def test_equalarea_domain_corner(self):
    """Test grid points generated with default grid spacing if domain corner provided and grid spacing
    not provided"""
    result = set_up_variable_cube(
        self.data, spatial_grid="equalarea", domain_corner=(1100, 300)
    )
    self.assertArrayEqual(
        result.coord("projection_y_coordinate").points, [1100.0, 3100.0, 5100.0]
    )
    self.assertArrayEqual(
        result.coord("projection_x_coordinate").points,
        [300.0, 2300.0, 4300.0, 6300.0],
    )
class Test_set_up_percentile_cube(IrisTest):
    """Test the set_up_percentile_cube function"""

    def setUp(self):
        """Set up simple array of percentile-type data"""
        self.data = np.array(
            [
                [[273.5, 275.1, 274.9], [274.2, 274.8, 274.1]],
                [[274.2, 276.4, 275.5], [275.1, 276.8, 274.6]],
                [[275.6, 278.1, 277.2], [276.4, 277.5, 275.3]],
            ],
            dtype=np.float32,
        )
        self.percentiles = np.array([20, 50, 80])

    def test_defaults(self):
        """Test default arguments produce cube with expected dimensions
        and metadata"""
        result = set_up_percentile_cube(self.data, self.percentiles)
        coord = result.coord("percentile")
        self.assertArrayEqual(coord.points, self.percentiles)
        self.assertEqual(coord.units, "%")
        check_mandatory_standards(result)

    def test_standard_grid_metadata(self):
        """Test standard grid metadata"""
        result = set_up_percentile_cube(
            self.data, self.percentiles, standard_grid_metadata="uk_ens"
        )
        expected = {
            "mosg__grid_type": "standard",
            "mosg__grid_version": "1.3.0",
            "mosg__grid_domain": "uk_extended",
            "mosg__model_configuration": "uk_ens",
        }
        for attr, value in expected.items():
            self.assertEqual(result.attributes[attr], value)

    def test_single_percentile(self):
        """Test a cube with one percentile correctly stores this as a scalar
        coordinate"""
        result = set_up_percentile_cube(self.data[1:2], self.percentiles[1:2])
        self.assertNotIn("percentile", get_dim_coord_names(result))
class Test_set_up_probability_cube(IrisTest):
    """Test the set_up_probability_cube function"""

    def setUp(self):
        """Set up array of exceedance probabilities"""
        # Probabilities decrease along the leading (threshold) axis, as
        # expected for "above threshold" exceedance data.
        self.data = np.array(
            [
                [[1.0, 1.0, 0.9], [0.9, 0.9, 0.8]],
                [[0.8, 0.8, 0.7], [0.7, 0.6, 0.4]],
                [[0.6, 0.4, 0.3], [0.3, 0.2, 0.1]],
                [[0.2, 0.1, 0.0], [0.1, 0.0, 0.0]],
            ],
            dtype=np.float32,
        )
        self.thresholds = np.array([275.0, 275.5, 276.0, 276.5], dtype=np.float32)

    def test_defaults(self):
        """Test default arguments produce cube with expected dimensions
        and metadata"""
        result = set_up_probability_cube(self.data, self.thresholds)
        thresh_coord = find_threshold_coordinate(result)
        self.assertEqual(
            result.name(), "probability_of_air_temperature_above_threshold"
        )
        self.assertEqual(result.units, "1")
        self.assertArrayEqual(thresh_coord.points, self.thresholds)
        self.assertEqual(thresh_coord.name(), "air_temperature")
        self.assertEqual(thresh_coord.var_name, "threshold")
        self.assertEqual(thresh_coord.units, "K")
        # The threshold coordinate carries exactly one attribute: the
        # relative-to-threshold indicator, defaulting to "greater_than".
        self.assertEqual(len(thresh_coord.attributes), 1)
        self.assertEqual(
            thresh_coord.attributes["spp__relative_to_threshold"], "greater_than",
        )
        check_mandatory_standards(result)

    def test_relative_to_threshold(self):
        """Test ability to reset the "spp__relative_to_threshold" attribute"""
        # Flip the data so values increase with threshold — presumably to keep
        # the data consistent with "less_than" semantics; verify against the
        # set_up_probability_cube implementation.
        data = np.flipud(self.data)
        result = set_up_probability_cube(
            data, self.thresholds, spp__relative_to_threshold="less_than"
        )
        self.assertEqual(len(result.coord(var_name="threshold").attributes), 1)
        self.assertEqual(
            result.coord(var_name="threshold").attributes["spp__relative_to_threshold"],
            "less_than",
        )

    def test_relative_to_threshold_set(self):
        """Test that an error is raised if the "spp__relative_to_threshold"
        attribute has not been set when setting up a probability cube"""
        msg = "The spp__relative_to_threshold attribute MUST be set"
        with self.assertRaisesRegex(ValueError, msg):
            set_up_probability_cube(
                self.data, self.thresholds, spp__relative_to_threshold=None
            )

    def test_standard_grid_metadata(self):
        """Test standard grid metadata"""
        result = set_up_probability_cube(
            self.data, self.thresholds, standard_grid_metadata="uk_ens"
        )
        self.assertEqual(result.attributes["mosg__grid_type"], "standard")
        self.assertEqual(result.attributes["mosg__grid_version"], "1.3.0")
        self.assertEqual(result.attributes["mosg__grid_domain"], "uk_extended")
        self.assertEqual(result.attributes["mosg__model_configuration"], "uk_ens")

    def test_single_threshold(self):
        """Test a cube with one threshold correctly stores this as a scalar
        coordinate"""
        result = set_up_probability_cube(self.data[1:2], self.thresholds[1:2])
        dim_coords = get_dim_coord_names(result)
        self.assertNotIn("air_temperature", dim_coords)

    def test_vicinity_cube(self):
        """Test an in-vicinity cube gets the correct name and threshold coordinate"""
        result = set_up_probability_cube(
            self.data, self.thresholds, variable_name="air_temperature_in_vicinity",
        )
        thresh_coord = find_threshold_coordinate(result)
        self.assertEqual(
            result.name(), "probability_of_air_temperature_in_vicinity_above_threshold"
        )
        self.assertEqual(thresh_coord.name(), "air_temperature")
        self.assertEqual(thresh_coord.var_name, "threshold")
class Test_add_coordinate(IrisTest):
"""Test the add_coordinate utility"""
def setUp(self):
    """Set up new coordinate descriptors"""
    # Ten height levels, 100 m apart, used as the new leading coordinate.
    self.height_points = np.arange(100.0, 1001.0, 100.0)
    self.height_unit = "metres"
    # Minimal 3x4 cube with a 4-hour lead time (frt 21:00 -> time 01:00).
    self.input_cube = set_up_variable_cube(
        np.ones((3, 4), dtype=np.float32),
        time=datetime(2017, 10, 10, 1, 0),
        frt=datetime(2017, 10, 9, 21, 0),
    )
def test_basic(self):
    """Test addition of a leading height coordinate"""
    result = add_coordinate(
        self.input_cube, self.height_points, "height", coord_units=self.height_unit
    )
    self.assertIsInstance(result, iris.cube.Cube)
    self.assertSequenceEqual(result.shape, (10, 3, 4))
    self.assertEqual(result.coord_dims("height"), (0,))
    height = result.coord("height")
    self.assertArrayAlmostEqual(height.points, self.height_points)
    self.assertEqual(height.dtype, np.float32)
    self.assertEqual(height.units, self.height_unit)
    check_mandatory_standards(result)
def test_adding_coordinate_with_attribute(self):
    """Test addition of a leading height coordinate with an appropriate
    attribute."""
    attrs = {"positive": "up"}
    result = add_coordinate(
        self.input_cube,
        self.height_points,
        "height",
        coord_units=self.height_unit,
        attributes=attrs,
    )
    self.assertIsInstance(result, iris.cube.Cube)
    self.assertEqual(result.coord_dims("height"), (0,))
    self.assertEqual(result.coord("height").attributes, attrs)
def test_reorder(self):
    """Test new coordinate can be placed in different positions"""
    cube = set_up_variable_cube(np.ones((4, 3, 4), dtype=np.float32))
    result = add_coordinate(
        cube,
        self.height_points,
        "height",
        coord_units=self.height_unit,
        order=[1, 0, 2, 3],
    )
    # The new height coordinate ends up second after reordering.
    self.assertSequenceEqual(result.shape, (4, 10, 3, 4))
    self.assertEqual(result.coord_dims("height"), (1,))
def test_datatype(self):
    """Test coordinate datatype"""
    result = add_coordinate(
        self.input_cube,
        self.height_points,
        "height",
        coord_units=self.height_unit,
        dtype=np.int32,
    )
    new_coord = result.coord("height")
    self.assertEqual(new_coord.dtype, np.int32)
def test_datetime(self):
    """Test a leading time coordinate can be added successfully"""
    times = [datetime(2017, 10, 10, 3, 0), datetime(2017, 10, 10, 4, 0)]
    result = add_coordinate(self.input_cube, times, "time", is_datetime=True)
    # time should now be the leading dimension with one point per datetime
    self.assertEqual(result.coord_dims("time"), (0,))
    self.assertEqual(len(result.coord("time").points), 2)
    # forecast period should be recalculated against the new valid times
    expected_fp = 3600 * np.array([6, 7], dtype=np.int64)
    self.assertArrayAlmostEqual(
        result.coord("forecast_period").points, expected_fp
    )
def test_datetime_no_fp(self):
    """Test a leading time coordinate can be added successfully when there
    is no forecast period on the input cube"""
    self.input_cube.remove_coord("forecast_period")
    times = [datetime(2017, 10, 10, 3, 0), datetime(2017, 10, 10, 4, 0)]
    result = add_coordinate(self.input_cube, times, "time", is_datetime=True)
    # a forecast period coordinate should have been created from scratch
    expected_fp = 3600 * np.array([6, 7], dtype=np.int64)
    self.assertArrayAlmostEqual(
        result.coord("forecast_period").points, expected_fp
    )
def test_time_points(self):
"""Test a time coordinate can be added using integer points rather
than datetimes, and that | |
highlight that part
if flex_st:
ax2[1].axvspan(flex_st, flex_en, alpha=0.2, color='red')
# Label the subplot
ax2[1].set_ylabel('Y Coordinate (m)')
ax2[1].tick_params(labelbottom=False)
ax2[1].title.set_text('Y')
ax2[1].grid(True)
# Get y and error values for the Z coord plot
yZ = data.EST.Z.values[st_epoch:en_epoch]
yerrZ = np.sqrt(data.VAR.Z.values[st_epoch:en_epoch])
# Plot the x and y values for the bottom subplot, including error range
if diff_lag:
yZ,x_times = calc_diff_lag(yZ,x_times,diff_lag)
ax2[2].plot(x_times,yZ)
ax2[2].fill_between(x_times, yZ-yerrZ, yZ+yerrZ,alpha=0.2)
# If indices given for start and end to flex event give, highlight that part
if flex_st:
ax2[2].axvspan(flex_st, flex_en, alpha=0.2, color='red')
# Label the subplot
ax2[2].set_ylabel('Z Coordinate (m)')
#ax2[2].set_xlabel(f'Epoch Number (Epoch 1: {epoch1.strftime("%Y-DOY-%j %H:%M:%S")})')
ax2[2].title.set_text('Z')
ax2[2].grid(True)
# Formating x-axis
myFmt = mdates.DateFormatter('%b-%d %H:%M')
ax2[2].xaxis.set_major_formatter(myFmt)
fig2.autofmt_xdate()
# Given title to the entire figure and show
fig2.suptitle(f'Coordinate Estimates - {station} - {date_str}')
if save_fig:
plt.tight_layout()
f_save = f'{save_fig}{date_str}_-{station}-PPP_XYZ_Coordinates.png'
fig2.savefig(f_save)
print(f'Saved: {f_save}')
if show:
fig2.show()
return fig2,ax2
def trop_plot(
        data,
        station,
        st_epoch = 0,
        en_epoch = None,
        flex_st = None,
        flex_en = None,
        save_fig = False,
        show = False,
        diff_lag = 0):
    """
    Plot of the Zenith Tropospheric Delay.

    Input
        data - Results to be plotted (df from _read_trace) - pandas DataFrame
        station - Station name used in the plot title and filename - str
        st_epoch - Start index if plotting is to start at some epoch other
                   than the first epoch in the df (optional) - int
        en_epoch - End index; None plots through the last epoch - int
    OPTIONAL
        If a portion of the plot is to be highlighted to show a flex event,
        need start and end values (x-axis units):
        flex_st - Where the flex event starts
        flex_en - Where the flex event ends
        save_fig - Path/filename prefix for saving the figure; falsy disables
                   saving. NOTE(review): the original doc said "set save_fig
                   = True", but the code uses it as a string prefix in the
                   output filename — passing True would produce a file named
                   "True<date>...". Confirm intended usage with callers.
        show - If True, display the figure before returning - bool
        diff_lag - If non-zero, plot lagged differences computed by
                   calc_diff_lag instead of raw values - int
    Output
        (fig, ax) for the Zenith Tropospheric Delay plot
    """
    # Get Epoch info: epochs are seconds since the J2000 origin (level 1 of
    # the DataFrame's MultiIndex).
    epochs = data.index.get_level_values(1).values
    epoch1 = J2000_ORIGIN + np.timedelta64(epochs[st_epoch],'s')
    date_str = epoch1.astype(datetime).strftime("%Y-DOY-%j")
    # Set up figure
    fig3,ax3 = plt.subplots(1,1, figsize = (12,6) )
    # Get y, x and error values for the ZTD plot; the error band is the
    # 1-sigma value derived from the estimate variance.
    y = data.EST.trop.values[st_epoch:en_epoch]
    yerr = np.sqrt(data.VAR.trop.values[st_epoch:en_epoch])
    x = epochs[st_epoch:en_epoch]
    x_times = J2000_ORIGIN + np.array(x,dtype='timedelta64[s]')
    # Plot x,y values and the error range
    if diff_lag:
        y,x_times = calc_diff_lag(y,x_times,diff_lag)
    ax3.plot(x_times,y)
    ax3.fill_between(x_times, y-yerr, y+yerr,alpha=0.2)
    # If indices given for start and end to flex event give, highlight that part
    if flex_st:
        ax3.axvspan(flex_st, flex_en, alpha=0.2, color='red')
    # Labels
    ax3.set_ylabel('ZTD (m)')
    #ax3.set_xlabel(f'Epoch 1: {epoch1.astype(datetime).strftime("%Y-DOY-%j %H:%M:%S")}')
    ax3.title.set_text(f'Zenith Tropospheric Delay - {station} - {date_str}')
    ax3.grid(True)
    # Formating x-axis
    myFmt = mdates.DateFormatter('%b-%d %H:%M')
    ax3.xaxis.set_major_formatter(myFmt)
    fig3.autofmt_xdate()
    if save_fig:
        plt.tight_layout()
        f_save = f'{save_fig}{date_str}_-{station}-PPP_ZTD_Estimates.png'
        fig3.savefig(f_save)
        print(f'Saved: {f_save}')
    if show:
        fig3.show()
    return fig3,ax3
def spppos_plot(
        data,
        station,
        st_epoch=0,
        en_epoch=None,
        flex_st=None,
        flex_en=None,
        save_fig=False,
        show=False,
        diff_lag=0):
    """Three-panel SPP coordinate (X, Y, Z) plot for the named station.

    Panels run top to bottom in X, Y, Z order.

    Parameters
    ----------
    data : dict
        Results from ``parseTRACEfile``; must provide ``data['Epoch1']``
        (a datetime) and ``data['sppPos']['X'|'Y'|'Z']`` coordinate lists.
    station : str
        Station of interest, used in the figure title and file name.
    st_epoch, en_epoch : int, optional
        Epoch slice to plot (default: from epoch 0 to the end).
    flex_st, flex_en : optional
        Start/end epoch numbers of a flex event to highlight in red.
    save_fig : False or str, optional
        If truthy, used as a path prefix for the saved PNG.
    show : bool, optional
        Call ``fig.show()`` when True.
    diff_lag : int, optional
        If non-zero, plot lagged differences computed by ``calc_diff_lag``.

    Returns
    -------
    (matplotlib.figure.Figure, numpy.ndarray of Axes)
    """
    epoch1 = data['Epoch1']
    # Set up figure: one horizontal panel per coordinate.
    fig4, ax4 = plt.subplots(3, 1, figsize=(18, 18))
    ylabels = ('X Coordinate (m)', 'Y Coordinate (m)', 'Z Coordinate (m)')
    x = []
    for panel, coord in enumerate('XYZ'):
        y = np.array(data['sppPos'][coord])[st_epoch:en_epoch]
        # BUGFIX: y is already sliced, so the x range is simply
        # st_epoch .. st_epoch + len(y). The old en_epoch branch subtracted
        # en_epoch a second time, producing an empty/wrong range whenever
        # en_epoch was given. Recomputing x per panel also stops diff_lag
        # from shortening x repeatedly across the three panels.
        x = list(range(st_epoch, st_epoch + len(y)))
        if diff_lag:
            y, x = calc_diff_lag(y, x, diff_lag)
        ax4[panel].plot(x, y)
        # Highlight the flex event; explicit None check so epoch 0 works.
        if flex_st is not None:
            ax4[panel].axvspan(flex_st, flex_en, alpha=0.2, color='red')
        # Label the subplot
        ax4[panel].set_ylabel(ylabels[panel])
        ax4[panel].title.set_text(coord)
        ax4[panel].grid(True)
    ax4[2].set_xlabel(f'Epoch Number (Epoch 1: {epoch1.strftime("%Y-DOY-%j %H:%M:%S")})')
    # Give a title to the entire figure (30 s epoch spacing assumed here).
    fig4.suptitle(f'SPP Coordinate Estimates - {station} - {epoch1.strftime("%Y-DOY-%j")} - {(timedelta(seconds = 30*(x[-1]+1))).days} Day Plot')
    if save_fig:
        f_save = f'{save_fig}{epoch1.strftime("%Y-DOY-%j")}_-{station}-SPP_XYZ_Coordinates.png'
        fig4.savefig(f_save)
        print(f'Saved: {f_save}')
    if show:
        fig4.show()
    return fig4, ax4
def pos_diff_plot(
data,
station,
snx_path,
st_epoch = 0,
en_epoch = None,
ymin = None,
ymax = None,
flex_st = None,
flex_en = None,
save_fig = False,
show = False,
monument = 'A'):
"""
Coordinate (X,Y,Z) plot using the data from the parseTRACEfile function and named station.
This is a 3 panel plot, each running horizontally and in X, Y, Z order from top to bottom
Input
data - Results to be plotted (dict from parseTRACEfile) - dict
station - The station of interest - str
snx_path - snx file path to obtain reference coordinates to compare against - Path obj
OPTIONAL
st_epoch - If plotting is to start at some epoch other than Epoch 1 (optional) - int
If a portion of the plot is to be highlight to show a flex event, need start and end epochs
flex_st - Epoch number where flex event starts - int
flex_en - Epoch number where flex event ends - int
If the figure is to be saved, set save_fig = True, this will save to pwd
Output
PPP POS Difference Plot
"""
# Get Epoch info:
unzipped_indexes = zip(*data.index)
index_arr = np.array(list(unzipped_indexes))
epochs = index_arr[1]
epoch1 = J2000_ORIGIN + np.timedelta64(epochs[0],'s')
date_str = epoch1.astype(datetime).strftime("%Y-DOY-%j")
# Get snx reference coordinates-
df_snx = _read_snx_solution(snx_path)
station += f'_{monument}'
snx_epoch = J2000_ORIGIN + np.array(df_snx.loc[station].index[0],dtype='timedelta64[s]')
x_snx = df_snx.loc[station].EST.STAX.values[0]
y_snx = df_snx.loc[station].EST.STAY.values[0]
z_snx = df_snx.loc[station].EST.STAZ.values[0]
xSD_snx = df_snx.loc[station].STD.STAX.values[0]
ySD_snx = df_snx.loc[station].STD.STAY.values[0]
zSD_snx = df_snx.loc[station].STD.STAZ.values[0]
# Set up figure
fig5,ax5 = plt.subplots(1,1, figsize = (12,6) )
# Get y, x and error values for the X coord plot
yX = data.EST.X.values[st_epoch:en_epoch] - x_snx
yerrX = np.sqrt(data.VAR.X.values[st_epoch:en_epoch] + xSD_snx**2)
x = epochs[st_epoch:en_epoch]
x_times = J2000_ORIGIN + np.array(x,dtype='timedelta64[s]')
# Plot the x and y values for the top most subplot, including error range
ax5.plot(x_times,yX,label='X Estimate (PEA-IGS)')
ax5.fill_between(x_times, yX-yerrX, yX+yerrX,alpha=0.2)
# Get y and error values for the Y coord plot
yY = data.EST.Y.values[st_epoch:en_epoch] - y_snx
yerrY = np.sqrt(data.VAR.Y.values[st_epoch:en_epoch] + ySD_snx**2)
# Plot the x and y values for the middle subplot, including error range
ax5.plot(x_times,yY,label='Y Estimate (PEA-IGS)')
ax5.fill_between(x_times, yY-yerrY, yY+yerrY,alpha=0.2)
# Get y and error values for the Z coord plot
yZ = data.EST.Z.values[st_epoch:en_epoch] - z_snx
yerrZ = np.sqrt(data.VAR.Z.values[st_epoch:en_epoch] + zSD_snx**2)
# Plot the x and y values for the bottom subplot, including error range
ax5.plot(x_times,yZ,label='Z Estimate (PEA-IGS)')
ax5.fill_between(x_times, yZ-yerrZ, yZ+yerrZ,alpha=0.2)
# If indices given for start and end to flex event give, highlight that part
if flex_st:
ax5.axvspan(flex_st, flex_en, alpha=0.2, color='red')
ax5.set_ylim(ymin=ymin,ymax=ymax)
ax5.grid(True)
ax5.legend()
# Formating x-axis
myFmt = mdates.DateFormatter('%b-%d %H:%M')
ax5.xaxis.set_major_formatter(myFmt)
| |
None, 128835: None,
128836: None, 128837: None, 128838: None,
128839: None, 128840: None, 128841: None,
128842: None, 128843: None, 128844: None,
128845: None, 128846: None, 128847: None,
128848: None, 128849: None, 128850: None,
128851: None, 128852: None, 128853: None,
128854: None, 128855: None, 128856: None,
128857: None, 128858: None, 128859: None,
128860: None, 128861: None, 128862: None,
128863: None, 128864: None, 128865: None,
128866: None, 128867: None, 128868: None,
128869: None, 128870: None, 128871: None,
128872: None, 128873: None, 128874: None,
128875: None, 128876: None, 128877: None,
128878: None, 128879: None, 128880: None,
128881: None, 128882: None, 128883: None,
128896: None, 128897: None, 128898: None,
128899: None, 128900: None, 128901: None,
128902: None, 128903: None, 128904: None,
128905: None, 128906: None, 128907: None,
128908: None, 128909: None, 128910: None,
128911: None, 128912: None, 128913: None,
128914: None, 128915: None, 128916: None,
128917: None, 128918: None, 128919: None,
128920: None, 128921: None, 128922: None,
128923: None, 128924: None, 128925: None,
128926: None, 128927: None, 128928: None,
128929: None, 128930: None, 128931: None,
128932: None, 128933: None, 128934: None,
128935: None, 128936: None, 128937: None,
128938: None, 128939: None, 128940: None,
128941: None, 128942: None, 128943: None,
128944: None, 128945: None, 128946: None,
128947: None, 128948: None, 128949: None,
128950: None, 128951: None, 128952: None,
128953: None, 128954: None, 128955: None,
128956: None, 128957: None, 128958: None,
128959: None, 128960: None, 128961: None,
128962: None, 128963: None, 128964: None,
128965: None, 128966: None, 128967: None,
128968: None, 128969: None, 128970: None,
128971: None, 128972: None, 128973: None,
128974: None, 128975: None, 128976: None,
128977: None, 128978: None, 128979: None,
128980: None, 128981: None, 128982: None,
128983: None, 128984: None, 128992: None,
128993: None, 128994: None, 128995: None,
128996: None, 128997: None, 128998: None,
128999: None, 129000: None, 129001: None,
129002: None, 129003: None, 129024: None,
129025: None, 129026: None, 129027: None,
129028: None, 129029: None, 129030: None,
129031: None, 129032: None, 129033: None,
129034: None, 129035: None, 129040: None,
129041: None, 129042: None, 129043: None,
129044: None, 129045: None, 129046: None,
129047: None, 129048: None, 129049: None,
129050: None, 129051: None, 129052: None,
129053: None, 129054: None, 129055: None,
129056: None, 129057: None, 129058: None,
129059: None, 129060: None, 129061: None,
129062: None, 129063: None, 129064: None,
129065: None, 129066: None, 129067: None,
129068: None, 129069: None, 129070: None,
129071: None, 129072: None, 129073: None,
129074: None, 129075: None, 129076: None,
129077: None, 129078: None, 129079: None,
129080: None, 129081: None, 129082: None,
129083: None, 129084: None, 129085: None,
129086: None, 129087: None, 129088: None,
129089: None, 129090: None, 129091: None,
129092: None, 129093: None, 129094: None,
129095: None, 129104: None, 129105: None,
129106: None, 129107: None, 129108: None,
129109: None, 129110: None, 129111: None,
129112: None, 129113: None, 129120: None,
129121: None, 129122: None, 129123: None,
129124: None, 129125: None, 129126: None,
129127: None, 129128: None, 129129: None,
129130: None, 129131: None, 129132: None,
129133: None, 129134: None, 129135: None,
129136: None, 129137: None, 129138: None,
129139: None, 129140: None, 129141: None,
129142: None, 129143: None, 129144: None,
129145: None, 129146: None, 129147: None,
129148: None, 129149: None, 129150: None,
129151: None, 129152: None, 129153: None,
129154: None, 129155: None, 129156: None,
129157: None, 129158: None, 129159: None,
129168: None, 129169: None, 129170: None,
129171: None, 129172: None, 129173: None,
129174: None, 129175: None, 129176: None,
129177: None, 129178: None, 129179: None,
129180: None, 129181: None, 129182: None,
129183: None, 129184: None, 129185: None,
129186: None, 129187: None, 129188: None,
129189: None, 129190: None, 129191: None,
129192: None, 129193: None, 129194: None,
129195: None, 129196: None, 129197: None,
129280: None, 129281: None, 129282: None,
129283: None, 129284: None, 129285: None,
129286: None, 129287: None, 129288: None,
129289: None, 129290: None, 129291: None,
129293: None, 129294: None, 129295: None,
129296: None, 129297: None, 129298: None,
129299: None, 129300: None, 129301: None,
129302: None, 129303: None, 129304: None,
129305: None, 129306: None, 129307: None,
129308: None, 129309: None, 129310: None,
129311: None, 129312: None, 129313: None,
129314: None, 129315: None, 129316: None,
129317: None, 129318: None, 129319: None,
129320: None, 129321: None, 129322: None,
129323: None, 129324: None, 129325: None,
129326: None, 129327: None, 129328: None,
129329: None, 129330: None, 129331: None,
129332: None, 129333: None, 129334: None,
129335: None, 129336: None, 129337: None,
129338: None, 129339: None, 129340: None,
129341: None, 129342: None, 129343: None,
129344: None,
129345: None, 129346: None, 129347: None,
129348: None, 129349: None, 129350: None,
129351: None, 129352: None, 129353: None,
129354: None, 129355: None, 129356: None,
129357: None, 129358: None, 129359: None,
129360: None, 129361: None, 129362: None,
129363: None, 129364: None, 129365: None,
129366: None, 129367: None, 129368: None,
129369: None, 129370: None, 129371: None,
129372: None, 129373: None, 129374: None,
129375: None, 129376: None, 129377: None,
129378: None, 129379: None, 129380: None,
129381: None, 129382: None, 129383: None,
129384: None, 129385: None, 129386: None,
129387: None, 129388: None, 129389: None,
129390: None, 129391: None, 129392: None,
129393: None,
129395: None, 129396: None, 129397: None,
129398: None, 129402: None, 129403: None,
129404: None,
129405: None, 129406: None, 129407: None,
129408: None, 129409: None, 129410: None,
129411: None, 129412: None, 129413: None,
129414: None, 129415: None, 129416: None,
129417: None, 129418: None, 129419: None,
129420: None, 129421: None, 129422: None,
129423: None, 129424: None, 129425: None,
129426: None, 129427: None, 129428: None,
129429: None, 129430: None, 129431: None,
129432: None, 129433: None, 129434: None,
129435: None, 129436: None, 129437: None,
129438: None, 129439: None, 129440: None,
129441: None, 129442: None, 129445: None,
129446: None, 129447: None, 129448: None,
129449: None, 129450: None, 129454: None,
129455: None, 129456: None,
129457: None, 129458: None, 129459: None,
129460: None, 129461: None, 129462: None,
129463: None, 129464: None, 129465: None,
129466: None, 129467: None, 129468: None,
129469: None, 129470: None, 129471: None,
129472: None, 129473: None, 129474: None,
129475: None, 129476: None, 129477: None,
129478: None, 129479: None, 129480: None,
129481: None, 129482: None, 129485: None,
129486: None, 129487: None,
129488: None, 129489: None, 129490: None,
129491: None, 129492: None, 129493: None,
129494: None, 129495: None, 129496: None,
129497: None, 129498: None, 129499: None,
129500: None, 129501: None, 129502: None,
129503: None, 129504: None, 129505: None,
129506: None, 129507: None, 129508: None,
129509: None, 129510: None, 129511: None,
129512: None, 129513: None, 129514: None,
129515: None, 129516: None, 129517: None,
129518: None, 129519: None, 129520: None,
129521: None, 129522: None, 129523: None,
129524: None, 129525: None, 129526: None,
129527: None, 129528: None, 129529: None,
129530: None, 129531: None, 129532: None,
129533: None, 129534: None, 129535: None,
129536: None, 129537: None, 129538: None,
129539: None, 129540: None, 129541: None,
129542: None, 129543: None, 129544: None,
129545: None, 129546: None, 129547: None,
129548: None, 129549: None, 129550: None,
129551: None, 129552: None, 129553: None,
129554: None, 129555: None, 129556: None,
129557: None, 129558: None, 129559: None,
129560: None, 129561: None, 129562: None,
129563: None, 129564: None, 129565: None,
129566: None, 129567: None, 129568: None,
129569: None, 129570: None, 129571: None,
129572: None, 129573: None, 129574: None,
129575: None, 129576: None, 129577: None,
129578: None, 129579: None, 129580: None,
129581: None, 129582: None, 129583: None,
129584: None, 129585: None, 129586: None,
129587: None, 129588: None, 129589: None,
129590: None, 129591: None, 129592: None,
129593: None, 129594: None, 129595: None,
129596: None, 129597: None, 129598: None,
129599: None, 129600: None, 129601: None,
129602: None, 129603: None, 129604: None,
129605: None, 129606: None, 129607: None,
129608: None, 129609: None, 129610: None,
129611: None, 129612: None, 129613: None,
129614: None, 129615: None, 129616: None,
129617: None, 129618: None, 129619: None,
129632: None, 129633: None, 129634: None,
129635: None, 129636: None, 129637: None,
129638: None, 129639: None, 129640: None,
129641: None, 129642: None, 129643: None,
129644: None, 129645: | |
def __init__(
    self,
    component_type: str = None,
    biz_alias: str = None,
    extend_value: str = None,
    label: str = None,
    value: str = None,
    key: str = None,
):
    """Value of a single form component within a form instance.

    All fields are optional; fields left as ``None`` are omitted when
    serializing with ``to_map``.
    """
    # Component type (wire key 'componentType').
    self.component_type = component_type
    # Component alias (wire key 'bizAlias').
    self.biz_alias = biz_alias
    # Extended data of the form component (wire key 'extendValue').
    self.extend_value = extend_value
    # Component display name (wire key 'label').
    self.label = label
    # Data filled into the component (wire key 'value').
    self.value = value
    # Unique id of the component (wire key 'key').
    self.key = key
def validate(self):
    """No nested models to check; intentionally a no-op."""
    pass
def to_map(self):
    """Serialize this model to a wire-format dict, omitting None fields."""
    _map = super().to_map()
    if _map is not None:
        return _map
    result = dict()
    # (current value, wire key) pairs, kept in the original wire order.
    field_pairs = (
        (self.component_type, 'componentType'),
        (self.biz_alias, 'bizAlias'),
        (self.extend_value, 'extendValue'),
        (self.label, 'label'),
        (self.value, 'value'),
        (self.key, 'key'),
    )
    for attr_value, wire_key in field_pairs:
        if attr_value is not None:
            result[wire_key] = attr_value
    return result
def from_map(self, m: dict = None):
    """Populate this model from a wire-format dict and return self."""
    m = m or dict()
    # (wire key, python attribute) pairs; attributes are set only when
    # the corresponding key carries a non-None value.
    key_to_attr = (
        ('componentType', 'component_type'),
        ('bizAlias', 'biz_alias'),
        ('extendValue', 'extend_value'),
        ('label', 'label'),
        ('value', 'value'),
        ('key', 'key'),
    )
    for wire_key, attr_name in key_to_attr:
        incoming = m.get(wire_key)
        if incoming is not None:
            setattr(self, attr_name, incoming)
    return self
class QueryAllFormInstancesResponseBodyResultValues(TeaModel):
    """A single form instance returned by the query-all-form-instances API."""

    # (python attribute, wire key) pairs for the flat fields, in wire order.
    _FIELDS = (
        ('form_instance_id', 'formInstanceId'),
        ('app_uuid', 'appUuid'),
        ('form_code', 'formCode'),
        ('title', 'title'),
        ('creator', 'creator'),
        ('modifier', 'modifier'),
        ('create_timestamp', 'createTimestamp'),
        ('modify_timestamp', 'modifyTimestamp'),
        ('out_instance_id', 'outInstanceId'),
        ('out_biz_code', 'outBizCode'),
        ('attributes', 'attributes'),
    )

    def __init__(
        self,
        form_instance_id: str = None,
        app_uuid: str = None,
        form_code: str = None,
        title: str = None,
        creator: str = None,
        modifier: str = None,
        create_timestamp: int = None,
        modify_timestamp: int = None,
        out_instance_id: str = None,
        out_biz_code: str = None,
        attributes: Dict[str, Any] = None,
        form_inst_data_list: List[QueryAllFormInstancesResponseBodyResultValuesFormInstDataList] = None,
    ):
        # Form instance id.
        self.form_instance_id = form_instance_id
        # Application (builder) uuid.
        self.app_uuid = app_uuid
        # Form template code.
        self.form_code = form_code
        # Title.
        self.title = title
        # Creator.
        self.creator = creator
        # Last modifier.
        self.modifier = modifier
        # Creation timestamp.
        self.create_timestamp = create_timestamp
        # Modification timestamp.
        self.modify_timestamp = modify_timestamp
        # External instance id.
        self.out_instance_id = out_instance_id
        # External business code.
        self.out_biz_code = out_biz_code
        # Extension attributes.
        self.attributes = attributes
        # Component data of the form instance.
        self.form_inst_data_list = form_inst_data_list

    def validate(self):
        """Recursively validate the nested component-data entries."""
        for item in self.form_inst_data_list or []:
            if item:
                item.validate()

    def to_map(self):
        """Serialize to a wire dict; 'formInstDataList' is always present."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        for attr, wire_key in self._FIELDS:
            attr_value = getattr(self, attr)
            if attr_value is not None:
                result[wire_key] = attr_value
        # NOTE: the list key is emitted even when the list itself is None,
        # matching the original generated-SDK behavior.
        result['formInstDataList'] = []
        if self.form_inst_data_list is not None:
            result['formInstDataList'] = [
                item.to_map() if item else None
                for item in self.form_inst_data_list
            ]
        return result

    def from_map(self, m: dict = None):
        """Populate this model from a wire dict and return self."""
        m = m or dict()
        for attr, wire_key in self._FIELDS:
            incoming = m.get(wire_key)
            if incoming is not None:
                setattr(self, attr, incoming)
        # The nested list is always reset, then rebuilt from the payload.
        self.form_inst_data_list = []
        if m.get('formInstDataList') is not None:
            for entry in m.get('formInstDataList'):
                nested = QueryAllFormInstancesResponseBodyResultValuesFormInstDataList()
                self.form_inst_data_list.append(nested.from_map(entry))
        return self
class QueryAllFormInstancesResponseBodyResult(TeaModel):
    """Paged result: cursor info plus the page of form instances."""

    def __init__(
        self,
        next_token: str = None,
        has_more: bool = None,
        max_results: int = None,
        values: List[QueryAllFormInstancesResponseBodyResultValues] = None,
    ):
        # Cursor for the next page.
        self.next_token = next_token
        # Whether more data is available.
        self.has_more = has_more
        # Page size.
        self.max_results = max_results
        # Form instances in this page.
        self.values = values

    def validate(self):
        """Recursively validate each nested value model."""
        for entry in self.values or []:
            if entry:
                entry.validate()

    def to_map(self):
        """Serialize to a wire dict; the 'values' key is always present."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        scalar_pairs = (
            (self.next_token, 'nextToken'),
            (self.has_more, 'hasMore'),
            (self.max_results, 'maxResults'),
        )
        for attr_value, wire_key in scalar_pairs:
            if attr_value is not None:
                result[wire_key] = attr_value
        # Emitted even when self.values is None, as in the original SDK.
        result['values'] = []
        if self.values is not None:
            result['values'] = [entry.to_map() if entry else None for entry in self.values]
        return result

    def from_map(self, m: dict = None):
        """Populate this model from a wire dict and return self."""
        m = m or dict()
        for wire_key, attr in (('nextToken', 'next_token'),
                               ('hasMore', 'has_more'),
                               ('maxResults', 'max_results')):
            incoming = m.get(wire_key)
            if incoming is not None:
                setattr(self, attr, incoming)
        self.values = []
        raw_values = m.get('values')
        if raw_values is not None:
            self.values = [
                QueryAllFormInstancesResponseBodyResultValues().from_map(entry)
                for entry in raw_values
            ]
        return self
class QueryAllFormInstancesResponseBody(TeaModel):
    """Response body wrapping the paged query result."""

    def __init__(
        self,
        result: QueryAllFormInstancesResponseBodyResult = None,
    ):
        # Paged result.
        self.result = result

    def validate(self):
        """Delegate validation to the nested result, if present."""
        if self.result:
            self.result.validate()

    def to_map(self):
        """Serialize this body to a wire-format dict."""
        _map = super().to_map()
        if _map is not None:
            return _map
        serialized = dict()
        if self.result is not None:
            serialized['result'] = self.result.to_map()
        return serialized

    def from_map(self, m: dict = None):
        """Populate this body from a wire dict and return self."""
        m = m or dict()
        raw = m.get('result')
        if raw is not None:
            self.result = QueryAllFormInstancesResponseBodyResult().from_map(raw)
        return self
class QueryAllFormInstancesResponse(TeaModel):
    """Full API response: HTTP headers plus the deserialized body."""

    def __init__(
        self,
        headers: Dict[str, str] = None,
        body: QueryAllFormInstancesResponseBody = None,
    ):
        # Raw HTTP response headers.
        self.headers = headers
        # Deserialized response body.
        self.body = body

    def validate(self):
        """Headers and body are required; the body validates recursively."""
        self.validate_required(self.headers, 'headers')
        self.validate_required(self.body, 'body')
        if self.body:
            self.body.validate()

    def to_map(self):
        """Serialize this response to a wire-format dict."""
        _map = super().to_map()
        if _map is not None:
            return _map
        serialized = dict()
        if self.headers is not None:
            serialized['headers'] = self.headers
        if self.body is not None:
            serialized['body'] = self.body.to_map()
        return serialized

    def from_map(self, m: dict = None):
        """Populate this response from a wire dict and return self."""
        m = m or dict()
        if m.get('headers') is not None:
            self.headers = m.get('headers')
        raw_body = m.get('body')
        if raw_body is not None:
            self.body = QueryAllFormInstancesResponseBody().from_map(raw_body)
        return self
class QueryFormByBizTypeHeaders(TeaModel):
    """Request headers for the query-form-by-biz-type API."""

    def __init__(
        self,
        common_headers: Dict[str, str] = None,
        x_acs_dingtalk_access_token: str = None,
    ):
        # Extra headers passed through verbatim.
        self.common_headers = common_headers
        # DingTalk access token header value.
        self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token

    def validate(self):
        """Nothing to validate; intentionally a no-op."""
        pass

    def to_map(self):
        """Serialize these headers to a wire-format dict."""
        _map = super().to_map()
        if _map is not None:
            return _map
        serialized = dict()
        if self.common_headers is not None:
            serialized['commonHeaders'] = self.common_headers
        if self.x_acs_dingtalk_access_token is not None:
            serialized['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
        return serialized

    def from_map(self, m: dict = None):
        """Populate these headers from a wire dict and return self."""
        m = m or dict()
        incoming = m.get('commonHeaders')
        if incoming is not None:
            self.common_headers = incoming
        token = m.get('x-acs-dingtalk-access-token')
        if token is not None:
            self.x_acs_dingtalk_access_token = token
        return self
class QueryFormByBizTypeRequest(TeaModel):
    """Request payload: app uuid plus the business-type filters."""

    def __init__(
        self,
        app_uuid: str = None,
        biz_types: List[str] = None,
    ):
        # Application (builder) uuid.
        self.app_uuid = app_uuid
        # Business-type identifiers of the forms to query.
        self.biz_types = biz_types

    def validate(self):
        """Nothing to validate; intentionally a no-op."""
        pass

    def to_map(self):
        """Serialize this request to a wire-format dict."""
        _map = super().to_map()
        if _map is not None:
            return _map
        payload = dict()
        if self.app_uuid is not None:
            payload['appUuid'] = self.app_uuid
        if self.biz_types is not None:
            payload['bizTypes'] = self.biz_types
        return payload

    def from_map(self, m: dict = None):
        """Populate this request from a wire dict and return self."""
        m = m or dict()
        for wire_key, attr in (('appUuid', 'app_uuid'), ('bizTypes', 'biz_types')):
            incoming = m.get(wire_key)
            if incoming is not None:
                setattr(self, attr, incoming)
        return self
class QueryFormByBizTypeResponseBodyResult(TeaModel):
def __init__(
self,
creator: str = None,
app_uuid: str = None,
form_code: str = None,
form_uuid: str = None,
name: str = None,
memo: str = None,
owner_id: str = None,
app_type: int = None,
biz_type: str = None,
status: str = None,
create_time: int = None,
modifed_time: int = None,
content: str = None,
):
# 创建人
self.creator = creator
# 应用搭建id
self.app_uuid = app_uuid
# 模板code
self.form_code = form_code
# 表单uuid
self.form_uuid = form_uuid
# 模板名称
self.name = name
# 模板描述
self.memo = memo
# 数据归属id
self.owner_id = owner_id
# 表单类型,0为流程表单,1为数据表单
self.app_type = app_type
# 业务标识
self.biz_type = biz_type
# 模板状态
self.status = status
# 创建时间
self.create_time = create_time
# 修改时间
self.modifed_time = modifed_time
# 表单控件描述
self.content = content
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.creator is not None:
result['creator'] = self.creator
if self.app_uuid is not None:
result['appUuid'] = self.app_uuid
if self.form_code is not None:
result['formCode'] = self.form_code
if self.form_uuid is not None:
result['formUuid'] = self.form_uuid
if self.name is not None:
result['name'] = self.name
if self.memo is not None:
result['memo'] = self.memo
if self.owner_id is not None:
result['ownerId'] = self.owner_id
if self.app_type is not None:
result['appType'] = self.app_type
if self.biz_type is not None:
result['bizType'] = self.biz_type
if self.status is not None:
result['status'] = self.status
if self.create_time is not None:
result['createTime'] = self.create_time
if self.modifed_time is | |
<filename>scripts/optic/orthologs2list.py
##########################################################################
#
# MRC FGU Computational Genomics Group
#
# $Id$
#
# Copyright (C) 2009 <NAME>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
##########################################################################
'''
optic/orthologs2list.py -
======================================================
:Author: <NAME>
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
Reads ortholog (or orphan) predictions from stdin and writes them out as
a list, formatted either as a graph of orthologous pairs or as connected
components. Orthologous clusters that contain the same gene can
optionally be combined into a single cluster.
Usage
-----
Example::
python optic/orthologs2list.py --help
Type::
python optic/orthologs2list.py --help
for command line help.
Documentation
-------------
Code
----
'''
import os
import sys
import string
import re
import optparse
import time
import warnings
import CGAT.Experiment as E
import CGAT.Orthologs as Orthologs
import CGAT.GraphTools as GraphTools
import CGAT.IOTools as IOTools
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if argv is None:
argv = sys.argv
parser = E.OptionParser(
version="%prog version: $Id: optic/orthologs2list.py 2781 2009-09-10 11:33:14Z andreas $")
parser.add_option("-s", "--species-regex", dest="species_regex", type="string",
help="regular expression to extract species from identifier.")
parser.add_option("-g", "--gene-regex", dest="gene_regex", type="string",
help="regular expression to extract gene from identifier.")
parser.add_option("-b", "--only-best", dest="only_best", action="store_true",
help="write only the best pair for a pairing.")
parser.add_option("-w", "--no-within", dest="within", action="store_false",
help="do not write within species pairs.")
parser.add_option("-d", "--distances", dest="filename_distances", type="string",
help="filename with distances between transcripts.")
parser.add_option("-c", "--no-combine-genes", dest="combine_genes", action="store_false",
help="do not combine orthologous clusters which contain the same gene.")
parser.add_option("--filename-restrict-filter1", dest="filename_restrict_filter1", type="string",
help="filename with ids to filter out.")
parser.add_option("--filename-restrict-filter2", dest="filename_restrict_filter2", type="string",
help="filename with ids to filter out.")
parser.add_option("-f", "--format", dest="format", type="choice", choices=("graph", "components"),
help="output format.")
parser.add_option("-m", "--mode", dest="mode", type="choice", choices=("orthologs", "orphans"),
help="analyze either 'orthologs' or 'orphans'.")
parser.add_option("--genome1", dest="genome1", type="string",
help="first genome.")
parser.add_option("--genome2", dest="genome2", type="string",
help="second genome.")
parser.set_defaults(
species_regex="^([^|]+)\|",
gene_regex="^[^|]+\|[^|]+\|([^|]+)\|",
only_best=None,
filename_distances=None,
within=True,
combine_genes=True,
report_step=100000,
use_networkx=False,
separator="|",
genome1=None,
genome2=None,
mode="orthologs",
filename_restrict_filter1=None,
filename_restrict_filter2=None,
format="graph",
)
(options, args) = E.Start(parser, add_pipe_options=True)
rs = re.compile(options.species_regex)
rg = re.compile(options.gene_regex)
t0 = time.time()
# retrieve matches between pairs:
pairs = {}
max_dist = 0
if options.filename_distances and options.only_best:
infile = open(options.filename_distances, "r")
for line in infile:
if line[0] == "#":
continue
a, b, d = line[:-1].split("\t")[:3]
d = float(d)
if a < b:
key = "%s-%s" % (a, b)
else:
key = "%s-%s" % (b, a)
max_dist = max(d, max_dist)
pairs[key] = d
infile.close()
cluster_id = 0
ninput, noutput, nmissed, nskipped, nsingletons = 0, 0, 0, 0, 0
# Read positive filter information:
filter_restrict1 = {}
if options.filename_restrict_filter1:
xx, e = IOTools.ReadList(open(options.filename_restrict_filter1, "r"))
for x in xx:
filter_restrict1[Orthologs.Transcript(x).mTranscript] = True
filter_restrict2 = {}
if options.filename_restrict_filter2:
xx, e = IOTools.ReadList(open(options.filename_restrict_filter2, "r"))
for x in xx:
filter_restrict2[Orthologs.Transcript(x).mTranscript] = True
if options.loglevel >= 1:
options.stdlog.write("# read filtering information: %i/%i\n" %
(len(filter_restrict1), len(filter_restrict2)))
t1 = time.time()
if options.loglevel >= 1:
options.stdlog.write("# finished input in %i seconds.\n" % (t1 - t0))
orthologs = []
if options.mode == "orthologs":
orthologs = Orthologs.ReadInterpretation(sys.stdin, options.separator,
genome1=options.genome1,
genome2=options.genome2,
filter_restrict_transcripts1=filter_restrict1,
filter_restrict_transcripts2=filter_restrict2)
else:
orthologs = Orthologs.ReadOrphans(sys.stdin, options.separator,
genome1=options.genome1,
genome2=options.genome2,
filter_restrict_transcripts1=filter_restrict1,
filter_restrict_transcripts2=filter_restrict2)
ninput = len(orthologs)
max_dist = map(lambda x: x[4], orthologs)
t2 = time.time()
if options.loglevel >= 1:
options.stdlog.write(
"# reading %i groups in %i seconds.\n" % (ninput, t2 - t1))
if options.combine_genes:
if options.use_networkx:
nclusters = len(orthologs)
if options.loglevel >= 1:
options.stdlog.write(
"# before combining genes: %i clusters\n" % len(orthologs))
options.stdlog.flush()
# build links between all genes
# ignore warnings from networkx/matplotlib that a display
# can not be found
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import networkx
graph = networkx.Graph()
# This procedure skips genes with "0". This is a patch, because
# these genes should not be there in the first place.
iteration = 0
for transcripts1, transcripts2, genes1, genes2, weight in orthologs:
iteration += 1
if options.loglevel >= 1:
if (iteration % options.report_step == 0):
options.stdlog.write("# iteration: %i/%i (%i%%) in %i seconds.\n" %
(iteration, nclusters,
100 * iteration / nclusters, time.time() - t2))
options.stdlog.flush()
for g in genes1.keys():
graph.add_node((1, g))
for g in genes2.keys():
graph.add_node((2, g))
for g1 in genes1.keys():
if g1 == "0":
continue
for g2 in genes1.keys():
if g2 == "0":
continue
graph.add_edge((1, g1), (2, g2))
for g2 in genes2.keys():
if g2 == "0":
continue
graph.add_edge((1, g1), (2, g2))
for g1 in genes2.keys():
if g1 == "0":
continue
for g2 in genes2.keys():
if g2 == "0":
continue
graph.add_edge((2, g1), (2, g2))
if options.loglevel >= 1:
options.stdlog.write(
"# created graph in %i seconds.\n" % (time.time() - t2))
options.stdlog.flush()
tt2 = time.time()
components = networkx.connected_components(graph)
if options.loglevel >= 1:
options.stdlog.write(
"# calculated connected components in %i seconds\n" % (time.time() - tt2))
options.stdlog.flush()
else:
graph = GraphTools.ExternalGraph()
iteration = 0
nclusters = len(orthologs)
for transcripts1, transcripts2, genes1, genes2, weight in orthologs:
iteration += 1
if options.loglevel >= 1:
if (iteration % options.report_step == 0):
options.stdlog.write("# iteration: %i/%i (%i%%) in %i seconds.\n" %
(iteration, nclusters,
100 * iteration / nclusters, time.time() - t1))
options.stdlog.flush()
f = "%s;%s"
for g1 in genes1.keys():
if g1 == "0":
continue
for g2 in genes1.keys():
if g2 == "0":
continue
graph.add_edge(f % (1, g1), f % (2, g2))
for g2 in genes2.keys():
if g2 == "0":
continue
graph.add_edge(f % (1, g1), f % (2, g2))
for g1 in genes2.keys():
if g1 == "0":
continue
for g2 in genes2.keys():
if g2 == "0":
continue
graph.add_edge(f % (2, g1), f % (2, g2))
if options.loglevel >= 1:
options.stdlog.write(
"# created graph in %i seconds\n" % (time.time() - t2))
options.stdlog.flush()
tt2 = time.time()
graph.finalize()
components = graph.connected_components()
if options.loglevel >= 1:
options.stdlog.write("# retrieved %i connected components in %i seconds\n" % (
len(components), time.time() - tt2))
options.stdlog.flush()
for x in range(len(components)):
components[x] = map(lambda y: y.split(";"), components[x])
tt2 = time.time()
map_gene2cluster = {}
for x in range(len(components)):
for a, b in components[x]:
map_gene2cluster[b] = x
new_orthologs = [[[], [], 0] for x in range(len(components))]
singletons = []
for transcripts1, transcripts2, genes1, genes2, weight in orthologs:
if genes1:
try:
cluster_id = map_gene2cluster[genes1.keys()[0]]
except KeyError:
singletons.append(genes1)
elif genes2:
try:
cluster_id = map_gene2cluster[genes2.keys()[0]]
except KeyError:
singletons.append(genes2)
else:
raise "Error, both genes1 and genes2 are emtpy."
new_orthologs[cluster_id][0] += transcripts1
new_orthologs[cluster_id][1] += transcripts2
new_orthologs[cluster_id][2] = weight
nsingletons = len(singletons)
orthologs = map(lambda x: (x[0], x[1],
Orthologs.GetGenes(x[0]),
Orthologs.GetGenes(x[1]),
weight), new_orthologs)
if options.loglevel >= 1:
options.stdlog.write(
"# combining genes in %i seconds\n" % (time.time() - tt2))
options.stdlog.flush()
if options.loglevel >= 1:
options.stdlog.write("# after combining genes: %i clusters, %i singletons\n" % (
len(orthologs), nsingletons))
t3 = time.time()
if options.loglevel >= 1:
options.stdlog.write("# gene clustering in %i seconds.\n" % (t3 - t2))
cluster_id = 0
def getCode(s):
    """Return a one-character cardinality code for *s*: "0" empty, "1" single, "m" many."""
    n = len(s)
    if n == 0:
        return "0"
    if n == 1:
        return "1"
    return "m"
for transcripts1, transcripts2, genes1, genes2, weight in orthologs:
cluster_id += 1
g1 = getCode(genes1)
g2 = getCode(genes2)
t1 = getCode(transcripts1)
t2 = getCode(transcripts2)
if options.format == "graph":
# find best transcripts
best_transcripts = {}
if options.only_best:
# print only best match between each possible set of genes in
# ortholog pair
for gg1, tt1 in genes1.items():
for gg2, tt2 in genes2.items():
best = max_dist
best_pair = None
for x in tt1:
for y in tt2:
if x < y:
key = <KEY>
else:
key = <KEY>
if key in pairs:
if best > pairs[key]:
best = pairs[key]
best_pair = (x, y)
if best_pair:
best_transcripts[x] = 1
best_transcripts[y] = 1
options.stdout.write("%s\t%s\t%6.4f\t%s%s\t%s%s\t%i\n" % (
best_pair[0], best_pair[1], weight, g1, g2, str(t1), str(t2), cluster_id))
noutput += 1
else:
options.stdlog.write(
"# missed link between: %s %s\n" % (str(genes1), str(genes2)))
nmissed += 1
else:
for x in transcripts1:
for y in transcripts2:
options.stdout.write("%s\t%s\t%6.4f\t%s%s\t%s%s\t%i\n" % (
x, y, weight, g1, g2, str(t1), str(t2), cluster_id))
noutput += 1
if options.within:
# add self links for first species.
for x in range(len(transcripts1) - 1):
for y in range(x + 1, len(transcripts1)):
if not best_transcripts or \
(transcripts1[x] | |
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Exports Chromium changes to web-platform-tests."""
import argparse
import logging
from blinkpy.common.system.log_utils import configure_logging
from blinkpy.w3c.local_wpt import LocalWPT
from blinkpy.w3c.chromium_exportable_commits import exportable_commits_over_last_n_commits
from blinkpy.w3c.common import (
CHANGE_ID_FOOTER,
WPT_GH_URL,
WPT_REVISION_FOOTER,
EXPORT_PR_LABEL,
PROVISIONAL_PR_LABEL,
read_credentials,
)
from blinkpy.w3c.gerrit import GerritAPI, GerritCL, GerritError
from blinkpy.w3c.wpt_github import WPTGitHub, MergeError
from blinkpy.w3c.export_notifier import ExportNotifier
_log = logging.getLogger(__name__)
class TestExporter(object):
def __init__(self, host):
self.host = host
self.wpt_github = None
self.gerrit = None
self.dry_run = False
self.local_wpt = None
self.surface_failures_to_gerrit = False
def main(self, argv=None):
    """Creates PRs for in-flight CLs and merges changes that land on master.

    Args:
        argv: Optional list of command-line arguments (defaults to sys.argv
            inside argparse when None).

    Returns:
        A boolean: True if success, False if there were any patch failures.
    """
    options = self.parse_args(argv)
    self.dry_run = options.dry_run
    self.surface_failures_to_gerrit = options.surface_failures_to_gerrit
    log_level = logging.DEBUG if options.verbose else logging.INFO
    configure_logging(logging_level=log_level, include_time=True)
    # Having the full output when executive.run_command fails is useful when
    # investigating a failed export, as all we have are logs.
    self.host.executive.error_output_limit = None
    credentials = read_credentials(self.host, options.credentials_json)
    if not (credentials.get('GH_USER') and credentials.get('GH_TOKEN')):
        _log.error('You must provide your GitHub credentials for this '
                   'script to work.')
        _log.error('See https://chromium.googlesource.com/chromium/src'
                   '/+/master/docs/testing/web_platform_tests.md'
                   '#GitHub-credentials for instructions on how to set '
                   'your credentials up.')
        return False
    # Clients may already have been injected (e.g. by tests); only build
    # the ones that are still missing.
    self.wpt_github = self.wpt_github or WPTGitHub(
        self.host, credentials['GH_USER'], credentials['GH_TOKEN'])
    self.gerrit = self.gerrit or GerritAPI(
        self.host, credentials['GERRIT_USER'], credentials['GERRIT_TOKEN'])
    self.local_wpt = self.local_wpt or LocalWPT(self.host,
                                                credentials['GH_TOKEN'])
    self.local_wpt.fetch()
    _log.info('Searching for exportable in-flight CLs.')
    # The Gerrit search API is slow and easy to fail, so we wrap it in a try
    # statement to continue exporting landed commits when it fails.
    try:
        open_gerrit_cls = self.gerrit.query_exportable_open_cls()
    except GerritError as e:
        _log.info(
            'In-flight CLs cannot be exported due to the following error:')
        _log.error(str(e))
        gerrit_error = True
    else:
        self.process_gerrit_cls(open_gerrit_cls)
        gerrit_error = False
    _log.info('Searching for exportable Chromium commits.')
    exportable_commits, git_errors = self.get_exportable_commits()
    self.process_chromium_commits(exportable_commits)
    if git_errors:
        _log.info(
            'Attention: The following errors have prevented some commits from being '
            'exported:')
        for error in git_errors:
            _log.error(error)
    # Any Gerrit or git failure makes the whole run report failure.
    export_error = gerrit_error or git_errors
    if export_error:
        return not export_error
    _log.info('Automatic export process has finished successfully.')
    # Optionally surface upstream check failures back to Gerrit; that step's
    # own failure flips the overall return value.
    export_notifier_failure = False
    if self.surface_failures_to_gerrit:
        _log.info('Starting surfacing cross-browser failures to Gerrit.')
        export_notifier_failure = ExportNotifier(
            self.host, self.wpt_github, self.gerrit, self.dry_run).main()
    return not export_notifier_failure
def parse_args(self, argv):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'-v',
'--verbose',
action='store_true',
help='log extra details that may be helpful when debugging')
parser.add_argument(
'--dry-run',
action='store_true',
help='See what would be done without actually creating or merging '
'any pull requests.')
parser.add_argument(
'--credentials-json',
required=True,
help='A JSON file with an object containing zero or more of the '
'following keys: GH_USER, GH_TOKEN, GERRIT_USER, GERRIT_TOKEN')
parser.add_argument(
'--surface-failures-to-gerrit',
action='store_true',
help='Indicates whether to run the service that surfaces GitHub '
'faliures to Gerrit through comments.')
return parser.parse_args(argv)
def process_gerrit_cls(self, gerrit_cls):
for cl in gerrit_cls:
self.process_gerrit_cl(cl)
def process_gerrit_cl(self, cl):
    """Creates or refreshes the WPT PR mirroring one in-flight Gerrit CL."""
    _log.info('Found Gerrit in-flight CL: "%s" %s', cl.subject, cl.url)
    if not cl.has_review_started:
        _log.info('CL review has not started, skipping.')
        return
    pull_request = self.wpt_github.pr_with_change_id(cl.change_id)
    if not pull_request:
        # No PR mirrors this CL yet; make one.
        _log.info('No in-flight PR found for CL. Creating...')
        self.create_or_update_pr_from_inflight_cl(cl)
        return
    # A PR already exists; refresh it only when the CL has a newer patchset.
    pr_url = '{}pull/{}'.format(WPT_GH_URL, pull_request.number)
    _log.info('In-flight PR found: %s', pr_url)
    pr_cl_revision = self.wpt_github.extract_metadata(
        WPT_REVISION_FOOTER, pull_request.body)
    if cl.current_revision_sha == pr_cl_revision:
        _log.info(
            'PR revision matches CL revision. Nothing to do here.')
        return
    _log.info('New revision found, updating PR...')
    self.create_or_update_pr_from_inflight_cl(cl, pull_request)
def process_chromium_commits(self, exportable_commits):
for commit in exportable_commits:
self.process_chromium_commit(commit)
def process_chromium_commit(self, commit):
    """Exports one landed Chromium commit: merges its PR, or creates/updates one.

    Args:
        commit: A ChromiumCommit that touches web-platform-tests.
    """
    _log.info('Found exportable Chromium commit: %s %s', commit.subject(),
              commit.sha)
    pull_request = self.wpt_github.pr_for_chromium_commit(commit)
    if pull_request:
        pr_url = '{}pull/{}'.format(WPT_GH_URL, pull_request.number)
        _log.info('In-flight PR found: %s', pr_url)
        if pull_request.state != 'open':
            # Closed or already-merged PRs need no further action.
            _log.info('Pull request is %s. Skipping.', pull_request.state)
            return
        if PROVISIONAL_PR_LABEL in pull_request.labels:
            # If the PR was created from a Gerrit in-flight CL, update the
            # PR with the final checked-in commit in Chromium history.
            # TODO(robertma): Only update the PR when it is not up-to-date
            # to avoid unnecessary Travis runs.
            _log.info('Updating PR with the final checked-in change...')
            self.create_or_update_pr_from_landed_commit(
                commit, pull_request)
            self.remove_provisional_pr_label(pull_request)
            # Updating the patch triggers Travis, which will block merge.
            # Return early and merge next time.
            return
        self.merge_pull_request(pull_request)
    else:
        _log.info('No PR found for Chromium commit. Creating...')
        self.create_or_update_pr_from_landed_commit(commit)
def get_exportable_commits(self):
    """Gets exportable commits that can apply cleanly and independently.

    Returns:
        A list of ChromiumCommit for clean exportable commits, and a list
        of error messages for other exportable commits that fail to apply.
    """
    # Exportable commits that cannot apply cleanly are logged, and will be
    # retried next time. A common case is that a commit depends on an
    # earlier commit, and can only be exported after the earlier one.
    return exportable_commits_over_last_n_commits(
        self.host, self.local_wpt, self.wpt_github, require_clean=True)
def remove_provisional_pr_label(self, pull_request):
    """Strips the provisional label from a PR; a no-op under --dry-run."""
    if not self.dry_run:
        _log.info('Removing provisional label "%s"...', PROVISIONAL_PR_LABEL)
        self.wpt_github.remove_label(pull_request.number, PROVISIONAL_PR_LABEL)
        return
    _log.info(
        '[dry_run] Would have attempted to remove the provisional PR label'
    )
def merge_pull_request(self, pull_request):
    """Merges a PR whose change has landed, then notifies the Gerrit CL.

    Args:
        pull_request: A PullRequest namedtuple for an open exportable PR.
    """
    if self.dry_run:
        _log.info('[dry_run] Would have attempted to merge PR')
        return
    _log.info('Attempting to merge...')
    # This is outside of the try block because if there's a problem communicating
    # with the GitHub API, we should hard fail.
    # NOTE(review): `branch` is unused; the call appears to serve only as
    # that hard-fail connectivity check -- confirm before removing.
    branch = self.wpt_github.get_pr_branch(pull_request.number)
    try:
        self.wpt_github.merge_pr(pull_request.number)
        change_id = self.wpt_github.extract_metadata(
            CHANGE_ID_FOOTER, pull_request.body)
        if change_id:
            # Tell the originating Gerrit CL that its WPT mirror merged.
            cl = GerritCL(data={'change_id': change_id}, api=self.gerrit)
            pr_url = '{}pull/{}'.format(WPT_GH_URL, pull_request.number)
            cl.post_comment((
                'The WPT PR for this CL has been merged upstream! {pr_url}'
            ).format(pr_url=pr_url))
    except MergeError:
        # logging's warn() is a deprecated alias; use warning().
        _log.warning('Could not merge PR.')
def create_or_update_pr_from_landed_commit(self, commit,
pull_request=None):
"""Creates or updates a PR from a landed Chromium commit.
Args:
commit: A ChromiumCommit object.
pull_request: Optional, a PullRequest namedtuple.
If specified, updates the PR instead of creating one.
"""
if pull_request:
self.create_or_update_pr_from_commit(
commit, provisional=False, pr_number=pull_request.number)
else:
branch_name = 'chromium-export-' + commit.short_sha
self.create_or_update_pr_from_commit(
commit, provisional=False, pr_branch_name=branch_name)
def create_or_update_pr_from_inflight_cl(self, cl, pull_request=None):
    """Creates or updates a PR from an in-flight Gerrit CL.

    Args:
        cl: A GerritCL object.
        pull_request: Optional, a PullRequest namedtuple.
            If specified, updates the PR instead of creating one.
    """
    commit = cl.fetch_current_revision_commit(self.host)
    patch = commit.format_patch()
    # Dry-run the patch against the local WPT checkout first, so we never
    # push a branch upstream for a patch that cannot apply.
    success, error = self.local_wpt.test_patch(patch)
    if not success:
        _log.error('Gerrit CL patch did not apply cleanly:')
        _log.error(error)
        _log.debug(
            'First 500 characters of patch: << END_OF_PATCH_EXCERPT')
        _log.debug(patch[0:500])
        _log.debug('END_OF_PATCH_EXCERPT')
        return
    footer = ''
    # Change-Id can be deleted from the body of an in-flight CL in Chromium
    # (https://crbug.com/gerrit/12244). We need to add it back. And we've
    # asserted that cl.change_id is present in GerritCL.
    if not self.wpt_github.extract_metadata(CHANGE_ID_FOOTER,
                                            commit.message()):
        _log.warn('Adding missing Change-Id back to %s', cl.url)
        footer += '{}{}\n'.format(CHANGE_ID_FOOTER, cl.change_id)
    # Reviewed-on footer is not in the git commit message of in-flight CLs,
    # but a link to code review is useful so we add it manually.
    footer += 'Reviewed-on: {}\n'.format(cl.url)
    # WPT_REVISION_FOOTER is used by the exporter to check the CL revision.
    footer += '{}{}'.format(WPT_REVISION_FOOTER, cl.current_revision_sha)
    if pull_request:
        # Update path: push the new patchset to the existing PR.
        pr_number = self.create_or_update_pr_from_commit(
            commit,
            provisional=True,
            pr_number=pull_request.number,
            pr_footer=footer)
        # When surface_failures_to_gerrit is enabled, the pull request update comment below
        # is ignored.
        if self.surface_failures_to_gerrit:
            return
        if pr_number is None:
            return
        # TODO(jeffcarp): Turn PullRequest into a class with a .url method
        cl.post_comment(
            ('Successfully updated WPT GitHub pull request with '
             'new revision "{subject}": {pr_url}').format(
                 subject=cl.current_revision_description,
                 pr_url='%spull/%d' % (WPT_GH_URL, pull_request.number),
             ))
    else:
        # Create path: new provisional PR on a branch named after the CL.
        branch_name = 'chromium-export-cl-{}'.format(cl.number)
        pr_number = self.create_or_update_pr_from_commit(
            commit,
            provisional=True,
            pr_footer=footer,
            pr_branch_name=branch_name)
        if pr_number is None:
            return
        cl.post_comment((
            'Exportable changes to web-platform-tests were detected in this CL '
            'and a pull request in the upstream repo has been made: {pr_url}.\n\n'
            'When this CL lands, the bot will automatically merge the PR '
            'on GitHub if the required GitHub checks pass; otherwise, '
            'ecosystem-infra@ team will triage the failures and may contact you.\n\n'
            'WPT Export docs:\n'
            'https://chromium.googlesource.com/chromium/src/+/master'
            '/docs/testing/web_platform_tests.md#Automatic-export-process'
        ).format(pr_url='%spull/%d' % (WPT_GH_URL, pr_number)))
def create_or_update_pr_from_commit(self,
commit,
provisional,
pr_number=None,
pr_footer='',
pr_branch_name=None):
"""Creates or updates a PR from a Chromium commit.
The commit can be either landed or in-flight. The exportable portion of
the patch is extracted and applied to a new branch in the local WPT
repo, whose name is determined by pr_branch_name (if the branch already
exists, it will be recreated from master). The branch is then pushed to
WPT on GitHub, from which a PR is created or updated.
Args:
commit: A ChromiumCommit object.
| |
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2012-2018 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Module exports :class:`Campbell2003`, :class:`Campbell2003SHARE`,
:class:`Campbell2003MblgAB1987NSHMP2008`,
:class:`Campbell2003MblgJ1996NSHMP2008`,
:class:`Campbell2003MwNSHMP2008`
"""
import numpy as np
from openquake.hazardlib.gsim.base import CoeffsTable, GMPE
from openquake.hazardlib.gsim.utils import (
mblg_to_mw_atkinson_boore_87,
mblg_to_mw_johnston_96,
clip_mean)
from openquake.hazardlib import const
from openquake.hazardlib.imt import PGA, SA
class Campbell2003(GMPE):
    """
    Implements GMPE developed by <NAME> and published as "Prediction of
    Strong Ground Motion Using the Hybrid Empirical Method and Its Use in the
    Development of Ground Motion (Attenuation) Relations in Eastern North
    America" (Bulletin of the Seismological Society of America, Volume 93,
    Number 3, pages 1012-1033, 2003). The class implements also the corrections
    given in the erratum (2004).
    """
    #: Supported tectonic region type is stable continental crust given that
    #: the equations have been derived for Eastern North America.
    DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.STABLE_CONTINENTAL

    #: Supported intensity measure types are spectral acceleration,
    #: and peak ground acceleration, see table 6, page 1022 (PGA is assumed
    #: to be equal to SA at 0.01 s)
    DEFINED_FOR_INTENSITY_MEASURE_TYPES = set([
        PGA,
        SA
    ])

    #: Supported intensity measure component is the geometric mean of two
    #: horizontal components
    #: :attr:`~openquake.hazardlib.const.IMC.AVERAGE_HORIZONTAL`
    DEFINED_FOR_INTENSITY_MEASURE_COMPONENT = const.IMC.AVERAGE_HORIZONTAL

    #: Supported standard deviation type is only total, see equation 35, page
    #: 1021
    DEFINED_FOR_STANDARD_DEVIATION_TYPES = set([
        const.StdDev.TOTAL
    ])

    #: No site parameters are needed
    REQUIRES_SITES_PARAMETERS = set()

    #: Required rupture parameter is only magnitude, see equation 30 page
    #: 1021.
    REQUIRES_RUPTURE_PARAMETERS = set(('mag', ))

    #: Required distance measure is closest distance to rupture, see equation
    #: 30 page 1021.
    REQUIRES_DISTANCES = set(('rrup', ))

    def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
        """
        See :meth:`superclass method
        <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
        for spec of input and result values.
        """
        # Fail fast when a caller requests a std-dev type this GMPE
        # does not define.
        assert all(stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
                   for stddev_type in stddev_types)
        C = self.COEFFS[imt]
        mean = self._compute_mean(C, rup.mag, dists.rrup)
        stddevs = self._get_stddevs(C, stddev_types, rup.mag,
                                    dists.rrup.shape[0])
        return mean, stddevs

    def _compute_mean(self, C, mag, rrup):
        """
        Compute mean value according to equation 30, page 1021.
        """
        mean = (C['c1'] +
                self._compute_term1(C, mag) +
                self._compute_term2(C, mag, rrup) +
                self._compute_term3(C, rrup))
        return mean

    def _get_stddevs(self, C, stddev_types, mag, num_sites):
        """
        Return total standard deviation as for equation 35, page 1021.
        """
        stddevs = []
        for _ in stddev_types:
            # Sigma is magnitude-dependent below Mw 7.16 and constant above;
            # the two branches are exhaustive, so sigma is always assigned.
            if mag < 7.16:
                sigma = C['c11'] + C['c12'] * mag
            elif mag >= 7.16:
                sigma = C['c13']
            stddevs.append(np.zeros(num_sites) + sigma)
        return stddevs

    def _compute_term1(self, C, mag):
        """
        This computes the term f1 in equation 31, page 1021
        """
        return (C['c2'] * mag) + C['c3'] * (8.5 - mag) ** 2

    def _compute_term2(self, C, mag, rrup):
        """
        This computes the term f2 in equation 32, page 1021
        """
        c78_factor = (C['c7'] * np.exp(C['c8'] * mag)) ** 2
        R = np.sqrt(rrup ** 2 + c78_factor)
        return C['c4'] * np.log(R) + (C['c5'] + C['c6'] * mag) * rrup

    def _compute_term3(self, C, rrup):
        """
        This computes the term f3 in equation 34, page 1021 but corrected
        according to the erratum.
        """
        f3 = np.zeros_like(rrup)
        # The term is zero within 70 km and accumulates two log-distance
        # contributions beyond 70 km and 130 km respectively.
        idx_between_70_130 = (rrup > 70) & (rrup <= 130)
        idx_greater_130 = rrup > 130
        f3[idx_between_70_130] = (
            C['c9'] * (np.log(rrup[idx_between_70_130]) - np.log(70))
        )
        f3[idx_greater_130] = (
            C['c9'] * (np.log(rrup[idx_greater_130]) - np.log(70)) +
            C['c10'] * (np.log(rrup[idx_greater_130]) - np.log(130))
        )
        return f3

    #: Coefficient tables are constructed from the electronic supplements of
    #: the original paper.
    COEFFS = CoeffsTable(sa_damping=5, table="""\
    IMT    c1       c2     c3       c4      c5        c6        c7     c8     c9     c10     c11    c12      c13
    pga    0.0305   0.633  -0.0427  -1.591  -0.00428  0.000483  0.683  0.416  1.140  -0.873  1.030  -0.0860  0.414
    0.020  1.3535   0.630  -0.0404  -1.787  -0.00388  0.000497  1.020  0.363  0.851  -0.715  1.030  -0.0860  0.414
    0.030  1.1860   0.622  -0.0362  -1.691  -0.00367  0.000501  0.922  0.376  0.759  -0.922  1.030  -0.0860  0.414
    0.050  0.3736   0.616  -0.0353  -1.469  -0.00378  0.000500  0.630  0.423  0.771  -1.239  1.042  -0.0838  0.443
    0.075  -0.0395  0.615  -0.0353  -1.383  -0.00421  0.000486  0.491  0.463  0.955  -1.349  1.052  -0.0838  0.453
    0.100  -0.1475  0.613  -0.0353  -1.369  -0.00454  0.000460  0.484  0.467  1.096  -1.284  1.059  -0.0838  0.460
    0.150  -0.1901  0.616  -0.0478  -1.368  -0.00473  0.000393  0.461  0.478  1.239  -1.079  1.068  -0.0838  0.469
    0.200  -0.4328  0.617  -0.0586  -1.320  -0.00460  0.000337  0.399  0.493  1.250  -0.928  1.077  -0.0838  0.478
    0.300  -0.6906  0.609  -0.0786  -1.280  -0.00414  0.000263  0.349  0.502  1.241  -0.753  1.081  -0.0838  0.482
    0.500  -0.5907  0.534  -0.1379  -1.216  -0.00341  0.000194  0.318  0.503  1.166  -0.606  1.098  -0.0824  0.508
    0.750  -0.5429  0.480  -0.1806  -1.184  -0.00288  0.000160  0.304  0.504  1.110  -0.526  1.105  -0.0806  0.528
    1.000  -0.6104  0.451  -0.2090  -1.158  -0.00255  0.000141  0.299  0.503  1.067  -0.482  1.110  -0.0793  0.543
    1.500  -0.9666  0.441  -0.2405  -1.135  -0.00213  0.000119  0.304  0.500  1.029  -0.438  1.099  -0.0771  0.547
    2.000  -1.4306  0.459  -0.2552  -1.124  -0.00187  0.000103  0.310  0.499  1.015  -0.417  1.093  -0.0758  0.551
    3.000  -2.2331  0.492  -0.2646  -1.121  -0.00154  0.000084  0.310  0.499  1.014  -0.393  1.090  -0.0737  0.562
    4.000  -2.7975  0.507  -0.2738  -1.119  -0.00135  0.000074  0.294  0.506  1.018  -0.386  1.092  -0.0722  0.575
    """)
class Campbell2003SHARE(Campbell2003):
    """
    Extends
    :class:`~openquake.hazardlib.gsim.campbell_2003.Campbell2003` and
    introduces adjustments for style of faulting and default rock soil
    conditions as needed by the SHARE (http://www.share-eu.org/)
    project.
    """
    #: Required rupture parameters are magnitude and rake
    REQUIRES_RUPTURE_PARAMETERS = set(('mag', 'rake'))

    def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
        """
        See :meth:`superclass method
        <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
        for spec of input and result values.
        """
        # extract faulting style and rock adjustment coefficients for the
        # given imt
        C_ADJ = self.COEFFS_FS_ROCK[imt]
        mean, stddevs = super().get_mean_and_stddevs(
            sites, rup, dists, imt, stddev_types)
        # apply faulting style and rock adjustment factor for mean and std;
        # the factors are multiplicative in linear space, hence the
        # exp/log round trip on the log-mean.
        mean = np.log(np.exp(mean) *
                      _compute_faulting_style_term(C_ADJ['Frss'],
                                                   self.CONSTS_FS['pR'],
                                                   self.CONSTS_FS['Fnss'],
                                                   self.CONSTS_FS['pN'],
                                                   rup.rake) * C_ADJ['AFrock'])
        stddevs = np.array(stddevs)
        return mean, stddevs

    #: Coefficients for faulting style and rock adjustment
    COEFFS_FS_ROCK = CoeffsTable(sa_damping=5, table="""\
    IMT       Frss      AFrock
    pga       1.220000  0.735106
    0.020000  1.192000  0.474275
    0.030000  1.178000  0.423049
    0.050000  1.150000  0.550323
    0.075000  1.115000  0.730061
    0.100000  1.080000  0.888509
    0.150000  1.150000  1.094622
    0.200000  1.190000  1.197291
    0.300000  1.230000  1.288309
    0.500000  1.230000  1.311421
    0.750000  1.199444  1.298212
    1.000000  1.196667  1.265762
    1.500000  1.191111  1.197583
    2.000000  1.140000  1.215779
    3.000000  1.140000  1.215779
    4.000000  1.140000  1.215779
    """)

    #: Constants for faulting style adjustment
    CONSTS_FS = {'Fnss': 0.95, 'pN': 0.01, 'pR': 0.81}
def _compute_faulting_style_term(Frss, pR, Fnss, pN, rake):
"""
Compute SHARE faulting style adjustment term.
"""
if rake > 30.0 and rake <= 150.0:
return np.power(Frss, 1 - pR) * np.power(Fnss, -pN)
elif rake > -120.0 and rake <= -60.0:
return np.power(Frss, - pR) * np.power(Fnss, 1 - pN)
else:
return np.power(Frss, - pR) * np.power(Fnss, - pN)
class Campbell2003MblgAB1987NSHMP2008(Campbell2003):
"""
Implement GMPE developed by <NAME> and described in
"Development of semi-empirical attenuation relationships for the CEUS",
U.S. Geological Survey, Award 01HQGR0011, final report.
Document available at:
http://earthquake.usgs.gov/research/external/reports/01HQGR0011.pdf
This GMPE is used by the National Seismic Hazard Mapping Project (NSHMP)
for the 2008 central and eastern US hazard model.
This class replicates the algorithm as implemented in
``subroutine getCampCEUS`` in the ``hazgridXnga2.f`` Fortran code available
at: http://earthquake.usgs.gov/hazards/products/conterminous/2008/software/
The class assumes rupture magnitude to be in Mblg scale (given that MFDs
for central and eastern US are given in this scale). Mblg is converted to
Mw using Atkinson and Boore 1987 conversion equation
Coefficients are given for the B/C (firm rock) conditions.
"""
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
    """
    See :meth:`superclass method
    <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
    for spec of input and result values.
    """
    assert all(stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
               for stddev_type in stddev_types)
    C = self.COEFFS[imt]
    # Convert the rupture magnitude from Mblg to Mw before evaluating
    # the (Mw-based) functional form.
    mag = self._convert_magnitude(rup.mag)
    mean = self._compute_mean(C, mag, dists.rrup)
    # NSHMP post-processing: clip the mean to IMT-dependent bounds.
    mean = clip_mean(imt, mean)
    stddevs = self._get_stddevs(C, stddev_types, mag, dists.rrup.size)
    return mean, stddevs
def _convert_magnitude(self, mag):
"""
Convert magnitude from Mblg to Mw using Atkinson and Boore | |
"""
Helper functions for PASTIS.
"""
import glob
import os
import datetime
import importlib
import itertools
import time
from shutil import copy
import sys
from astropy.io import fits
import astropy.units as u
import fpdf
import logging
import logging.handlers
import numpy as np
from PyPDF2 import PdfFileMerger
from pastis.config import CONFIG_PASTIS
log = logging.getLogger()
def write_fits(data, filepath, header=None, metadata=None):
    """
    Writes a fits file and adds header and metadata when necessary.
    :param data: numpy data (aka image)
    :param filepath: path to save the file, include filename.
    :param header: astropy hdu.header.
    :param metadata: list of MetaDataEntry objects that will get added to header.
    :return: filepath
    """
    # Make sure file ends with fit or fits.
    #if not (filepath.endswith(".fit") or filepath.endswith(".fits")):
    #    filepath += ".fits"
    # Only create the parent directory when the path actually has one:
    # os.makedirs('') raises for a bare filename in the current directory.
    parent_dir = os.path.dirname(filepath)
    if parent_dir and not os.path.exists(parent_dir):
        os.makedirs(parent_dir)
    # Create a PrimaryHDU object to encapsulate the data.
    hdu = fits.PrimaryHDU(data)
    if header is not None:
        hdu.header = header
    # Add metadata to header, warning when FITS limits force truncation
    # (keywords are limited to 8 characters, comments to 47).
    if metadata is not None:
        for entry in metadata:
            if len(entry.name_8chars) > 8:
                print('Fits Header Keyword: ' + entry.name_8chars +
                      ' is greater than 8 characters and will be truncated.')
            if len(entry.comment) > 47:
                print('Fits Header comment for ' + entry.name_8chars +
                      ' is greater than 47 characters and will be truncated.')
            hdu.header[entry.name_8chars[:8]] = (entry.value, entry.comment)
    # Write the HDU to a new file. (The former `fits.HDUList([hdu])` call
    # here was a no-op whose result was discarded; it has been removed.)
    hdu.writeto(filepath, overwrite=True)
    return filepath
def write_all_fits_to_cube(path):
    """
    Write all fits files in a directory to an image cube.

    Directory can *only* contain fits files, and only files that you want in
    the cube. Subdirectories will be ignored.
    :param path: string, path to directory that contains all fits files that should be put into cube; cube gets saved
                 into that same directory
    """
    # Collect regular files only (subdirectories are skipped), read each
    # one, and stack the frames into a single cube.
    file_names = [entry for entry in os.listdir(path)
                  if os.path.isfile(os.path.join(path, entry))]
    frames = [fits.getdata(os.path.join(path, name)) for name in file_names]
    write_fits(np.array(frames), os.path.join(path, 'psf_cube.fits'))
def circle_mask(im, xc, yc, rcirc):
    """Create a circle on array im centered on xc, yc with radius rcirc; inside circle equals 1 (True).

    Bug fix: the original swapped the axes when building the coordinate
    grids, so the returned mask was transposed for non-square images.
    The mask now always has the same shape as ``im``; behavior for square
    images is unchanged.
    """
    ny, nx = np.shape(im)
    # yy varies along rows (axis 0), xx along columns (axis 1).
    yy, xx = np.mgrid[0:ny, 0:nx]
    return (xx - xc) ** 2 + (yy - yc) ** 2 < rcirc ** 2
def zoom_point(im, x, y, bb):
    """
    Cut out a square box from image im centered on (x,y) with half-box size bb.
    :param im: image from which box will be taken
    :param x: x coordinate of center of box
    :param y: y coordinate of center of box
    :param bb: half-box size
    :return: the 2*bb x 2*bb sub-image
    """
    ylo, yhi = int(y - bb), int(y + bb)
    xlo, xhi = int(x - bb), int(x + bb)
    return im[ylo:yhi, xlo:xhi]
def zoom_cen(im, bb):
    """
    Cut out a square box from the image center with half-box size bb.
    :param im: image from which box will be taken
    :param bb: half-box size
    :return: the 2*bb x 2*bb central sub-image
    """
    cy = int(im.shape[0] / 2)
    cx = int(im.shape[1] / 2)
    return im[int(cy - bb):int(cy + bb), int(cx - bb):int(cx + bb)]
def FFT(ef):
    """Centered 2-D FFT of complex array 'ef': shift center to origin, transform, shift back."""
    return np.fft.fftshift(np.fft.fft2(np.fft.ifftshift(ef)))
def IFFT(ef):
    """Centered 2-D inverse FFT of complex array 'ef': shift center to origin, inverse-transform, shift back."""
    return np.fft.ifftshift(np.fft.ifft2(np.fft.fftshift(ef)))
def create_dark_hole(pup_im, iwa, owa, samp):
    """
    Create a dark hole on pupil image pup_im.
    :param pup_im: np.array of pupil image
    :param iwa: inner working angle in lambda/D
    :param owa: outer working angle in lambda/D
    :param samp: sampling factor
    :return: dh_area: np.array, 1 in the annulus between iwa and owa, 0 elsewhere
    """
    xc = pup_im.shape[0] / 2.
    yc = pup_im.shape[1] / 2.
    # *1 converts the boolean masks to integers before subtracting.
    inner = circle_mask(pup_im, xc, yc, iwa * samp) * 1
    outer = circle_mask(pup_im, xc, yc, owa * samp) * 1
    return outer - inner
def dh_mean(im, dh):
    """
    Return the dark hole contrast: the mean of im over the nonzero pixels of dh.

    im and dh have to have the same array size and shape.
    :param im: array, normalized (by direct PSF peak pixel) image
    :param dh: array, dark hole mask
    """
    masked = im * dh
    return np.mean(masked[dh != 0])
@u.quantity_input(aber=u.nm)
def pastis_contrast(aber, matrix_pastis):
    """
    Calculate the contrast with PASTIS matrix model.

    Evaluates the quadratic form aber^T * M * aber.

    :param aber: aberration vector, its length is number of segments, WFE aberration coefficients in NANOMETERS
    :param matrix_pastis: PASTIS matrix, in contrast/nm^2
    :return: contrast value; `.value` strips the astropy units from the result
    """
    result = np.matmul(np.matmul(aber, matrix_pastis), aber)
    return result.value
def calc_statistical_mean_contrast(pastismatrix, cov_segments, coro_floor):
    """
    Analytically calculate the *statistical* mean contrast for a set of segment requirements.
    :param pastismatrix: array, PASTIS matrix [segs, modes]
    :param cov_segments: array, segment-space covariance matrix Ca
    :param coro_floor: float, coronagraph contrast in absence of aberrations
    :return: mean_c_stat, float
    """
    # <c> = tr(Ca * M) + c0
    trace_term = np.trace(np.matmul(cov_segments, pastismatrix))
    return trace_term + coro_floor
def calc_variance_of_mean_contrast(pastismatrix, cov_segments):
    """
    Analytically calculate the variance of the *statistical* mean contrast for a set of segment requirements.
    :param pastismatrix: array, PASTIS matrix [segs, modes]
    :param cov_segments: array, segment-space covariance matrix Ca
    :return: var, float
    """
    # var = 2 * tr(M * Ca * M * Ca); products grouped exactly as before to
    # keep floating-point results bit-identical.
    inner = np.matmul(pastismatrix, cov_segments)
    nested = np.matmul(cov_segments, inner)
    return 2 * np.trace(np.matmul(pastismatrix, nested))
def get_segment_list(instrument):
    """
    Horribly hacky function to get correct segment number list for an instrument (LUVOIR, or HiCAT and JWST).
    We can assume that all implemented instruments start their numbering at 0, at the center segment.
    LUVOIR doesn't use the center segment though, so we start at 1 and go until 120, for a total of 120 segments.
    HiCAT does use it, so we start at 0 and go to 36 for a total of 37 segments.
    JWST does not have a center segment, but it uses custom segment names anyway, so we start the numbering with zero,
    at the first segment that is actually controllable (A1).
    :param instrument: string, "HiCAT", "LUVOIR" or "JWST"
    :return: seglist, array of segment numbers (names! at least in LUVOIR and HiCAT case. For JWST, it's the segment indices.)
    """
    supported_instruments = ('LUVOIR', 'HiCAT', 'JWST', 'RST')
    if instrument not in supported_instruments:
        raise ValueError('The instrument you requested is not implemented. Try with "LUVOIR", "HiCAT", "JWST" or "RST" instead.')
    seglist = np.arange(CONFIG_PASTIS.getint(instrument, 'nb_subapertures'))
    if instrument == 'LUVOIR':
        # Drop the center segment with label '0' when working with LUVOIR.
        seglist += 1
    return seglist
def apply_mode_to_luvoir(pmode, luvoir):
    """
    Apply a PASTIS mode to the segmented mirror (SM) and return the propagated wavefront "through" the SM.
    This function first flattens the segmented mirror and then applies all segment coefficients from the input mode
    one by one to the segmented mirror.
    :param pmode: array, a single PASTIS mode [nseg] or any other segment phase map in NANOMETERS
    :param luvoir: LuvoirAPLC
    :return: hcipy.Wavefront of the segmented mirror, hcipy.Wavefront of the detector plane
    """
    # Start from a flat mirror so only the requested mode is present.
    luvoir.flatten()
    # Segment numbering on this SM starts at 1, hence the enumerate offset.
    for seg_number, coeff in enumerate(pmode, start=1):
        coeff = coeff * u.nm  # the LUVOIR modes come out in units of nanometers
        # /2 because this SM works in surface, not OPD
        luvoir.set_segment(seg_number, coeff.to(u.m).value / 2, 0, 0)
    # Propagate the aperture wavefront through the SM.
    psf, planes = luvoir.calc_psf(return_intermediate='efield')
    return planes['seg_mirror'], psf
def segment_pairs_all(nseg):
    """
    Return a generator with all possible segment pairs, including repeating ones.
    E.g. if segments are 0, 1, 2, then the returned pairs will be:
    00, 01, 02, 10, 11, 12, 20, 21, 22
    :param nseg: int, number of segments
    :return:
    """
    segment_indices = np.arange(nseg)
    return itertools.product(segment_indices, repeat=2)
def segment_pairs_non_repeating(nseg):
    """
    Return a generator with all possible non-repeating segment pairs.
    E.g. if segments are 0, 1, 2, then the returned pairs will be:
    00, 01, 02, 11, 12, 22
    :param nseg: int, number of segments
    :return:
    """
    segment_indices = np.arange(nseg)
    return itertools.combinations_with_replacement(segment_indices, r=2)
def pastis_matrix_measurements(nseg):
    """
    Calculate the total number of measurements needed for a PASTIS matrix with nseg segments.

    Only the upper triangle of the symmetric matrix (including the diagonal)
    needs to be measured, i.e. nseg * (nseg + 1) / 2 entries.

    :param nseg: int, total number of segments
    :return: int, total number of measurements
    """
    # Integer arithmetic keeps the result exact for arbitrarily large nseg;
    # the previous float division `(nseg**2 + nseg) / 2` loses precision
    # once the intermediate exceeds 2**53.
    return (nseg * (nseg + 1)) // 2
def symmetrize(array):
"""
Return a symmetrized version of NumPy array a.
Values 0 are replaced by the array value at the symmetric
position (with respect to the diagonal), i.e. if a_ij = 0,
then the returned array a' is such that a'_ij = a_ji.
Diagonal values are left untouched.
:param array: square NumPy array, such | |
<reponame>shadowridgedev/lingvo
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Speech model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from six.moves import range
from six.moves import zip
import tensorflow as tf
from lingvo.core import base_layer
from lingvo.core import base_model
from lingvo.core import lr_schedule
from lingvo.core import metrics
from lingvo.core import py_utils
from lingvo.tasks.asr import decoder
from lingvo.tasks.asr import decoder_utils
from lingvo.tasks.asr import encoder
from lingvo.tasks.asr import frontend as asr_frontend
from lingvo.tools import audio_lib
# Bundle of beam-search decoder outputs, field shapes as follows:
# hyps: [num_beams, num_hyps_per_beam] of serialized Hypothesis protos.
# ids: [num_beams * num_hyps_per_beam, max_target_length].
# lens: [num_beams * num_hyps_per_beam].
# scores: [num_beams, num_hyps_per_beam].
# decoded: [num_beams, num_hyps_per_beam].
DecoderTopK = collections.namedtuple(
    'topk', ['hyps', 'ids', 'lens', 'scores', 'decoded'])  # pyformat: disable
class AsrModel(base_model.BaseTask):
"""Speech model."""
@classmethod
def Params(cls):
  """Returns the default task Params for an AsrModel."""
  p = super(AsrModel, cls).Params()
  p.encoder = encoder.AsrEncoder.Params()
  p.decoder = decoder.AsrDecoder.Params()
  p.Define(
      'frontend', None,
      'ASR frontend to extract features from input. Defaults to no frontend '
      'which means that features are taken directly from the input.')
  p.Define(
      'target_key', '', 'If non-empty, will use the specified key from '
      'input_batch.additional_tgts to set training targets.')
  tp = p.train
  # Piecewise-constant learning-rate decay at the listed global steps.
  tp.lr_schedule = (
      lr_schedule.PiecewiseConstantLearningRateSchedule.Params().Set(
          boundaries=[350000, 500000, 600000], values=[1.0, 0.1, 0.01,
                                                       0.001]))
  # Variational noise kicks in only after vn_start_step training steps.
  tp.vn_start_step = 20000
  tp.vn_std = 0.075
  tp.l2_regularizer_weight = 1e-6
  tp.learning_rate = 0.001
  tp.clip_gradient_norm_to_value = 1.0
  tp.grad_norm_to_clip_to_zero = 100.0
  tp.tpu_steps_per_loop = 20
  return p
@base_layer.initializer
def __init__(self, params):
  """Constructs an AsrModel from configured `params`.

  Creates the encoder, decoder and (optional) frontend children inside the
  task's variable scope.

  Args:
    params: Configured Params for this task (see `Params()`).

  Raises:
    ValueError: If `params.name` is not set.
  """
  if not params.name:
    raise ValueError('params.name not set.')
  super(AsrModel, self).__init__(params)
  p = self.params
  with tf.variable_scope(p.name):
    # Construct the model. Children are only created for the components
    # that are configured; default child names are filled in when missing.
    if p.encoder:
      if not p.encoder.name:
        p.encoder.name = 'enc'
      self.CreateChild('encoder', p.encoder)
    if p.decoder:
      if not p.decoder.name:
        p.decoder.name = 'dec'
      self.CreateChild('decoder', p.decoder)
    if p.frontend:
      self.CreateChild('frontend', p.frontend)
def _MakeDecoderTheta(self, theta):
  """Builds the theta that is handed to the decoder for metrics and loss.

  Subclasses can override this method to attach extra values to the theta
  passed to the decoder. For example, to pass the one-hot vector indicating
  which data source was selected:

  def _MakeDecoderTheta(self, theta):
    decoder_theta = super(MyModel, self)._MakeDecoderTheta(theta)
    decoder_theta.child_onehot = self.input_generator.GetInputSourceOneHot()
    return decoder_theta

  Args:
    theta: A `.NestedMap` object containing variable values used to compute
      loss and metrics.

  Returns:
    A deep copy of the decoder subtree of `theta`.
  """
  decoder_theta = theta.decoder.DeepCopy()
  return decoder_theta
def ComputePredictions(self, theta, input_batch):
  """Encodes the source features and runs the decoder forward pass.

  Args:
    theta: A `.NestedMap` of variable values for this task.
    input_batch: A `.NestedMap` with `src`, `tgt` and (optionally)
      `additional_tgts` fields.

  Returns:
    Decoder predictions, as returned by `decoder.ComputePredictions`.
  """
  p = self.params
  input_batch_src = input_batch.src
  encoder_outputs = self._FrontendAndEncoderFProp(theta, input_batch_src)
  # When target_key is configured, train against the alternative targets.
  if p.target_key:
    tf.logging.info(
        'Using batch.additional_tgts[%s] to source '
        'tgts instead of batch.tgts.', p.target_key)
    tgt = input_batch.additional_tgts[p.target_key]
  else:
    tgt = input_batch.tgt
  decoder_theta = self._MakeDecoderTheta(theta)
  return self.decoder.ComputePredictions(decoder_theta, encoder_outputs, tgt)
def ComputeLoss(self, theta, input_batch, predictions):
  """Computes the decoder loss against the (possibly re-keyed) targets."""
  target_key = self.params.target_key
  # Mirror ComputePredictions: an alternative target set may be configured.
  if target_key:
    tgt = input_batch.additional_tgts[target_key]
  else:
    tgt = input_batch.tgt
  decoder_theta = self._MakeDecoderTheta(theta)
  return self.decoder.ComputeLoss(decoder_theta, predictions, tgt)
def _FrontendAndEncoderFProp(self, theta, input_batch_src):
  """FProps through the optional frontend and then the encoder.

  Args:
    theta: A NestedMap object containing weights' values of this layer and its
      children layers.
    input_batch_src: An input NestedMap as per `BaseAsrFrontend.FProp`.

  Returns:
    A NestedMap as from `AsrEncoder.FProp`.
  """
  # Feature extraction is a no-op unless a frontend is configured; in that
  # case features are taken directly from the input.
  if self.params.frontend:
    input_batch_src = self.frontend.FProp(theta.frontend, input_batch_src)
  return self.encoder.FProp(theta.encoder, input_batch_src)
def _GetTopK(self, decoder_outs, tag=''):
  """Packs beam-search outputs into a `DecoderTopK` tuple.

  Args:
    decoder_outs: Beam-search decode output with `topk_hyps`, `topk_ids`,
      `topk_lens`, `topk_scores` and `topk_decoded` fields.
    tag: Optional string suffix appended to the exported tensor names.

  Returns:
    A `DecoderTopK` namedtuple; `decoded` and `scores` are reshaped to
    [num_beams, num_hyps_per_beam].
  """
  hyps = decoder_outs.topk_hyps
  # tf.identity gives these tensors stable names for export/inference.
  ids = tf.identity(decoder_outs.topk_ids, name='TopKLabelIds' + tag)
  lens = tf.identity(decoder_outs.topk_lens, name='TopKLabelLengths' + tag)
  scores = decoder_outs.topk_scores
  decoded = decoder_outs.topk_decoded
  if ids is not None:
    # lens - 1: presumably trims a trailing terminator token before
    # converting ids back to strings — confirm with the decoder's contract.
    decoded = self.input_generator.IdsToStrings(ids, lens - 1)
    decoded = tf.identity(decoded, name='top_k_decoded%s' % tag)
    decoded = tf.reshape(decoded, tf.shape(hyps))
  if scores is not None and hyps is not None:
    scores = tf.reshape(scores, tf.shape(hyps))
  return DecoderTopK(hyps, ids, lens, scores, decoded)
def _ComputeNormalizedWER(self, hyps, refs):
  """Computes WER error/word counts with '<epsilon>' tokens stripped.

  Args:
    hyps: string tensor of hypotheses, [num_transcripts * hyps_per_beam].
    refs: string tensor of references, aligned with `hyps`.

  Returns:
    A (norm_wer_errors, norm_wer_words) pair, each reshaped to
    [num_transcripts, num_hyps_per_beam].
  """
  # Filter out all '<epsilon>' tokens for norm_wer computation.
  hyps_no_epsilon = tf.regex_replace(hyps, '(<epsilon>)+', ' ')
  # norm_wer is size [num_transcripts * hyps_per_beam, 2]
  norm_wer = decoder_utils.ComputeWer(hyps_no_epsilon, refs)
  # Split into two tensors of size [num_transcripts * hyps_per_beam, 1]
  norm_wer_errors, norm_wer_words = tf.split(norm_wer, [1, 1], 1)
  shape = [-1, self.params.decoder.beam_search.num_hyps_per_beam]
  norm_wer_errors = tf.reshape(norm_wer_errors, shape)
  norm_wer_words = tf.reshape(norm_wer_words, shape)
  return norm_wer_errors, norm_wer_words
def AddAdditionalDecoderMetricsToGraph(
    self, topk_hyps, filtered_hyps, filtered_refs, input_batch, decoder_outs):
  """Returns a dict of metrics which should be computed from decoded hyps.

  Args:
    topk_hyps: `DecoderTopK` tuple from `_GetTopK`.
    filtered_hyps: flat string tensor of noise-filtered hypotheses.
    filtered_refs: flat string tensor of noise-filtered references.
    input_batch: the `NestedMap` input batch being decoded.
    decoder_outs: raw beam-search decode output.

  Returns:
    A dict mapping metric names to Tensors; empty in the base class.
  """
  # The base class implementation returns an empty dictionary. Sub-classes can
  # provide their own implementation.
  return {}
def Decode(self, input_batch):
  """Constructs the inference graph.

  Args:
    input_batch: A `NestedMap` with `src` and `tgt` fields.

  Returns:
    A dict of decoder output Tensors and metrics
    (see `_ComputeDecoderMetrics`).
  """
  p = self.params
  with tf.name_scope('fprop'), tf.name_scope(p.name):
    encoder_outputs = self._FrontendAndEncoderFProp(self.theta,
                                                    input_batch.src)
    # Contextualized decoders need their context map set before beam
    # search runs.
    if 'contextualizer' in self.decoder.theta:
      self.decoder.contextualizer.SetContextMap(
          input_batch.tgt, self.decoder.theta.contextualizer)
    decoder_outs = self.decoder.BeamSearchDecode(encoder_outputs)
    return self._ComputeDecoderMetrics(decoder_outs, input_batch)
def _ComputeDecoderMetrics(self, decoder_outs, input_batch):
  """Computes metrics on output from decoder.

  Args:
    decoder_outs: A `BeamSearchDecodeOutput`, a namedtuple containing the
      decode results.
    input_batch: A `NestedMap` of tensors representing the source, target,
      and other components of the input batch.

  Returns:
    A dict of Tensors containing decoder output and metrics.
  """
  p = self.params
  topk = self._GetTopK(decoder_outs)
  utt_ids = input_batch.sample_ids
  tgt = input_batch.tgt
  if p.target_key:
    tgt = input_batch.additional_tgts[p.target_key]
  # Reference lengths are the non-padded token counts minus one
  # (presumably dropping a trailing terminator — confirm with the
  # input generator's labeling convention).
  transcripts = self.input_generator.IdsToStrings(
      tgt.labels, tf.cast(
          tf.reduce_sum(1.0 - tgt.paddings, 1) - 1.0, tf.int32))
  # Filter out all isolated '<noise>' tokens.
  noise_pattern = ' <noise> |^<noise> | <noise>$|^<noise>$'
  filtered_refs = tf.regex_replace(transcripts, noise_pattern, ' ')
  filtered_hyps = tf.regex_replace(topk.decoded, noise_pattern, ' ')
  # Compute translation quality scores for all hyps.
  # Tile each reference so it lines up with its num_hyps_per_beam hyps.
  filtered_refs = tf.tile(
      tf.reshape(filtered_refs, [-1, 1]),
      [1, p.decoder.beam_search.num_hyps_per_beam])
  filtered_hyps = tf.reshape(filtered_hyps, [-1])
  filtered_refs = tf.reshape(filtered_refs, [-1])
  norm_wer_errors, norm_wer_words = self._ComputeNormalizedWER(
      filtered_hyps, filtered_refs)
  ret_dict = {
      'target_ids': tgt.ids,
      'target_labels': tgt.labels,
      'target_weights': tgt.weights,
      'target_paddings': tgt.paddings,
      'utt_id': utt_ids,
      'transcripts': transcripts,
      'topk_decoded': topk.decoded,
      'topk_ids': topk.ids,
      'topk_lens': topk.lens,
      'topk_scores': topk.scores,
      'norm_wer_errors': norm_wer_errors,
      'norm_wer_words': norm_wer_words,
  }
  # Merge in any subclass-provided metrics.
  ret_dict.update(
      self.AddAdditionalDecoderMetricsToGraph(
          topk, filtered_hyps, filtered_refs, input_batch, decoder_outs))
  return ret_dict
def CreateAdditionalDecoderMetrics(self):
  """Returns a dictionary of additional metrics which should be computed.

  Returns:
    A dict mapping metric names to `metrics.Metric` instances; empty in
    the base class.
  """
  # The base class implementation returns an empty dictionary. Sub-classes can
  # provide their own implementation.
  return {}
def CreateDecoderMetrics(self):
  """Builds the metrics dict tracked by the decoder job.

  Returns:
    A dict mapping metric name to a metric object, including any
    subclass-specific additions.
  """
  decoder_metrics = {
      'num_samples_in_batch': metrics.AverageMetric(),
      'wer': metrics.AverageMetric(),  # Word error rate.
      'norm_wer': metrics.AverageMetric(),  # Normalized word error rate.
      'sacc': metrics.AverageMetric(),  # Sentence accuracy.
      'ter': metrics.AverageMetric(),  # Token error rate.
      'corpus_bleu': metrics.CorpusBleuMetric(),
      'oracle_norm_wer': metrics.AverageMetric(),
  }
  # Merge in any additional metrics that subclasses want computed.
  decoder_metrics.update(self.CreateAdditionalDecoderMetrics())
  return decoder_metrics
def UpdateAdditionalMetrics(self, dec_out_dict, dec_metrics_dict):
  """Updates and returns a dictionary of metrics based on decoded hyps.

  Args:
    dec_out_dict: dict of decoder outputs (see `_ComputeDecoderMetrics`).
    dec_metrics_dict: dict of metric objects to update.

  Returns:
    The (possibly updated) metrics dict.
  """
  # Can be implemented in sub-classes to perform any model specific behavior.
  # The default implementation just returns the metrics unchanged.
  del dec_out_dict
  return dec_metrics_dict
# TODO(prabhavalkar): Add support to save out the decoded hypotheses.
def PostProcessDecodeOut(self, dec_out_dict, dec_metrics_dict):
p = self.params
topk_scores = dec_out_dict['topk_scores']
topk_decoded = dec_out_dict['topk_decoded']
transcripts = dec_out_dict['transcripts']
utt_id = dec_out_dict['utt_id']
norm_wer_errors = dec_out_dict['norm_wer_errors']
norm_wer_words = dec_out_dict['norm_wer_words']
target_labels = dec_out_dict['target_labels']
target_paddings = dec_out_dict['target_paddings']
topk_ids = dec_out_dict['topk_ids']
topk_lens = dec_out_dict['topk_lens']
assert len(transcripts) == len(target_labels)
assert len(transcripts) == len(target_paddings)
assert len(transcripts) == len(topk_decoded)
assert len(utt_id) == len(transcripts)
assert (len(topk_ids) == p.decoder.beam_search.num_hyps_per_beam *
len(transcripts))
assert len(norm_wer_errors) == len(transcripts)
assert len(norm_wer_words) == len(transcripts)
dec_metrics_dict['num_samples_in_batch'].Update(len(transcripts))
def GetRefIds(ref_ids, ref_paddinds):
assert len(ref_ids) == len(ref_paddinds)
return_ids = []
for i in range(len(ref_ids)):
if ref_paddinds[i] == 0:
return_ids.append(ref_ids[i])
return return_ids
total_errs = 0
total_oracle_errs = 0
total_ref_words = 0
total_token_errs = 0
total_ref_tokens = 0
total_norm_wer_errs = 0
total_norm_wer_words = 0
total_accurate_sentences = 0
key_value_pairs = []
for i in range(len(transcripts)):
ref_str = transcripts[i]
tf.logging.info('utt_id: %s', utt_id[i])
tf.logging.info(' ref_str: %s', ref_str)
hyps = topk_decoded[i]
ref_ids = GetRefIds(target_labels[i], target_paddings[i])
hyp_index = i * p.decoder.beam_search.num_hyps_per_beam
top_hyp_ids = topk_ids[hyp_index][:topk_lens[hyp_index]]
total_ref_tokens += len(ref_ids)
_, _, _, token_errs = decoder_utils.EditDistanceInIds(
ref_ids, top_hyp_ids)
total_token_errs += token_errs
assert p.decoder.beam_search.num_hyps_per_beam == len(hyps)
filtered_ref = decoder_utils.FilterNoise(ref_str)
filtered_ref = decoder_utils.FilterEpsilon(filtered_ref)
oracle_errs = norm_wer_errors[i][0]
for n, (score, hyp_str) in enumerate(zip(topk_scores[i], hyps)):
tf.logging.info(' %f: %s', score, hyp_str)
filtered_hyp = decoder_utils.FilterNoise(hyp_str)
filtered_hyp = decoder_utils.FilterEpsilon(filtered_hyp)
ins, subs, dels, errs = decoder_utils.EditDistance(
filtered_ref, filtered_hyp)
# Note that these numbers are not consistent with what is used to
# compute normalized WER. In particular, these numbers will be inflated
# when the transcript contains punctuation.
tf.logging.info(' ins: %d, subs: %d, del: %d, total: %d', ins, subs,
dels, errs)
hyp_norm_wer_errors = norm_wer_errors[i][n]
hyp_norm_wer_words = norm_wer_words[i][n]
# Only aggregate scores of the top hypothesis.
if n == 0:
total_errs += errs
total_ref_words += | |
<gh_stars>0
# (C) Copyright 2020 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
import configparser
import datetime
import logging
import os
import pathlib
from climetlab.scripts.tools import parse_args
from .str_utils import CamelCase, alphanum, camelCase, dashes, underscores
LOG = logging.getLogger(__name__)
# import climetlab.debug
# Licence text snippets inserted into the files of the generated plugin.
APACHE_LICENCE = """This software is licensed under the terms of the Apache Licence Version 2.0
which can be obtained at http://www.apache.org/licenses/LICENSE-2.0."""

# ECMWF-specific wrapper around the Apache licence; {year} is filled in
# at generation time.
PREFIX_ECMWF_LICENCE = (
    """(C) Copyright {year} European Centre for Medium-Range Weather Forecasts."""
)

POSTFIX_ECMWF_LICENCE = """In applying this licence, ECMWF does not waive the privileges and immunities
granted to it by virtue of its status as an intergovernmental organisation
nor does it submit to any jurisdiction."""
class PluginContext:
    """State needed to generate a new CliMetLab plugin from a template folder.

    Transformers register themselves in `self._transformers` when they are
    instantiated (see `Transformer.fill`); calling the context on a string
    applies all of them, repeatedly, until nothing changes.
    """

    def __init__(self, kind, **kwargs):
        # kind selects both the template folder and the transformer list
        # in TRANSFORMERS_CLASSES (e.g. a dataset vs. source plugin).
        self.kind = kind
        self._transformers = {}
        # Values supplied on the command line; transformers read these
        # before falling back to interactive prompting.
        self.kwargs = kwargs

    def fill_form(self):
        """Instantiates every transformer for this kind (each may prompt)."""
        for t in TRANSFORMERS_CLASSES[self.kind]:
            t(self)

    @property
    def template_dir(self):
        """Absolute path of the template folder for this plugin kind."""
        here = os.path.dirname(__file__)
        return os.path.realpath(os.path.join(here, "templates", self.kind))

    @property
    def output_dir(self):
        # The placeholder is expanded through __call__ into the final
        # plugin folder name.
        return self("climetlab-plugin-name-climetlab-template")

    def check_output_dir(self):
        """Raises if the output folder already exists (never overwrites)."""
        if os.path.exists(self.output_dir):
            raise Exception(
                f"Folder {self.output_dir} already exists. Not overwriting it."
            )

    def create_plugin(self):
        """Renders every template file into the output folder and prints help."""
        self.check_output_dir()
        for path in self.template_files_list():
            template = os.path.join(self.template_dir, path)
            output = os.path.join(self.output_dir, path)
            # File names may contain placeholders too.
            output = self(output)
            LOG.info(f"Creating {output}")
            with open(template, "r") as f:
                txt = f.read()
            txt = self(txt)
            os.makedirs(os.path.dirname(output), exist_ok=True)
            with open(output, "w") as f:
                f.write(txt)
        print(f"Plugin built in {self.output_dir}")
        print(self.final_help())

    def template_files_list(self):
        """Returns template file paths, relative to the template directory."""
        cwd = os.getcwd()
        os.chdir(self.template_dir)
        lst = [str(f) for f in pathlib.Path(".").glob("**/*") if os.path.isfile(str(f))]
        # TODO: find a nicer way to avoid __path__ folders.
        lst = [f for f in lst if "__pycache__" not in f]
        os.chdir(cwd)
        return lst

    def __call__(self, txt):
        """Applies all registered transformers to txt until a fixed point."""
        if txt is None:
            return None
        assert isinstance(txt, str), txt
        original = txt
        for k, transformer in self._transformers.items():
            txt = transformer(txt)
        # Replacements can themselves contain placeholders, so recurse
        # whenever a pass changed something.
        if txt != original:
            txt = self(txt)
        return txt

    def get_default_email(self):
        """Best-effort default email: gitconfig, else derived from full name."""
        try:
            return self._gitconfig("email")
        except: # noqa:E722
            # NOTE(review): '<EMAIL>' looks like a redacted placeholder for
            # the fallback domain — confirm the intended address.
            return f'{self._transformers["full_name"].value.replace(" ", ".").lower()}@<EMAIL>'

    def get_default_full_name(self):
        """Best-effort default full name: gitconfig, else a generic placeholder."""
        try:
            return self._gitconfig("name")
        except: # noqa:E722
            return "Joe Developer"

    def _gitconfig(self, key):
        """Reads user.<key> from ~/.gitconfig; raises when unavailable/disabled."""
        if os.environ.get("CLIMETLAB_PLUGIN_TOOLS_NO_GUESS"):
            raise Exception("CLIMETLAB_PLUGIN_TOOLS_NO_GUESS is set.")
        config = configparser.ConfigParser()
        gitconfig = os.path.expanduser("~/.gitconfig")
        config.read(gitconfig)
        value = config["user"][key]
        LOG.info(f"Found {key} in gitconfig {value}")
        return value

    def final_help(self):
        """Returns post-generation instructions with placeholders expanded."""
        txt = """
--------------------------------------------------------------------
Climetlab plugin generated successfully. Next steps:
1. Create a repository on github at http://github.com/repo_url_climetlab_template.
2. Push to the repository as instructed by github:
cd climetlab-plugin-name-climetlab-template
git init
git add .
git commit -m'first commit'
git branch -M main
git remote add origin http://github.com/repo_url_climetlab_template
git push --set-upstream origin main
[Optional: See tests running http://github.com/repo_url_climetlab_template/actions]
3 - Publish to pipy (pip) manually:
python -m pip install --upgrade pip
pip install setuptools wheel twine
twine upload dist/*
# Need pipy login/password (create an account at https://pypi.org)
Others can now do `pip install climetlab-plugin-name-climetlab-template`.
4. Publish automatically from Github to pypi. [Optional]
Edit climetlab-plugin-name-climetlab-template/.github/workflows/check-and-publish to point to pypi instead of test.pypi.
Create a token from pypi at https://pypi.org/manage/account/token/
Add the token as a Github secret on the name PYPI_API_TOKEN at https://github.com/repo_url_climetlab_template/settings/secrets/actions/new
You are all set! Push the github repository and release from http://github.com/repo_url_climetlab_template/releases/new.
""" # noqa: E501
        return self(txt)
class Transformer:
    """Replaces one placeholder pattern in template text with a value.

    The value is obtained (in priority order) from the command line, from
    interactive user input, or from a default. Instantiating a transformer
    registers it in the context so that `context(txt)` applies it.
    """

    _help = ""
    # glob: None -> replace the exact pattern only; True -> also replace
    # underscore/dash/CamelCase/camelCase variants; a list -> custom adapters.
    glob = None

    def __init__(
        self,
        context,
        key,
        default=None,
        pattern=None,
        value=None,
        force_prefix="",
    ):
        # All text inputs are expanded through the context first, so they
        # may themselves contain placeholders.
        LOG.debug(f"New Transformer({key})")
        self._context = context
        self.key = key
        self.default = self._context(default)
        self.force_prefix = self._context(force_prefix)
        self.pattern = pattern
        self.value = value
        self.help = self._context(self._help)
        self.fill()
        LOG.debug(f"Transformer({key}) created")

    def __repr__(self) -> str:
        return f"Transformer({self.key}, pattern={self.pattern}, value={self.value})"

    def fill(self):
        """Resolves pattern/adapters, reads the value, and registers self."""
        if self.pattern is None:
            self.pattern = self.key
        if not self.glob:
            self.adapts = [lambda x: x]
        elif self.glob is True:
            self.adapts = [underscores, dashes, CamelCase, camelCase]
        else:
            self.adapts = self.glob
        self.read_value()
        # This suffix marks the strings in template files to be replaced.
        self.pattern = self.pattern + "_climetlab_template"
        self._context._transformers[self.key] = self

    def prompt(self):
        return f"Please enter {self.desc} ('?' for help)"

    def default_prompt(self):
        if self.default:
            return f"Hit 'return' to use the default value '{self.force_prefix}{self.default}'"
        return ""

    def try_reading_from_context(self):
        """Uses a value supplied on the command line, if any."""
        if self._context.kwargs.get(self.key, None):
            self.value = self._context.kwargs[self.key]
            assert isinstance(self.value, str)
            assert isinstance(self.force_prefix, str)
            print(f"\n--> Using {self.force_prefix + self.value} (from command line)")
            return True

    def try_reading_from_user(self):
        """Prompts interactively; 'h' or '?' prints help and re-prompts."""
        print()
        value = input(">>>> " + self.force_prefix)
        if value == "h" or value == "?":
            print(f"?\n {self.help}")
            if self.default is not None:
                print(f" Default value: {self.force_prefix}{self.default}")
            return self.try_reading_from_user()
        if value:
            self.value = value
            print(f"\n--> Using {self.force_prefix + self.value}")
            return True

    def try_reading_from_default(self):
        if self.default is not None:
            print(f"\n--> Using {self.force_prefix + self.default} (default)")
            self.value = self.default
            return True

    def read_value(self):
        """Obtains the value: command line, then user input, then default."""
        print()
        print(self.prompt())
        print(self.default_prompt())
        if self.try_reading_from_context():
            return
        if self.try_reading_from_user():
            return
        if self.try_reading_from_default():
            return
        # Nothing obtained (empty input, no default): ask again.
        return self.read_value()

    def __call__(self, txt):
        """Replaces every adapted variant of the pattern found in txt."""
        for adapt in self.adapts:
            p = adapt(self.pattern)
            v = adapt(self.value)
            if p in txt:
                LOG.debug(f'Replacing "{p}" by "{v}"')
                LOG.debug(f" k={self.key}")
                LOG.debug(f" p: {self.pattern} -> {p}")
                LOG.debug(f" v: {self.value} -> {v}")
                txt = txt.replace(p, v)
        return txt
class NoPromptTransformer(Transformer):
    """Transformer whose value is provided programmatically; never prompts."""

    def read_value(self):
        # The value was supplied via the constructor; just log it.
        LOG.debug(f"{self.key}: not prompt using {self.value}.")
class GlobNoPromptTransformer(NoPromptTransformer):
    """Non-prompting transformer that also replaces case/dash variants."""

    # glob=True enables underscore/dash/CamelCase/camelCase replacement.
    glob = True
class SourceNameTransformer(GlobNoPromptTransformer):
    """Derives the source name from the plugin name (dropping a '-source' suffix)."""

    def __init__(self, context):
        plugin_name = context._transformers["plugin_name"].value
        suffix = "-source"
        if plugin_name.endswith(suffix):
            plugin_name = plugin_name[: -len(suffix)]
        super().__init__(context, "source_name", value=plugin_name)
class DatasetNameTransformer(Transformer):
    """Prompts for the dataset name and derives the full plugin-dataset name."""

    desc = "the dataset name"
    _help = """The dataset name is used as follow:
A climetlab dataset plugin package can provides one or more
datasets. This scripts creates a plugin with one dataset.
The dataset name will be used by the end users to access
the data through CliMetLab with:
cml.load_dataset("dataset-name", ...)
The convention is to make the dataset name start with
"plugin-name-climetlab-template".
The dataset name can easily be modified afterwards, without
regenerating a new plugin, simply by editing the setup.py."""
    glob = True

    def __init__(self, context):
        super().__init__(
            context,
            "dataset_name",
            default="",
            force_prefix="plugin-name-climetlab-template",
        )

    def fill(self):
        # Normalize the entered name (dashes, lowercase, alphanumeric only)
        # and then register the derived "dataset_full_name" placeholder too.
        super().fill()
        self.value = dashes(self.value).lower()
        self.value = alphanum(self.value)
        if self.value:
            # Strip any leading dashes left over from normalization.
            while self.value.startswith("-"):
                self.value = self.value[1:]
            name = "plugin-name-climetlab-template" + "-" + self.value
        else:
            # Empty input: single-dataset plugin, exposed as "main".
            self.value = "main"
            name = "plugin-name-climetlab-template"
        name = self._context(name)
        GlobNoPromptTransformer(self._context, "dataset_full_name", value=name)
class PluginNameTransformer(Transformer):
    """Prompts for the plugin name, the key identifier of the generated package."""

    desc = "the plugin name"
    _help = """The plugin name is used to define:
- The python package name `import climetlab_{plugin_name} `
- The pip package name `pip install climetlab-{plugin-name}`.
It will also be used to suggest and appropriate URL on github.
The plugin_name can be the name of the project you are working on,
but notice that it should be specific enough as only one plugin with
a given name can be installed. Highly generic names (such as "meteo",
"domain", "copernicus", "country-name" are not recommended.
The plugin name cannot be easily modified afterwards.
You would need to regenerate a new one and copy existing code."""
    glob = True

    def __init__(self, context):
        super().__init__(
            context,
            "plugin_name",
            default="my_plugin",
        )
        # Fail early (before any prompting of later transformers) if the
        # output folder for this plugin name already exists.
        context.check_output_dir()
class EmailTransformer(Transformer):
    """Prompts for the maintainer email used in the generated setup.py."""

    desc = "your email"
    _help = """The email is used in setup.py to define the email maintainer of the pip package."""

    def __init__(self, context):
        # Default comes from gitconfig when available.
        guessed_email = context.get_default_email()
        super().__init__(context, "email", default=guessed_email)
class GithubUsernameTransformer(Transformer):
    """Prompts for the Github user/space name used to suggest the repo URL."""

    desc = "your Github user name"
    _help = """The github username (or github space name) is used
to suggest a github repository url.
The username (ecmwf-lab) should be used if you wish to host your
repository on the github space "https://github.com/ecmwf-lab/").
Else, please provide your own github user name."""

    def __init__(self, context):
        super().__init__(
            context,
            "github_username",
            default="ecmwf-lab",
        )
class FullNameTransformer(Transformer):
    """Prompts for the maintainer full name used in the generated setup.py."""

    desc = "your full name"
    _help = """The full name is used in setup.py to define the maintainer of the pip package."""

    def __init__(self, context):
        # Default comes from gitconfig when available.
        guessed_name = context.get_default_full_name()
        super().__init__(context, "full_name", default=guessed_name)
class RepoUrlTransformer(Transformer):
    """Prompts for the repository URL referenced from setup.py and the README."""

    desc = "the repository url"
    _help = """The repository url name is used to define:
- The package url in the setup.py, i.e. the url published in Pypi for pip.
- The links in the README file.
If your do not want to host you repository on github,
please edit manually the generated setup.py afterwards."""

    def __init__(self, context):
        super().__init__(
            context,
            "repo_url",
            default="github_username_climetlab_template/climetlab-plugin-name-climetlab-template",
            force_prefix="https://github.com/",
        )
class LicenceTransformer(Transformer):
_help = """The APACHE 2.0 licence is used for the plugin code.
Most users should answer "n" to use the standard APACHE 2.0 licence.
ECMWF users should answer "y" to add the appropriate addition to the licence.
The licence is added in the plugin code:
- In the header of each python file.
- In the LICENSE file.
- In the README.
If you choose another licence, | |
or self.jobs.rebuild_allowUpgrade):
if useTypoMetrics:
OS2f2T.fsSelection |= 1<<7
else:
OS2f2T.fsSelection &= ~(1<<7)
if OS2f2T.version < 4:
OS2f2T.version = 4
if isinstance(forcePreferredFamily, bool) and \
(OS2f2T.version > 3 or self.jobs.rebuild_allowUpgrade):
if forcePreferredFamily:
OS2f2T.fsSelection |= 1<<8
else:
OS2f2T.fsSelection &= ~(1<<8)
if OS2f2T.version < 4:
OS2f2T.version = 4
if isinstance(isMonospaced, bool):
if isMonospaced:
# Update average char width
if (isinstance(monoLatinWidth, int) or isinstance(monoLatinWidth, float)):
OS2f2T.xAvgCharWidth = int(abs(monoLatinWidth))
elif self.jobs.general_recalc:
OS2f2T.xAvgCharWidth = Workers.OS2f2Worker.recalcXAvgCharWidth(self.font["hmtx"], True)
else:
pass
# Update PANOSE
if OS2f2T.panose.bFamilyType in [2, 4]:
OS2f2T.panose.bProportion = 9
elif OS2f2T.panose.bFamilyType in [3, 5]:
OS2f2T.panose.bProportion = 3
else:
pass
else:
# We must update average char width again even though it has been updated by the Fixer
# because the monospace switch might be turned on before we arrive here.
if self.jobs.general_recalc:
OS2f2T.xAvgCharWidth = Workers.OS2f2Worker.recalcXAvgCharWidth(self.font["hmtx"], False)
# Update PANOSE
if OS2f2T.panose.bFamilyType == 2:
OS2f2T.panose.bProportion = 3
elif OS2f2T.panose.bFamilyType in [3, 5]:
OS2f2T.panose.bProportion = 2
elif OS2f2T.panose.bFamilyType == 4:
OS2f2T.panose.bProportion = 5
else:
pass
if ibmClass:
styleClass = OS2f2T.sFamilyClass>>8
styleSubclass = OS2f2T.sFamilyClass & 0b11111111
if ibmClass.get("ibmStyleClass") in range(0, 16):
styleClass = ibmClass.get("ibmStyleClass")
if ibmClass.get("ibmStyleSubclass") in range(0, 16):
styleSubclass = ibmClass.get("ibmStyleSubclass")
OS2f2T.sFamilyClass = (styleClass<<8) + styleSubclass
if panose:
if panose.get("familykind") in range(0, 6):
OS2f2T.panose.bFamilyType = panose.get("familykind")
if panose.get("subkind1") in range(0, 17):
OS2f2T.panose.bSerifStyle = panose.get("subkind1")
if panose.get("subkind2") in range(0, 17):
OS2f2T.panose.bWeight = panose.get("subkind2")
if panose.get("subkind3") in range(0, 17):
OS2f2T.panose.bProportion = panose.get("subkind3")
if panose.get("subkind4") in range(0, 17):
OS2f2T.panose.bContrast = panose.get("subkind4")
if panose.get("subkind5") in range(0, 17):
OS2f2T.panose.bStrokeVariation = panose.get("subkind5")
if panose.get("subkind6") in range(0, 17):
OS2f2T.panose.bArmStyle = panose.get("subkind6")
if panose.get("subkind7") in range(0, 17):
OS2f2T.panose.bLetterForm = panose.get("subkind7")
if panose.get("subkind8") in range(0, 17):
OS2f2T.panose.bMidline = panose.get("subkind8")
if panose.get("subkind9") in range(0, 17):
OS2f2T.panose.bXHeight = panose.get("subkind9")
return
def __updateOS2f2_addNewAttrs(self):
    """Zero-fill OS/2 fields introduced in table versions 1, 2 and 5 when absent."""
    os2Table = self.font.get("OS/2")
    newAttrNames = (
        # Version 1 fields:
        "ulCodePageRange1",
        "ulCodePageRange2",
        # Version 2 fields:
        "sxHeight",
        "sCapHeight",
        "usDefaultChar",
        "usBreakChar",
        "usMaxContext",
        # Version 5 fields:
        "usLowerOpticalPointSize",
        "usUpperOpticalPointSize",
    )
    for attrName in newAttrNames:
        if not hasattr(os2Table, attrName):
            setattr(os2Table, attrName, 0)
    return
def __updateOS2f2_width2Panose(self, widthScale, panose):
    """Project a 1-9 width scale onto the PANOSE classification in-place.

    Which PANOSE digit is affected depends on the family kind
    (bFamilyType): 2 and 4 adjust bProportion, 3 adjusts bContrast.
    Width values absent from a table (e.g. 5 for family type 2) leave
    the PANOSE value untouched.
    NOTE(review): mapping values assumed to follow the PANOSE spec —
    confirm against the OS/2 table documentation.
    """
    if panose.bFamilyType == 2:  # Latin text
        proportion_by_width = {1: 8, 2: 8, 3: 6, 4: 6, 6: 5, 7: 5, 8: 7, 9: 7}
        if widthScale in proportion_by_width:
            panose.bProportion = proportion_by_width[widthScale]
    elif panose.bFamilyType == 3:  # Latin hand-written
        contrast_by_width = {1: 2, 2: 2, 3: 3, 4: 3, 5: 4, 6: 5, 7: 5, 8: 6, 9: 6}
        if widthScale in contrast_by_width:
            panose.bContrast = contrast_by_width[widthScale]
    elif panose.bFamilyType == 4:  # Latin decorative
        proportion_by_width = {1: 2, 2: 3, 3: 4, 4: 4, 5: 5, 6: 6, 7: 6, 8: 7, 9: 8}
        if widthScale in proportion_by_width:
            panose.bProportion = proportion_by_width[widthScale]
    return
def __rebuildName(self):
name = self.config.get("Name")
if not name:
return
elif not name.get("en") or not self.__loadUstr(name["en"].get("fontFamily")):
print("WARNING: No valid English font family detected in the configuration.", file = sys.stderr)
print("Please make sure that [Name.en] and English \"fontFamily\" are correctly specified.", file = sys.stderr)
print("Configurating section [Name] is now ignored.", file = sys.stderr)
return
en = name.get("en")
general = self.config.get("General")
style = self.config.get("Style")
builder = Builders.NameTableBuilder()
# Add PostScript CID Findfont name from old `name` table if it exists
builder.addPSCIDFFNameFromNameTable(self.font.get("name"))
# Get PostScript Name from `CFF ` table if it exists
cffRecords = Workers.NameWorker.getRecordsFromCFF(self.font.get("CFF "))
cffPSname = None
if cffRecords:
cffPSname = cffRecords[3].decode()
# Basic name records's initialization
# From here the English font family always exists.
enFamily = en.get("fontFamily")
enSubfamily = u"R" # Default English subfamily
enLgcFmly = enFamily # Default English legacy family
enWWS = [None, None, None] # [enWidth, enWeight, enItalic]
enFullName = psName = versionStr = uniqueID = None
# Add style-links, generate English subfamily and legacy family
if style:
slCode = style.get("styleLink")
widthScale = style.get("widthScale")
weightScale = style.get("weightScale")
italicAngle = style.get("italicAngle")
# Try to get width, weight and italic string.
if widthScale in range(1, 10) and widthScale != 5:
enWWS[0] = Constants.ABBREVIATED_WIDTHS[widthScale - 1].decode()
if weightScale in range(1, 11):
enWWS[1] = Constants.ABBREVIATED_WEIGHTS[weightScale - 1].decode()
if (isinstance(italicAngle, float) or isinstance(italicAngle, int)) and \
italicAngle != 0:
enWWS[2] = u"It"
# Fill English subfamily with abbreviated strings from above
isFirst = True
for item in enWWS:
if item:
if isFirst:
isFirst = False
enSubfamily = item
else:
enSubfamily += u" " + item
# Add style-link and generate English legacy family
# Version 1.3.4 update: now style-link only affects Win platform.
if enWWS[0]: # enWidth
enLgcFmly += u" " + enWWS[0]
if slCode == Constants.STYLELINK_REGULAR:
builder.addStylelink(slCode)
if weightScale and weightScale != 4:
enLgcFmly += u" " + enWWS[1] # enWeight
elif slCode == Constants.STYLELINK_BOLD:
builder.addStylelink(slCode)
elif slCode == Constants.STYLELINK_ITALIC:
builder.addStylelink(slCode)
if weightScale:
enLgcFmly += u" " + enWWS[1]
elif slCode == Constants.STYLELINK_BOLDITALIC:
builder.addStylelink(slCode)
else:
builder.addStylelink(Constants.STYLELINK_NONE)
if enWWS[1]: # enWeight
enLgcFmly += u" " + enWWS[1]
if enWWS[2]: # enItalic
enLgcFmly += u" " + enWWS[2]
else:
builder.addStylelink(Constants.STYLELINK_NONE)
# Get English subfamily and legacy family from configuration
if self.__loadUstr(en.get("fontSubfamily")):
# Deal with Windows subfamily
enSubfamily = self.__loadUstr(en.get("fontSubfamily"))
# Generate English legacy family from enSubfamily and style-links
enLgcFmly = enFamily + u" " + enSubfamily
if style:
slCode = style.get("styleLink")
if slCode == Constants.STYLELINK_REGULAR:
for styl in Constants.REGULAR_STYLES:
# Use regex for case-insensitive removal
enLgcFmly = re.sub(r"(?i)\b" + styl + r"\b", "", enLgcFmly)
for styl in Constants.CJK_REGULAR_WEIGHTS:
enLgcFmly = re.sub(r"(?i)\b" + styl + r"\b", "", enLgcFmly)
elif slCode == Constants.STYLELINK_BOLD:
for styl in Constants.BOLD_STYLES:
enLgcFmly = re.sub(r"(?i)\b" + styl + r"\b", "", enLgcFmly)
for styl in Constants.CJK_BOLD_WEIGHTS:
enLgcFmly = re.sub(r"(?i)\b" + styl + r"\b", "", enLgcFmly)
elif slCode == Constants.STYLELINK_ITALIC: # Which represents for "Regular Italic"
for styl in Constants.REGULAR_STYLES:
enLgcFmly = re.sub(r"(?i)\b" + styl + r"\b", "", enLgcFmly)
for styl in Constants.CJK_REGULAR_WEIGHTS:
enLgcFmly = re.sub(r"(?i)\b" + styl + r"\b", "", enLgcFmly)
for styl in Constants.ITALIC_STYLES:
enLgcFmly = re.sub(r"(?i)\b" + styl + r"\b", "", enLgcFmly)
elif slCode == Constants.STYLELINK_BOLDITALIC:
for styl in Constants.BOLD_STYLES:
enLgcFmly = re.sub(r"(?i)\b" + styl + r"\b", "", enLgcFmly)
for styl in Constants.CJK_BOLD_WEIGHTS:
enLgcFmly = re.sub(r"(?i)\b" + styl + r"\b", "", enLgcFmly)
for styl in Constants.ITALIC_STYLES:
enLgcFmly = re.sub(r"(?i)\b" + styl + r"\b", "", enLgcFmly)
else:
pass
while enLgcFmly != enLgcFmly.replace(u" ", u" "):
enLgcFmly = enLgcFmly.replace(u" ", u" ")
enLgcFmly = enLgcFmly.strip()
# Deal with fullName with priority below:
# family + subfamily < *specified*
enFullName = enFamily + u" " + enSubfamily
if self.__loadUstr(en.get("fontFullName")):
enFullName = self.__loadUstr(en.get("fontFullName"))
# Deal with psName with priority below:
# fullName < cffPSname < *specified*
# Incompatible chars will be discarded
psName = enFamily.replace(u" ", u"") + u"-" + enSubfamily.replace(u" ", u"")
if cffPSname:
psName = cffPSname
if self.__loadUstr(en.get("postScriptName")):
psName = self.__loadUstr(en.get("postScriptName"))
# Deal with versionStr with priority below:
# `head`.fontRevision < General.version < *specified*
# Strings without the decimal dot will be added
versionStr = Workers.NameWorker.getVersionString(self.font["head"])
if general:
versionNum = general.get("version")
if isinstance(versionNum, float) or isinstance(versionNum, int):
versionStr = "Version " + "%.2f" % abs(versionNum)
if self.__loadUstr(en.get("versionString")):
versionStr = self.__loadUstr(en.get("versionString"))
# Deal with uniqueID with priority below:
# fullName + versionStr < *specified*
uniqueID = enFullName + u"; " + versionStr
if self.__loadUstr(en.get("uniqueID")):
uniqueID = self.__loadUstr(en.get("uniqueID"))
# Build English part of `name`
# Family and subfamily
builder.addMacNameEx(enFamily, 1, 0)
builder.addMacNameEx(enSubfamily, 2, 0)
builder.addWinNameEx(enLgcFmly, 1, 0x0409)
# name ID 2 has been already added by addStylelink()
builder.addWinNameEx(enFamily, 16, 0x0409)
builder.addWinNameEx(enSubfamily, 17, 0x0409)
# Full name
builder.addEngName(enFullName, 4) # name ID 4 for both platforms
builder.addMacCompatibleFullEx(enFullName, 0) # name ID 18 for only Macintosh
# Other stuff
builder.addFontUniqueID(uniqueID) # name ID 3
builder.addVersionString(versionStr) # name ID 5
psName = builder.addPostScriptName(psName) # name ID | |
<gh_stars>1-10
"""
MIT License
Copyright (c) 2021 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
PyBluesky - A simple python game to navigate your jet and fight through a massive missile attack, based on the pygame framework.
Version: 1.0.0 (based on desktop release 1.0.5 ; changed version number for android release)
Author: <NAME> (ljnath)
Email: <EMAIL>
Website: https://ljnath.com
"""
import asyncio
import math
import random
import webbrowser
import pygame
from android import loadingscreen
from plyer import accelerometer, orientation, vibrator
from game.data.enums import Screen, StartChoice
from game.environment import GameEnvironment
from game.handlers.leaderboard import LeaderBoardHandler
from game.handlers.network import NetworkHandler
from game.sprites.cloud import Cloud
from game.sprites.jet import Jet
from game.sprites.missile import Missile
from game.sprites.samlauncher import SamLauncher
from game.sprites.star import Star
from game.sprites.text.input.name import NameInputText
from game.sprites.text import Text
from game.sprites.text.exitmenu import ExitMenuText
from game.sprites.text.gamemenu import GameMenuText
from game.sprites.text.help import HelpText
from game.sprites.text.leaderboard import LeaderBoardText
from game.sprites.text.replaymenu import ReplayMenuText
from game.sprites.text.score import ScoreText
from game.sprites.vegetation import Vegetation
# API key passed to NetworkHandler / LeaderBoardHandler for the remote score
# service; left empty here — presumably injected at build/release time (verify).
API_KEY = ''
def check_update() -> None:
    """Check the remote server for a newer game version and refresh the
    local leaderboard cache.

    Blocks until both network calls complete; intended to be called once
    during startup.
    """
    network_handler = NetworkHandler(API_KEY)
    # Look the loop up once instead of twice; both coroutines run on it.
    event_loop = asyncio.get_event_loop()
    event_loop.run_until_complete(network_handler.check_game_update())
    event_loop.run_until_complete(LeaderBoardHandler().update(API_KEY))
def submit_result() -> None:
    """Submit the current game score to the remote server.

    Does nothing when the score is not positive, so empty/aborted games
    are never uploaded. After a submission the leaderboard cache is
    refreshed to reflect the new entry.
    """
    game_env = GameEnvironment()
    if game_env.dynamic.game_score > 0:
        network_handler = NetworkHandler(API_KEY)
        # Look the loop up once instead of per call.
        event_loop = asyncio.get_event_loop()
        event_loop.run_until_complete(network_handler.submit_result())
        event_loop.run_until_complete(LeaderBoardHandler().update(API_KEY))
def create_vegetation(vegetations) -> None:
    """Repopulate the vegetation sprite group with enough tiles to cover
    one full screen width.

    The group is emptied first, then tiles are laid edge to edge; each
    sprite is positioned at the horizontal center of its tile slot.
    """
    game_env = GameEnvironment()
    vegetations.empty()
    tile_width = game_env.vegetation_size[0]
    tile_count = math.ceil(game_env.static.screen_width / tile_width)
    for slot in range(tile_count):
        center_x = slot * tile_width + tile_width / 2
        vegetations.add(Vegetation(x_pos=center_x))
def notify_user_of_update() -> None:
    """Open the game's download page in a web browser when an update URL
    has been recorded by the update check; no-op otherwise."""
    game_env = GameEnvironment()
    update_url = game_env.dynamic.update_url
    if not update_url:
        return
    try:
        webbrowser.open(update_url)
    except Exception:
        # Best effort only: a missing/broken browser must not crash the game.
        pass
def get_hint_sprite(hint_message: str) -> "Text":
    """Create and return a hint Text sprite anchored near the bottom
    center of the screen.

    Fix: the original annotation said ``-> None`` although the function
    returns the created ``Text`` sprite (callers add it to sprite groups).
    """
    game_env = GameEnvironment()
    return Text(f'HINT: {hint_message}', 26, pos_x=game_env.static.screen_width / 2, pos_y=game_env.static.screen_height - 30)
def play():
pygame.mixer.init() # initializing same audio mixer with default settings
pygame.init() # initializing pygame
game_env = GameEnvironment() # initializing game environment
game_env.dynamic.collision_sound.set_volume(1.5)
game_env.dynamic.levelup_sound.set_volume(1.5)
game_env.dynamic.shoot_sound.set_volume(1.5)
game_env.dynamic.hit_sound.set_volume(3)
game_env.dynamic.powerup_sound.set_volume(10)
game_env.dynamic.samfire_sound.set_volume(5)
# setting main game background music
# looping the main game music and setting game volume
pygame.mixer.music.load(game_env.static.game_sound.get('music'))
pygame.mixer.music.play(loops=-1)
pygame.mixer.music.set_volume(.2)
# setting flags to create screen in fullscreen, use HW-acceleration and DoubleBuffer
flags = pygame.FULLSCREEN | pygame.DOUBLEBUF | pygame.HWSURFACE | pygame.SCALED | pygame.RESIZABLE
# creating game screen with custom width and height
screen = pygame.display.set_mode((game_env.static.screen_width, game_env.static.screen_height), flags)
pygame.display.set_caption('{} version. {}'.format(game_env.static.name, game_env.static.version)) # setting name of game window
pygame.mouse.set_visible(False) # hiding the mouse pointer from the game screen
gameclock = pygame.time.Clock() # setting up game clock to maintain constant fps
check_update()
ADD_CLOUD = pygame.USEREVENT + 1 # creating custom event to automatically add cloud in the screen
pygame.time.set_timer(ADD_CLOUD, int(1000 / game_env.static.cloud_per_sec)) # setting event to auto-trigger every 1s; 1 cloud will be created every second
ADD_MISSILE = pygame.USEREVENT + 2 # creating custom event to automatically add missiles in the screen
pygame.time.set_timer(ADD_MISSILE, int(1000 / game_env.static.missile_per_sec)) # setting event to auto-trigger every 500ms; 2 missiles will be created every second
ADD_SAM_LAUNCHER = pygame.USEREVENT + 3 # creating custom event to automatically add SAM-LAUNCHER in the screen
pygame.time.set_timer(ADD_SAM_LAUNCHER, 5000) # setting event to auto-trigger every 5s; 1 level can have 4 sam launcher
running = True # game running variable
gameover = False # no gameover by default
game_started = False # game is not started by default
game_pause = False
star_shown = False
user_has_swipped = False
screen_color = game_env.static.background_default if game_started else game_env.static.background_special
# blocking all the undesired events
pygame.event.set_blocked(pygame.FINGERMOTION)
pygame.event.set_blocked(pygame.FINGERUP)
pygame.event.set_blocked(pygame.FINGERDOWN)
pygame.event.set_blocked(pygame.MOUSEBUTTONDOWN)
pygame.event.set_blocked(pygame.MOUSEMOTION)
pygame.event.set_blocked(pygame.KEYUP)
pygame.event.set_blocked(ADD_MISSILE)
pygame.event.set_blocked(ADD_SAM_LAUNCHER)
backgrounds = pygame.sprite.Group() # creating seperate group for background sprites
stars = pygame.sprite.GroupSingle() # group of stars with max 1 sprite
vegetations = pygame.sprite.Group() # creating cloud group for storing all the clouds in the game
clouds = pygame.sprite.Group() # creating cloud group for storing all the clouds in the game
missiles = pygame.sprite.Group() # creating missile group for storing all the missiles in the game
deactivated_missile = pygame.sprite.Group() # creating missile group for storing all the deactivated missiles in the game
samlaunchers = pygame.sprite.GroupSingle() # creating missile group for storing all the samlaunchers in the game
title_sprites = pygame.sprite.Group()
hint_sprite = get_hint_sprite("Swipe your finger to know more") # creating game hint message
title_banner_sprite = Text("{} {}".format(game_env.static.name, game_env.static.version), 100, pos_x=game_env.static.screen_width / 2, pos_y=100) # creating title_banner_sprite text sprite with game name
title_author_sprite = Text("By <NAME> aka ljnath", 28, pos_x=game_env.static.screen_width / 2, pos_y=150) # creating game author
swipe_navigated_menus = {
Screen.GAME_MENU: GameMenuText(),
Screen.HELP: HelpText(),
Screen.LEADERBOARD: LeaderBoardText()
}
selected_menu_index = 0
# showing regular game menus if user has entered the player name
if game_env.dynamic.player_name:
game_env.dynamic.all_sprites.add(hint_sprite)
active_sprite = swipe_navigated_menus[Screen.GAME_MENU]
pygame.event.set_allowed(pygame.MOUSEMOTION)
else:
# else showing the screen for user to enter the player name
active_sprite = NameInputText()
game_env.dynamic.active_screen = Screen.NAME_INPUT
[title_sprites.add(sprite) for sprite in (active_sprite, title_banner_sprite, title_author_sprite)] # adding all the necessary sprites to title_sprites
[game_env.dynamic.all_sprites.add(sprite) for sprite in title_sprites] # adding all title_sprites sprite to all_sprites
jet = Jet() # creating jet sprite
scoretext_sprite = ScoreText() # creating scoreboard sprite
game_env.dynamic.noammo_sprite = Text("NO AMMO !!!", 30) # creating noammo-sprite
create_vegetation(vegetations)
menu_screens = {Screen.REPLAY_MENU, Screen.GAME_MENU, Screen.EXIT_MENU}
last_active_sprite = (game_env.dynamic.active_screen, active_sprite)
def start_gameplay():
nonlocal gameover, jet, star_shown, screen_color, game_started, ADD_MISSILE, ADD_SAM_LAUNCHER
pygame.event.set_blocked(game_env.MOUSEMOTION)
pygame.event.set_allowed(ADD_MISSILE)
pygame.event.set_allowed(ADD_SAM_LAUNCHER)
screen_color = game_env.static.background_default # restoring screen color
[sprite.kill() for sprite in title_sprites] # kill all the title_sprites sprite sprite
jet = Jet() # re-creating the jet
missiles.empty() # empting the missle group
game_env.dynamic.all_sprites = pygame.sprite.Group() # re-creating group of sprites
[game_env.dynamic.all_sprites.remove(sprite) for sprite in (active_sprite, hint_sprite)] # removing active sprite and hint sprite
[game_env.dynamic.all_sprites.add(sprite) for sprite in (jet, scoretext_sprite)] # adding the jet and scoreboard to all_sprites
game_env.reset() # reseting game data
pygame.time.set_timer(ADD_MISSILE, int(1000 / game_env.static.missile_per_sec)) # resetting missile creation event timer
create_vegetation(vegetations) # creating vegetation
[backgrounds.add(sprite) for sprite in vegetations.sprites()] # adding vegetation to background
game_env.dynamic.active_screen = Screen.GAME_SCREEN # setting gamescreen as the active sprite
game_started = True # game has started
gameover = False # game is not over yet
star_shown = False # no star is displayed
# enabling accelerometer sensor to get acceleration sensor data
accelerometer.enable()
# Main game loop
while running:
# getting the acceleration sensor data from accelerometer
# acceleration_sensor_values is a tuple of (x, y, z) sensor data
acceleration_sensor_values = accelerometer.acceleration
# this variable is updated in case of a MOUSEMOTION; in subsequent MOUSEBUTTONUP event,
# it is checked if the position of both these events are the same.
# if yes, this indicates that these are part of same motion and the MOUSEBUTTONUP event can be discarded
last_touch_position = (0, 0)
# Look at every event in the queue
for event in pygame.event.get():
# checking for VIDEORESIZE event, this event is used to prevent auto-rotate in android device
# if any change in the screen size is detected, then the orientation is forcefully re-applied
if event.type == game_env.VIDEORESIZE:
orientation.set_landscape(reverse=False)
# handling keydown event to show the pause menu
elif event.type == game_env.KEYDOWN:
if game_env.dynamic.active_screen != Screen.EXIT_MENU and pygame.key.name(event.key) == 'AC Back':
pygame.mixer.music.pause()
last_active_screen = game_env.dynamic.active_screen
last_active_sprite = active_sprite
game_started, game_pause = game_pause, game_started
[game_env.dynamic.all_sprites.remove(sprite) for sprite in (active_sprite, hint_sprite)]
active_sprite = ExitMenuText()
game_env.dynamic.all_sprites.add(active_sprite)
game_env.dynamic.active_screen = Screen.EXIT_MENU
# handling the textinput event to allow user | |
<gh_stars>1-10
# -*- coding: utf-8 -*-
from django import forms
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib import messages
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.conf import settings
from django.template.defaultfilters import date as _date
from django.utils import translation
from django.utils.timezone import now
from raven.contrib.django.models import client
from django.contrib.humanize.templatetags.humanize import intcomma
from django.template.defaultfilters import floatformat
from schwifty import IBAN
import datetime
import string
from PIL import Image, ImageDraw, ImageFont
import os
from iso4217 import Currency
from accounting_core.models import AccountingGroupModels
from accounting_core.utils import AccountingYearLinked, CostCenterLinked
from app.utils import get_current_year, get_current_unit
from generic.models import GenericModel, GenericStateModel, FalseFK, GenericContactableModel, GenericGroupsModel, GenericExternalUnitAllowed, GenericModelWithLines, ModelUsedAsLine, GenericModelWithFiles, GenericTaggableObject, GenericAccountingStateModel, LinkedInfoModel, SearchableModel
from notifications.utils import notify_people, unotify_people
from rights.utils import UnitExternalEditableModel, UnitEditableModel, AgepolyEditableModel
class _Subvention(GenericModel, GenericModelWithFiles, GenericModelWithLines, AccountingYearLinked, GenericStateModel, GenericGroupsModel, UnitExternalEditableModel, GenericExternalUnitAllowed, GenericContactableModel, SearchableModel):
    """Abstract base model for a subsidy/sponsorship request made to AGEPoly.

    Concrete behavior (list views, workflow, rights) is driven by the nested
    Meta* configuration classes consumed by the generic-model framework.
    NOTE(review): this is Python 2 / legacy Django code (u'' literals,
    tuple-unpacking lambdas, __unicode__) — keep that dialect when editing.
    """

    # Choices for the `kind` field: plain subsidy vs. sponsorship.
    SUBVENTION_TYPE = (
        ('subvention', _(u'Subvention')),
        ('sponsorship', _(u'Sponsoring')),
    )

    class MetaRightsUnit(UnitExternalEditableModel.MetaRightsUnit):
        # Unit-level management restricted to the treasury role; not world-readable.
        access = 'TRESORERIE'
        world_ro_access = False

    name = models.CharField(_(u'Nom du projet'), max_length=255)
    amount_asked = models.IntegerField(_(u'Montant demandé'))
    amount_given = models.IntegerField(_(u'Montant attribué'), blank=True, null=True)
    mobility_asked = models.IntegerField(_(u'Montant mobilité demandé'), blank=True, null=True)
    mobility_given = models.IntegerField(_(u'Montant mobilité attribué'), blank=True, null=True)
    description = models.TextField(_('Description'), blank=True, null=True)
    # Internal comment visible only to users with LIST rights (see extra_right_display).
    comment_root = models.TextField(_('Commentaire AGEPoly'), blank=True, null=True)
    kind = models.CharField(_(u'Type de soutien'), max_length=15, choices=SUBVENTION_TYPE, blank=True, null=True)
    linked_budget = FalseFK('accounting_main.models.Budget', verbose_name=_(u'Budget annuel lié'), blank=True, null=True)

    class Meta:
        abstract = True
        # unique_together = (("unit", "unit_blank_name", "accounting_year"),)

    class MetaEdit:
        # `linked_budget` is only editable when the request belongs to a unit.
        only_if = {
            'linked_budget': lambda (obj, user): obj.unit,
        }

        files_title = _(u'Fichiers')
        files_help = _(u"""Envoie les fichiers nécessaires pour ta demande de subvention, les demandes incomplètes ne seront pas considérées.<br />
En cas de question, merci de contacter <a href="mailto:<EMAIL>"><EMAIL></a>.<br /><br />
Vous devez inclure dans votre demande au moins :
<ul>
<li>Budget du projet. Un document complémentaire détaillant et expliquant le budget est vivement recommandé.</li>
<li>Bilans et comptes des d'activité des années précédentes</li>
<li>Documents officiels (pour les Association hors AGEPoly) : statuts, liste des membres du comité, PV de la dernière AG</li>
</ul>
Ces différents documents sont demandés au format PDF dans la mesure du possible, afin d'éviter les problèmes d'ouvertures et de mise en page.""")

    class MetaData:
        # Columns of the list view: (field/property name, column label).
        list_display = [
            ('name', _(u'Projet')),
            ('get_unit_name', _(u'Association / Commission')),
            ('amount_asked', _(u'Montant demandé')),
            ('mobility_asked', _(u'Mobilité demandé')),
            ('status', _(u'Statut')),
        ]

        default_sort = "[2, 'asc']"  # unit
        filter_fields = ('name', 'unit__name', 'unit_blank_name')

        # Detail view = list columns + description + accounting year, with the
        # granted amounts inserted right after the corresponding asked amounts.
        details_display = list_display + [('description', _(u'Description')), ('accounting_year', _(u'Année comptable'))]
        details_display.insert(3, ('amount_given', _(u'Montant attribué')))
        details_display.insert(5, ('mobility_given', _(u'Montant mobilité attribué')))
        # Root-only field: shown only to users holding LIST rights on the object.
        extra_right_display = {'comment_root': lambda (obj, user): obj.rights_can('LIST', user)}

        files_title = _(u'Fichiers')
        base_title = _(u'Subvention')
        list_title = _(u'Liste des demandes de subvention')
        base_icon = 'fa fa-list'
        elem_icon = 'fa fa-gift'

        menu_id = 'menu-compta-subventions'
        not_sortable_columns = ['get_unit_name']
        safe_fields = ['get_unit_name']

        has_unit = True

        # Fixed pixel widths for the amount columns (list-view column indexes).
        forced_widths = {
            '3': '150px',
            '4': '150px',
        }

        help_list = _(u"""Les demandes de subvention peuvent être faites par toutes les commissions ou associations auprès de l'AGEPoly.""")

    class MetaAccounting:
        # Subsidy requests are never copied across accounting years.
        copiable = False

    class MetaLines:
        # One editable line collection: the events the subsidy would fund.
        lines_objects = [
            {
                'title': _(u'Evènements'),
                'class': 'accounting_tools.models.SubventionLine',
                'form': 'accounting_tools.forms.SubventionLineForm',
                'related_name': 'events',
                'field': 'subvention',
                'sortable': True,
                'date_fields': ['start_date', 'end_date'],
                'show_list': [
                    ('name', _(u'Titre')),
                    ('start_date', _(u'Du')),
                    ('end_date', _(u'Au')),
                    ('place', _(u'Lieu')),
                    ('nb_spec', _(u'Nb personnes attendues')),
                ]
            },
        ]

    class MetaState:
        # Workflow: draft -> submitted -> treated, with a "needs correction"
        # loop back from submitted. State keys are ordered by their digit prefix.
        states = {
            '0_draft': _(u'Brouillon'),
            '0_correct': _(u'A corriger'),
            '1_submited': _(u'Demande soumise'),
            '2_treated': _(u'Demande traitée'),
        }

        default = '0_draft'

        states_texts = {
            '0_draft': _(u'La demande est en cours de création et n\'est pas publique.'),
            '1_submited': _(u'La demande a été soumise.'),
            '0_correct': _(u'La demande doit être corrigée.'),
            '2_treated': _(u'La demande a été traitée.'),
        }

        # Allowed transitions out of each state.
        states_links = {
            '0_draft': ['1_submited'],
            '0_correct': ['1_submited'],
            '1_submited': ['2_treated', '0_correct'],
            '2_treated': [],
        }

        # Bootstrap label color per state.
        states_colors = {
            '0_draft': 'primary',
            '1_submited': 'default',
            '0_correct': 'warning',
            '2_treated': 'success',
        }

        states_icons = {
            '0_draft': '',
            '1_submited': '',
            '0_correct': '',
            '2_treated': '',
        }

        # Quick-action buttons rendered in list views, per current state.
        list_quick_switch = {
            '0_draft': [('1_submited', 'fa fa-check', _(u'Soumettre la demande'))],
            '0_correct': [('1_submited', 'fa fa-check', _(u'Soumettre la demande'))],
            '1_submited': [('2_treated', 'fa fa-check', _(u'Marquer la demande comme traitée')), ('0_correct', 'fa fa-exclamation', _(u'Demander des corrections'))],
            '2_treated': [],
        }

        # Relative (x, y) positions of each state node in the workflow diagram.
        forced_pos = {
            '0_draft': (0.2, 0.15),
            '0_correct': (0.5, 0.85),
            '1_submited': (0.5, 0.15),
            '2_treated': (0.8, 0.15),
        }

        states_default_filter = '0_draft,0_correct'
        status_col_id = 5

        class SubventionValidationForm(forms.Form):
            # Extra form shown when switching to '2_treated': the treasury
            # enters the amounts actually granted.
            amount_given = forms.IntegerField(label=_(u'Montant accordé'))
            mobility_given = forms.IntegerField(label=_(u'Montant mobilité accordé'))

        states_bonus_form = {
            '2_treated': SubventionValidationForm
        }

    class MetaSearch(SearchableModel.MetaSearch):
        extra_text = u""
        index_files = True

        fields = [
            'name',
            'description',
            'comment_root',
            'amount_asked',
            'amount_given',
            'mobility_asked',
            'mobility_given',
        ]

        # Event line fields that are also indexed for search.
        linked_lines = {
            'events': ['name', 'place']
        }

    def __init__(self, *args, **kwargs):
        """Extend the inherited rights table with an EXPORT permission."""
        super(_Subvention, self).__init__(*args, **kwargs)
        # Subclass MetaRights per-instance so the update does not leak
        # into sibling models sharing the same base MetaRights.
        self.MetaRights = type("MetaRights", (self.MetaRights,), {})
        self.MetaRights.rights_update({
            'EXPORT': _(u'Peut exporter les éléments'),
        })

    def switch_status_signal(self, request, old_status, dest_status):
        """On transition to '2_treated', persist the granted amounts posted
        via SubventionValidationForm; otherwise just chain to the parent."""
        s = super(_Subvention, self)
        if hasattr(s, 'switch_status_signal'):
            s.switch_status_signal(request, old_status, dest_status)
        if dest_status == '2_treated':
            self.amount_given = request.POST.get('amount_given', self.amount_given)
            self.mobility_given = request.POST.get('mobility_given', self.mobility_given)
            self.save()

    def may_switch_to(self, user, dest_state):
        # A user must hold EDIT rights to even be offered a transition.
        return self.rights_can('EDIT', user) and super(_Subvention, self).may_switch_to(user, dest_state)

    def can_switch_to(self, user, dest_state):
        """Validate a workflow transition; returns (allowed, reason)."""
        # Past the submission deadline, drafts can only be moved by root-unit members.
        if self.status == '0_draft' and self.accounting_year.subvention_deadline and self.accounting_year.subvention_deadline < now() and not self.rights_in_root_unit(user, self.MetaRightsUnit.access):
            return (False, _(u'Le délait est dépassé pour les subventions !'))
        # Leaving the terminal 'treated' state requires a superuser.
        if self.status == '2_treated' and not user.is_superuser:
            return (False, _(u'Seul un super utilisateur peut sortir cet élément de l\'état traité'))
        # Only single forward steps are allowed for regular users (digit prefix +1).
        if int(dest_state[0]) - int(self.status[0]) != 1 and not user.is_superuser:
            if not (self.status == '1_submited' and dest_state == '0_correct'):  # Exception made for correction requests
                return (False, _(u'Seul un super utilisateur peut sauter des étapes ou revenir en arrière.'))
        # Treating / requesting corrections is reserved to root-unit members.
        if self.status == '1_submited' and not self.rights_in_root_unit(user, self.MetaRightsUnit.access):
            return (False, _(u'Seul un membre du Comité de Direction peut marquer la demande comme traitée ou à corriger.'))
        if not self.rights_can('EDIT', user):
            return (False, _('Pas les droits.'))
        return super(_Subvention, self).can_switch_to(user, dest_state)

    def __unicode__(self):
        return u"{} {}".format(self.get_real_unit_name(), self.accounting_year)

    def genericFormExtraClean(self, data, form):
        """Check that unique_together is fulfilled"""
        from accounting_tools.models import Subvention
        if Subvention.objects.exclude(pk=self.pk).filter(accounting_year=get_current_year(form.truffe_request), unit=get_current_unit(form.truffe_request), unit_blank_name=data['unit_blank_name'], deleted=False).count():
            raise forms.ValidationError(_(u'Une demande de subvention pour cette unité existe déjà pour cette année comptable.'))  # Potentially among the soft-deleted ones

    def genericFormExtraInit(self, form, current_user, *args, **kwargs):
        """Remove fields that should be edited by CDD only."""
        if not self.rights_in_root_unit(current_user, self.MetaRightsUnit.access):
            for key in ['amount_given', 'mobility_given', 'comment_root']:
                del form.fields[key]
            form.fields['kind'].widget = forms.HiddenInput()

    def rights_can_EXPORT(self, user):
        # Export is reserved to root-unit members.
        return self.rights_in_root_unit(user)

    def rights_can_EDIT(self, user):
        # Once submitted (status digit != 0), only root-unit members may edit.
        if self.status[0] != '0' and not self.rights_in_root_unit(user):
            return False
        return super(_Subvention, self).rights_can_EDIT(user)

    def get_real_unit_name(self):
        # External (non-AGEPoly) requesters have a free-text unit name.
        return self.unit_blank_name or self.unit.name

    def total_people(self):
        """Return the total number of expected people among all events"""
        total = 0
        for line in self.events.all():
            total += line.nb_spec
        return total
class SubventionLine(ModelUsedAsLine):
    """One event line attached to a subsidy request: what the money would
    fund, where, when, and for how many expected attendees."""

    name = models.CharField(_(u'Nom de l\'évènement'), max_length=255)
    start_date = models.DateField(_(u'Début de l\'évènement'))
    end_date = models.DateField(_(u'Fin de l\'évènement'))
    place = models.CharField(_(u'Lieu de l\'évènement'), max_length=100)
    nb_spec = models.PositiveIntegerField(_(u'Nombre de personnes attendues'))

    # Parent request; `related_name="events"` matches MetaLines on the request model.
    subvention = models.ForeignKey('Subvention', related_name="events", verbose_name=_(u'Subvention/sponsoring'))

    def __unicode__(self):
        return u"{}:{}".format(self.subvention.name, self.name)
class _Invoice(GenericModel, GenericStateModel, GenericTaggableObject, CostCenterLinked, GenericModelWithLines, GenericGroupsModel, GenericContactableModel, AccountingYearLinked, UnitEditableModel, SearchableModel):
"""Modèle pour les factures"""
class MetaRightsUnit(UnitEditableModel.MetaRightsUnit):
access = ['TRESORERIE', 'SECRETARIAT']
class MetaRights(UnitEditableModel.MetaRights):
linked_unit_property = 'costcenter.unit'
def __init__(self, *args, **kwargs):
super(_Invoice, self).__init__(*args, **kwargs)
self.MetaRights.rights_update({
'DOWNLOAD_PDF': _(u'Peut exporter la facture en PDF'),
})
title = models.CharField(max_length=255)
custom_bvr_number = models.CharField(_(u'Numéro de BVR manuel'), help_text=_(u'Ne PAS utiliser un numéro aléatoire, mais utiliser un VRAI et UNIQUE numéro de BVR. Seulement pour des BVR physiques. Si pas renseigné, un numéro sera généré automatiquement. Il est possible de demander des BVR à Marianne.'), max_length=59, blank=True, null=True)
address = models.TextField(_('Adresse'), help_text=_(u'Exemple: \'<NAME> - Rue Des Canard 19 - 1015 Lausanne\''), blank=True, null=True)
date_and_place = models.CharField(_(u'Lieu et date'), max_length=512, blank=True, null=True)
preface = models.TextField(_(u'Introduction'), help_text=_(u'Texte affiché avant la liste. Exemple: \'Pour l\'achat du Yearbook 2014\' ou \'Chère Madame, - Par la présente, je me permets de vous remettre notre facture pour le financement de nos activités associatives pour l\'année académique 2014-2015.\''), blank=True, null=True)
ending = models.TextField(_(u'Conclusion'), help_text=_(u'Affiché après la liste, avant les moyens de paiements'), max_length=1024, blank=True, null=True)
display_bvr = models.BooleanField(_(u'Afficher paiement via BVR'), help_text=_(u'Affiche un BVR et le texte corespondant dans le PDF. Attention, le BVR généré n\'est pas utilisable à la poste ! (Il est possible d\'obtenir un \'vrai\' BVR via Marianne.)'), default=True)
display_account = models.BooleanField(_(u'Afficher paiement via compte'), help_text=_(u'Affiche le texte pour le paiement via le compte de l\'AGEPoly.'), default=True)
greetings = models.CharField(_(u'Salutations'), default='', max_length=1024, blank=True, null=True)
sign = models.TextField(_(u'Signature'), help_text=_(u'Titre de la zone de signature'), blank=True, null=True)
annex = models.BooleanField(_(u'Annexes'), help_text=_(u'Affiche \'Annexe(s): ment.\' en bas de la facture'), default=False)
delay = models.SmallIntegerField(_(u'Délai de paiement en jours'), default=30, help_text=_(u'Mettre zéro pour cacher le texte. Il s\'agit du nombre de jours de délai pour le | |
for the usual 9DOF sensor setup. This
message should contain the scaled values to the described
units
'''
def __init__(self, time_boot_ms, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag):
MAVLink_message.__init__(self, MAVLINK_MSG_ID_SCALED_IMU, 'SCALED_IMU')
self._fieldnames = ['time_boot_ms', 'xacc', 'yacc', 'zacc', 'xgyro', 'ygyro', 'zgyro', 'xmag', 'ymag', 'zmag']
self.time_boot_ms = time_boot_ms
self.xacc = xacc
self.yacc = yacc
self.zacc = zacc
self.xgyro = xgyro
self.ygyro = ygyro
self.zgyro = zgyro
self.xmag = xmag
self.ymag = ymag
self.zmag = zmag
def pack(self, mav):
return MAVLink_message.pack(self, mav, 170, struct.pack('<Ihhhhhhhhh', self.time_boot_ms, self.xacc, self.yacc, self.zacc, self.xgyro, self.ygyro, self.zgyro, self.xmag, self.ymag, self.zmag))
class MAVLink_raw_imu_message(MAVLink_message):
    '''
    Raw, unscaled 9DOF IMU readings (accelerometer, gyro,
    magnetometer). Values are reported exactly as sampled so data
    capture and system debugging see the true sensor output.
    '''
    def __init__(self, time_usec, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_RAW_IMU, 'RAW_IMU')
        # One attribute per MAVLink field, in declaration order.
        self._fieldnames = ['time_usec', 'xacc', 'yacc', 'zacc', 'xgyro', 'ygyro', 'zgyro', 'xmag', 'ymag', 'zmag']
        for fieldname, fieldvalue in zip(self._fieldnames, (time_usec, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag)):
            setattr(self, fieldname, fieldvalue)

    def pack(self, mav):
        # Little-endian payload: uint64 timestamp + nine int16 readings.
        # 144 is this message type's per-message constant handed to the base pack.
        payload = struct.pack('<Qhhhhhhhhh', self.time_usec, self.xacc, self.yacc, self.zacc, self.xgyro, self.ygyro, self.zgyro, self.xmag, self.ymag, self.zmag)
        return MAVLink_message.pack(self, mav, 144, payload)
class MAVLink_raw_pressure_message(MAVLink_message):
    '''
    Raw pressure readings for the typical one-absolute plus one-
    differential sensor setup. Values are the raw, UNSCALED ADC
    counts.
    '''
    def __init__(self, time_usec, press_abs, press_diff1, press_diff2, temperature):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_RAW_PRESSURE, 'RAW_PRESSURE')
        self._fieldnames = ['time_usec', 'press_abs', 'press_diff1', 'press_diff2', 'temperature']
        for fieldname, fieldvalue in zip(self._fieldnames, (time_usec, press_abs, press_diff1, press_diff2, temperature)):
            setattr(self, fieldname, fieldvalue)

    def pack(self, mav):
        # Little-endian payload: uint64 timestamp + four int16 readings.
        payload = struct.pack('<Qhhhh', self.time_usec, self.press_abs, self.press_diff1, self.press_diff2, self.temperature)
        return MAVLink_message.pack(self, mav, 67, payload)
class MAVLink_scaled_pressure_message(MAVLink_message):
    '''
    Scaled pressure readings for the typical one-absolute plus one-
    differential sensor setup; units are as specified per field.
    '''
    def __init__(self, time_boot_ms, press_abs, press_diff, temperature):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_SCALED_PRESSURE, 'SCALED_PRESSURE')
        self._fieldnames = ['time_boot_ms', 'press_abs', 'press_diff', 'temperature']
        for fieldname, fieldvalue in zip(self._fieldnames, (time_boot_ms, press_abs, press_diff, temperature)):
            setattr(self, fieldname, fieldvalue)

    def pack(self, mav):
        # Little-endian payload: uint32 timestamp, two floats, one int16.
        payload = struct.pack('<Iffh', self.time_boot_ms, self.press_abs, self.press_diff, self.temperature)
        return MAVLink_message.pack(self, mav, 115, payload)
class MAVLink_attitude_message(MAVLink_message):
    '''
    Vehicle attitude in the aeronautical frame (right-handed,
    Z-down, X-front, Y-right): Euler angles plus angular rates.
    '''
    def __init__(self, time_boot_ms, roll, pitch, yaw, rollspeed, pitchspeed, yawspeed):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_ATTITUDE, 'ATTITUDE')
        self._fieldnames = ['time_boot_ms', 'roll', 'pitch', 'yaw', 'rollspeed', 'pitchspeed', 'yawspeed']
        for fieldname, fieldvalue in zip(self._fieldnames, (time_boot_ms, roll, pitch, yaw, rollspeed, pitchspeed, yawspeed)):
            setattr(self, fieldname, fieldvalue)

    def pack(self, mav):
        # Little-endian payload: uint32 timestamp + six floats.
        payload = struct.pack('<Iffffff', self.time_boot_ms, self.roll, self.pitch, self.yaw, self.rollspeed, self.pitchspeed, self.yawspeed)
        return MAVLink_message.pack(self, mav, 39, payload)
class MAVLink_attitude_quaternion_message(MAVLink_message):
    '''
    Vehicle attitude in the aeronautical frame (right-handed,
    Z-down, X-front, Y-right), expressed as a quaternion plus
    angular rates.
    '''
    def __init__(self, time_boot_ms, q1, q2, q3, q4, rollspeed, pitchspeed, yawspeed):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_ATTITUDE_QUATERNION, 'ATTITUDE_QUATERNION')
        self._fieldnames = ['time_boot_ms', 'q1', 'q2', 'q3', 'q4', 'rollspeed', 'pitchspeed', 'yawspeed']
        for fieldname, fieldvalue in zip(self._fieldnames, (time_boot_ms, q1, q2, q3, q4, rollspeed, pitchspeed, yawspeed)):
            setattr(self, fieldname, fieldvalue)

    def pack(self, mav):
        # Little-endian payload: uint32 timestamp + seven floats.
        payload = struct.pack('<Ifffffff', self.time_boot_ms, self.q1, self.q2, self.q3, self.q4, self.rollspeed, self.pitchspeed, self.yawspeed)
        return MAVLink_message.pack(self, mav, 246, payload)
class MAVLink_local_position_ned_message(MAVLink_message):
    '''
    Filtered local position and velocity (e.g. fused computer
    vision and accelerometers). Right-handed, Z-axis-down frame
    (aeronautical NED / north-east-down convention).
    '''
    def __init__(self, time_boot_ms, x, y, z, vx, vy, vz):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_LOCAL_POSITION_NED, 'LOCAL_POSITION_NED')
        self._fieldnames = ['time_boot_ms', 'x', 'y', 'z', 'vx', 'vy', 'vz']
        for fieldname, fieldvalue in zip(self._fieldnames, (time_boot_ms, x, y, z, vx, vy, vz)):
            setattr(self, fieldname, fieldvalue)

    def pack(self, mav):
        # Little-endian payload: uint32 timestamp + six floats.
        payload = struct.pack('<Iffffff', self.time_boot_ms, self.x, self.y, self.z, self.vx, self.vy, self.vz)
        return MAVLink_message.pack(self, mav, 185, payload)
class MAVLink_global_position_int_message(MAVLink_message):
    '''
    Filtered global position (e.g. fused GPS and accelerometers) in
    the GPS frame (right-handed, Z-up). Encoded with scaled
    integers because float resolution is insufficient here.
    '''
    def __init__(self, time_boot_ms, lat, lon, alt, relative_alt, vx, vy, vz, hdg):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_GLOBAL_POSITION_INT, 'GLOBAL_POSITION_INT')
        self._fieldnames = ['time_boot_ms', 'lat', 'lon', 'alt', 'relative_alt', 'vx', 'vy', 'vz', 'hdg']
        for fieldname, fieldvalue in zip(self._fieldnames, (time_boot_ms, lat, lon, alt, relative_alt, vx, vy, vz, hdg)):
            setattr(self, fieldname, fieldvalue)

    def pack(self, mav):
        # Little-endian payload: uint32 timestamp, four int32 position
        # fields, three int16 velocities, uint16 heading.
        payload = struct.pack('<IiiiihhhH', self.time_boot_ms, self.lat, self.lon, self.alt, self.relative_alt, self.vx, self.vy, self.vz, self.hdg)
        return MAVLink_message.pack(self, mav, 104, payload)
class MAVLink_rc_channels_scaled_message(MAVLink_message):
    '''
    Scaled RC channel values: -10000 is -100%, 0 is 0%, 10000 is
    100%. Inactive channels should carry the value 65535.
    '''
    def __init__(self, time_boot_ms, port, chan1_scaled, chan2_scaled, chan3_scaled, chan4_scaled, chan5_scaled, chan6_scaled, chan7_scaled, chan8_scaled, rssi):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_RC_CHANNELS_SCALED, 'RC_CHANNELS_SCALED')
        self._fieldnames = ['time_boot_ms', 'port', 'chan1_scaled', 'chan2_scaled', 'chan3_scaled', 'chan4_scaled', 'chan5_scaled', 'chan6_scaled', 'chan7_scaled', 'chan8_scaled', 'rssi']
        for fieldname, fieldvalue in zip(self._fieldnames, (time_boot_ms, port, chan1_scaled, chan2_scaled, chan3_scaled, chan4_scaled, chan5_scaled, chan6_scaled, chan7_scaled, chan8_scaled, rssi)):
            setattr(self, fieldname, fieldvalue)

    def pack(self, mav):
        # NOTE: wire order differs from the field order above — the two
        # uint8 fields (port, rssi) are packed AFTER the int16 channels.
        payload = struct.pack('<IhhhhhhhhBB', self.time_boot_ms, self.chan1_scaled, self.chan2_scaled, self.chan3_scaled, self.chan4_scaled, self.chan5_scaled, self.chan6_scaled, self.chan7_scaled, self.chan8_scaled, self.port, self.rssi)
        return MAVLink_message.pack(self, mav, 237, payload)
class MAVLink_rc_channels_raw_message(MAVLink_message):
    '''
    Raw RC channel values in microseconds. Standard PPM modulation:
    1000 us is 0%, 2000 us is 100%. Individual radios may deviate
    from this specification.
    '''
    def __init__(self, time_boot_ms, port, chan1_raw, chan2_raw, chan3_raw, chan4_raw, chan5_raw, chan6_raw, chan7_raw, chan8_raw, rssi):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_RC_CHANNELS_RAW, 'RC_CHANNELS_RAW')
        self._fieldnames = ['time_boot_ms', 'port', 'chan1_raw', 'chan2_raw', 'chan3_raw', 'chan4_raw', 'chan5_raw', 'chan6_raw', 'chan7_raw', 'chan8_raw', 'rssi']
        for fieldname, fieldvalue in zip(self._fieldnames, (time_boot_ms, port, chan1_raw, chan2_raw, chan3_raw, chan4_raw, chan5_raw, chan6_raw, chan7_raw, chan8_raw, rssi)):
            setattr(self, fieldname, fieldvalue)

    def pack(self, mav):
        # NOTE: wire order differs from the field order above — port and
        # rssi (uint8) follow the eight uint16 channel values.
        payload = struct.pack('<IHHHHHHHHBB', self.time_boot_ms, self.chan1_raw, self.chan2_raw, self.chan3_raw, self.chan4_raw, self.chan5_raw, self.chan6_raw, self.chan7_raw, self.chan8_raw, self.port, self.rssi)
        return MAVLink_message.pack(self, mav, 244, payload)
class MAVLink_servo_output_raw_message(MAVLink_message):
    '''
    Raw servo output values (for RC input from the remote, use the
    RC_CHANNELS messages instead). Standard PPM modulation:
    1000 us is 0%, 2000 us is 100%.
    '''
    def __init__(self, time_boot_ms, port, servo1_raw, servo2_raw, servo3_raw, servo4_raw, servo5_raw, servo6_raw, servo7_raw, servo8_raw):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_SERVO_OUTPUT_RAW, 'SERVO_OUTPUT_RAW')
        self._fieldnames = ['time_boot_ms', 'port', 'servo1_raw', 'servo2_raw', 'servo3_raw', 'servo4_raw', 'servo5_raw', 'servo6_raw', 'servo7_raw', 'servo8_raw']
        for fieldname, fieldvalue in zip(self._fieldnames, (time_boot_ms, port, servo1_raw, servo2_raw, servo3_raw, servo4_raw, servo5_raw, servo6_raw, servo7_raw, servo8_raw)):
            setattr(self, fieldname, fieldvalue)

    def pack(self, mav):
        # NOTE: wire order differs from the field order above — the uint8
        # port comes last, after the eight uint16 servo values.
        payload = struct.pack('<IHHHHHHHHB', self.time_boot_ms, self.servo1_raw, self.servo2_raw, self.servo3_raw, self.servo4_raw, self.servo5_raw, self.servo6_raw, self.servo7_raw, self.servo8_raw, self.port)
        return MAVLink_message.pack(self, mav, 242, payload)
class MAVLink_mission_request_partial_list_message(MAVLink_message):
    '''
    Ask the target system/component for a slice of its mission item
    list (see http://qgroundcontrol.org/mavlink/waypoint_protocol).
    Equal start and end indices request a single waypoint.
    '''
    def __init__(self, target_system, target_component, start_index, end_index):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_MISSION_REQUEST_PARTIAL_LIST, 'MISSION_REQUEST_PARTIAL_LIST')
        self._fieldnames = ['target_system', 'target_component', 'start_index', 'end_index']
        for fieldname, fieldvalue in zip(self._fieldnames, (target_system, target_component, start_index, end_index)):
            setattr(self, fieldname, fieldvalue)

    def pack(self, mav):
        # Wire order puts the two int16 indices before the uint8 ids.
        payload = struct.pack('<hhBB', self.start_index, self.end_index, self.target_system, self.target_component)
        return MAVLink_message.pack(self, mav, 212, payload)
class MAVLink_mission_write_partial_list_message(MAVLink_message):
    '''
    Announce a partial mission-list write to the MAV. When start
    index == end index exactly one item is transmitted/updated. A
    non-zero start index beyond the current list size must be
    REJECTED by the receiver.
    '''
    def __init__(self, target_system, target_component, start_index, end_index):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_MISSION_WRITE_PARTIAL_LIST, 'MISSION_WRITE_PARTIAL_LIST')
        self._fieldnames = ['target_system', 'target_component', 'start_index', 'end_index']
        for fieldname, fieldvalue in zip(self._fieldnames, (target_system, target_component, start_index, end_index)):
            setattr(self, fieldname, fieldvalue)

    def pack(self, mav):
        # Wire order puts the two int16 indices before the uint8 ids.
        payload = struct.pack('<hhBB', self.start_index, self.end_index, self.target_system, self.target_component)
        return MAVLink_message.pack(self, mav, 9, payload)
class MAVLink_mission_item_message(MAVLink_message):
'''
Message encoding a mission item. This message is emitted to
announce the presence of a mission item and to
set a mission item on the system. The mission item can be
either in x, y, z meters (type: LOCAL) or x:lat, y:lon,
z:altitude. Local frame is Z-down, right handed (NED), global
frame is Z-up, right handed (ENU). See also
http://qgroundcontrol.org/mavlink/waypoint_protocol.
'''
def __init__(self, target_system, target_component, seq, frame, command, current, autocontinue, param1, param2, param3, param4, x, y, z):
MAVLink_message.__init__(self, MAVLINK_MSG_ID_MISSION_ITEM, 'MISSION_ITEM')
self._fieldnames = ['target_system', 'target_component', 'seq', 'frame', 'command', 'current', 'autocontinue', 'param1', 'param2', 'param3', 'param4', 'x', 'y', 'z']
self.target_system = target_system
self.target_component = target_component
self.seq = seq
self.frame = frame
self.command = command
self.current = current
self.autocontinue = autocontinue
self.param1 = param1
self.param2 = param2
self.param3 = param3
self.param4 = | |
<reponame>m-tmatma/svnmailer<filename>src/lib/svnmailer/settings/_accessors.py
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0201,W0232,W0613
# pylint-version = 0.7.0
#
# Copyright 2004-2005 <NAME> or his licensors, as applicable
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
====================
Member Descriptors
====================
This module defines the settings member descriptors used by the svnmailer.
"""
__author__ = "<NAME>"
__docformat__ = "restructuredtext en"
# Public API: one concrete descriptor class per configuration value type.
__all__ = [
    'UnicodeMember',
    'StringMember',
    'IntegerMember',
    'BooleanMember',
    'HumanBooleanMember',
    'RegexMember',
    'TokenMember',
    'TokenlistMember',
    'FilenameMember',
    'CommandlineMember',
    'QuotedstringMember',
    'StdinMember',
    'MailactionMember',
]
# global imports
import re, sys
from svnmailer import util
from svnmailer.settings import _base
class UnicodeMember(_base.BasePostmapMember):
    """ Descriptor storing a single unicode value """
    def doTransform(self, value):
        """ Normalizes the value to ``unicode``

        ``str`` input is decoded using ``self.CHARSET``; ``unicode``
        input passes through untouched.

        :Exceptions:
            - `TypeError`: The supplied value is neither ``str`` nor
              ``unicode``
            - `UnicodeError`: The supplied value is a string and cannot
              be interpreted as the specified charset
        """
        if isinstance(value, unicode):
            return value
        if not isinstance(value, str):
            raise TypeError(
                "Supplied value must be string or unicode, not %r" %
                type(value).__name__
            )
        return unicode(value, self.CHARSET)

    def doSubstitute(self, value, subst):
        """ Applies the substitution mapping to the value """
        return util.substitute(value, subst)

    def doPostmap(self, value):
        """ Runs the mapper over the value and re-normalizes the result

        :Exceptions:
            - `TypeError`: The mapped value is neither ``str`` nor
              ``unicode``
            - `UnicodeError`: The mapped value is a string and cannot
              be interpreted as the specified charset
        """
        mapped = self.mapper(value)
        return self.transform(mapped)
class StringMember(_base.BaseMember):
    """ Descriptor storing a byte string value """
    def doTransform(self, value):
        """ Coerces the value into a byte string via ``str()`` """
        return str(value)
class IntegerMember(_base.BaseMember):
    """ Descriptor storing an integer value """
    def doTransform(self, value):
        """ Coerces the value into an ``int``

        :Exceptions:
            - `TypeError`: The supplied value is not convertable
            - `ValueError`: The supplied value is not convertable
        """
        return int(value)
class BooleanMember(_base.BaseMember):
    """ Descriptor storing a boolean value """
    def doTransform(self, value):
        """ Coerces the value into a ``bool`` (standard truthiness rules) """
        return bool(value)
class HumanBooleanMember(_base.BaseMember):
    """ Boolean storage translating human readable boolean words

    :CVariables:
        - `_TRUE`: Words recognized as true (``('word', ...)``)
        - `_FALSE`: Words recognized as false (``('word', ...)``)

    :IVariables:
        - `_human`: Lookup table mapping every recognized word to its
          boolean value

    :Types:
        - `_TRUE`: ``tuple``
        - `_FALSE`: ``tuple``
        - `_human`: ``dict``
    """
    _TRUE = ('1', 'yes', 'on', 'true')
    _FALSE = ('', '0', 'no', 'off', 'false', 'none')

    def init(self):
        """ Custom initialization: build the word -> bool lookup table """
        super(HumanBooleanMember, self).init()
        mapping = {}
        for word in self._TRUE:
            mapping[word] = True
        for word in self._FALSE:
            mapping[word] = False
        self._human = mapping

    def doTransform(self, value):
        """ Translates a human readable boolean word into ``bool``

        :exception ValueError: The supplied value was not recognized as
            human boolean
        """
        key = str(value).lower()
        if key not in self._human:
            raise ValueError(
                "Value %r means neither 'true' nor 'false'" % value
            )
        return self._human[key]
class RegexMember(_base.BasePremapMember):
    """ Descriptor storing a compiled regular expression

    :ivar _flags: Flags handed to the regex compiler
    :type _flags: ``int``
    """
    def init(self):
        """ Custom initialization: pick up the compile flags parameter """
        super(RegexMember, self).init()
        self._flags = self.param.get('flags', 0)

    def doTransform(self, value):
        """ Compiles the value into a regex object

        :Exceptions:
            - `TypeError`: Invalid type of value or the flags are broken
            - `UnicodeError`: The supplied value was a ``str`` and
              could not be converted to ``unicode``
            - `ValueError`: The regex could not be compiled
        """
        if isinstance(value, str):
            value = unicode(value, self.CHARSET)
        try:
            return re.compile(value, self._flags)
        except re.error:
            raise ValueError("Regex %r could not be compiled" % value)
class TokenMember(_base.BasePremapMember):
    """ Unicode token storage

    A token is a lowercased unicode word, optionally restricted to a
    fixed set of allowed values.

    :ivar `_allowed`: List of allowed tokens (or ``None``) - saved in a
        dict for faster lookup
    :type `_allowed`: ``dict``
    """
    def init(self):
        """ Custom initialization: build the allowed-token lookup dict """
        super(TokenMember, self).init()
        allowed = self.param.get('allowed')
        if allowed:
            self._allowed = dict.fromkeys([token.lower() for token in allowed])
        else:
            self._allowed = None

    def doTransform(self, value):
        """ Transforms the value to a lowercased unicode token

        :Exceptions:
            - `TypeError`: The supplied value is neither ``str`` nor
              ``unicode``
            - `UnicodeError`: The supplied value is a string and cannot
              be interpreted as the specified charset
            - `ValueError`: The supplied value is not allowed
        """
        if isinstance(value, str):
            value = unicode(value, self.CHARSET)
        elif not isinstance(value, unicode):
            raise TypeError(
                "Supplied value must be string or unicode, not %r" %
                type(value).__name__
            )
        value = value.lower()
        # ``in`` replaces the long-deprecated dict.has_key() (removed in
        # Python 3); identical membership semantics.
        if self._allowed is not None and value and \
                value not in self._allowed:
            raise ValueError(
                "Supplied token %r is not allowed" % value
            )
        return value
class TokenList(tuple):
    """ Immutable list of tokens (marker subclass of ``tuple``) """
    # Subclassed only so descriptors can distinguish an already-parsed
    # token list from a raw string; no behavior is added.
    pass
class TokenlistMember(_base.BasePostmapMember):
    """ Unicode token list storage

    :ivar `_allowed`: List of allowed tokens (or ``None``) - saved in a
        dict for faster lookup
    :type _allowed: ``dict``
    """
    def init(self):
        """ Custom initialization: build the allowed-token lookup dict """
        super(TokenlistMember, self).init()
        allowed = self.param.get('allowed')
        if allowed:
            self._allowed = dict.fromkeys([token.lower() for token in allowed])
        else:
            self._allowed = None

    def doTransform(self, value):
        """ Turns a whitespace separated string into a `TokenList`

        :Exceptions:
            - `UnicodeError`: The supplied value was a ``str`` and could
              not be converted to ``unicode``
            - `TypeError`: The input value is neither ``str`` nor ``unicode``
              nor a `TokenList`
            - `ValueError`: At least one of the tokens is not allowed
        """
        if not isinstance(value, TokenList):
            if isinstance(value, str):
                value = unicode(value, self.CHARSET)
            elif not isinstance(value, unicode):
                raise TypeError(
                    "Supplied value must be string or unicode, not %r" %
                    type(value).__name__
                )
            value = TokenList(value.split())
        # When a mapper will run later, defer the allowed-check to doPostmap.
        if self._allowed is not None and (not self.MAP or self.mapper is None):
            self._checkallowed(value)
        return value

    def doSubstitute(self, value, subst):
        """ Substitutes each token of the list """
        return TokenList([util.substitute(token, subst) for token in value])

    def doPostmap(self, value):
        """ Maps each token and re-validates the resulting list """
        result = []
        for token in value:
            token = self.mapper(token)
            if isinstance(token, str):
                token = unicode(token, self.CHARSET)
            result.append(token)
        value = TokenList(result)
        if self._allowed is not None:
            self._checkallowed(value)
        return value

    def _checkallowed(self, value):
        """ Checks that every token is in the allowed set

        :param value: The token list
        :type value: `TokenList`

        :exception ValueError: A token was invalid
        """
        for token in value:
            # ``in`` replaces the long-deprecated dict.has_key() (removed
            # in Python 3); identical membership semantics.
            if token.lower() not in self._allowed:
                raise ValueError(
                    "Supplied token %r is not allowed" % token
                )
class FilenameMember(_base.BasePremapMember):
    """ Descriptor storing a file name recoded for the local filesystem """
    def doTransform(self, value):
        """ Recodes the file name once and marks it as recoded

        The result is either ``str`` or ``unicode`` depending on the
        OS/locale handling in ``util.filename.toLocale``. Already
        recoded values pass through unchanged.

        :Exceptions:
            - `TypeError`: The supplied value is neither ``str`` nor
              ``unicode``
            - `UnicodeError`: The supplied value cannot be recoded
        """
        if hasattr(value, '_already_recoded_filename'):
            # Idempotence: never recode twice.
            return value
        if not isinstance(value, basestring):
            raise TypeError(
                "Supplied value must be string or unicode, not %r" %
                type(value).__name__
            )
        value = util.filename.toLocale(
            value, self.CHARSET, self.FILECHARSET
        )
        class Filename(type(value)):
            """ Marker subclass flagging the value as recoded """
            _already_recoded_filename = True
        return Filename(value)
class Commandline(tuple):
    """ A parsed command line, stored as a tuple of arguments """
    def __new__(cls, command, charset, filecharset):
        """ Parses and recodes a command line

        :Parameters:
            - `command`: The command to parse
            - `charset`: The charset to apply on strings
            - `filecharset`: The charset to apply on the command

        :Types:
            - `command`: ``basestring``
            - `charset`: ``str``
            - `filecharset`: ``str``

        :return: A new `Commandline` instance (or ``None`` for an empty
            command)
        :rtype: `Commandline`

        :Exceptions:
            - `UnicodeError`: Error while recoding the command
            - `ValueError`: Invalid command line
        """
        if not command:
            return None
        argv = util.splitCommand(command)
        if isinstance(argv[0], str):
            # Byte-string command: decode every argument after the program name.
            argv[1:] = [unicode(arg, charset) for arg in argv[1:]]
        # The program name itself is recoded for the local filesystem.
        argv[0] = util.filename.toLocale(argv[0], charset, filecharset)
        return tuple.__new__(cls, argv)
class CommandlineMember(_base.BasePremapMember):
    """ Descriptor storing a parsed command line """
    def doTransform(self, value):
        """ Parses the value as a command line with quoted arguments

        :Exceptions:
            - `UnicodeError`: `value` could not be en-/decoded
            - `TypeError`: `value` is neither ``str`` nor ``unicode`` nor
              `Commandline`
            - `ValueError`: `value` represents an invalid command line
        """
        if isinstance(value, Commandline):
            return value
        if not isinstance(value, basestring):
            raise TypeError(
                "Supplied value must be string or unicode, not %r" %
                type(value).__name__
            )
        return Commandline(value, self.CHARSET, self.FILECHARSET)
class QuotedString(str):
    """ A string parsed from optionally double-quoted input

    Quoted input has its surrounding quotes stripped and backslash
    escapes (``\\\\`` and ``\\"``) resolved; ``repr()`` re-quotes.
    """
    _quoterex = (
        re.compile(r'(?:[^"\s]\S*|"[^\\"]*(?:\\[\\"][^\\"]*)*")$'),
        re.compile(r'\\([\\"])')
    )
    def __new__(cls, value = ''):
        """ Validates and unquotes the input

        :param value: The value to initialize the string
        :type value: ``str``

        :exception ValueError: The string did not pass the test
        """
        matcher, unescaper = cls._quoterex
        if value and matcher.match(value) is None:
            raise ValueError("Could not parse quoted string %r" % value)
        if value[:1] == '"':
            # Strip the quotes, then resolve backslash escapes.
            value = unescaper.sub(r'\1', value[1:-1])
        return str.__new__(cls, value)

    def __repr__(self):
        """ Returns the representation of the re-quoted string """
        escaped = str(self).replace('\\', r'\\').replace('"', r'\"')
        return repr('"%s"' % escaped)
class QuotedstringMember(_base.BasePremapMember):
""" Quoted | |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009 <NAME>
# Copyright (C) 2010-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import base64
import datetime
import dateutil.relativedelta as relativedelta
import logging
import lxml
import urlparse
import openerp
from openerp import SUPERUSER_ID
from openerp.osv import osv, fields
from openerp import tools, api
from openerp.tools.translate import _
from urllib import urlencode, quote as quote
_logger = logging.getLogger(__name__)
def format_tz(pool, cr, uid, dt, tz=False, format=False, context=None):
    """Render the server datetime string ``dt`` localized to a timezone.

    :param pool: model registry, used to look up res.lang formats
    :param dt: datetime string in ``tools.DEFAULT_SERVER_DATETIME_FORMAT``
    :param tz: optional timezone name; when falsy, whatever ``context['tz']``
        already contains (if anything) is used by ``context_timestamp``
    :param format: optional explicit strftime format; when falsy the
        context language's date/time formats (or fixed defaults) are used
    :return: the formatted, localized datetime string
    """
    context = dict(context or {})
    if tz:
        # NOTE(review): the original expression was
        # ``tz or pool.get('res.users').read(cr, SUPERUSER_ID, uid, ['tz'])['tz'] or "UTC"``;
        # both fallbacks were unreachable because this branch only runs
        # when ``tz`` is truthy. Simplified with no behavior change.
        context['tz'] = tz
    timestamp = datetime.datetime.strptime(dt, tools.DEFAULT_SERVER_DATETIME_FORMAT)
    ts = fields.datetime.context_timestamp(cr, uid, timestamp, context)
    if format:
        return ts.strftime(format)
    # No explicit format: use the context language's formats, with defaults.
    lang = context.get("lang")
    lang_params = {}
    if lang:
        res_lang = pool.get('res.lang')
        ids = res_lang.search(cr, uid, [("code", "=", lang)])
        if ids:
            lang_params = res_lang.read(cr, uid, ids[0], ["date_format", "time_format"])
    format_date = lang_params.get("date_format", '%B-%d-%Y')
    format_time = lang_params.get("time_format", '%I-%M %p')
    fdate = ts.strftime(format_date)
    ftime = ts.strftime(format_time)
    return "%s %s%s" % (fdate, ftime, (' (%s)' % tz) if tz else '')
try:
    # We use a jinja2 sandboxed environment to render mako templates.
    # Note that the rendering does not cover all the mako syntax, in particular
    # arbitrary Python statements are not accepted, and not all expressions are
    # allowed: only "public" attributes (not starting with '_') of objects may
    # be accessed.
    # This is done on purpose: it prevents incidental or malicious execution of
    # Python code that may break the security of the server.
    from jinja2.sandbox import SandboxedEnvironment
    # The delimiters below make jinja2 accept mako-style syntax:
    # ${...} variables, <% ... %> blocks, % line statements, ## line comments.
    mako_template_env = SandboxedEnvironment(
        block_start_string="<%",
        block_end_string="%>",
        variable_start_string="${",
        variable_end_string="}",
        comment_start_string="<%doc>",
        comment_end_string="</%doc>",
        line_statement_prefix="%",
        line_comment_prefix="##",
        trim_blocks=True,               # do not output newline after blocks
        autoescape=True,                # XML/HTML automatic escaping
    )
    # Expose a safe subset of builtins/helpers to template expressions.
    mako_template_env.globals.update({
        'str': str,
        'quote': quote,
        'urlencode': urlencode,
        'datetime': datetime,
        'len': len,
        'abs': abs,
        'min': min,
        'max': max,
        'sum': sum,
        'filter': filter,
        'reduce': reduce,
        'map': map,
        'round': round,

        # dateutil.relativedelta is an old-style class and cannot be directly
        # instanciated wihtin a jinja2 expression, so a lambda "proxy" is
        # is needed, apparently.
        'relativedelta': lambda *a, **kw : relativedelta.relativedelta(*a, **kw),
    })
except ImportError:
    _logger.warning("jinja2 not available, templating features will not work!")
class email_template(osv.osv):
    """Reusable templates for sending email, rendered with mako-style
    ``${...}`` expressions via the sandboxed jinja2 environment above."""
    _name = "email.template"
    _description = 'Email Templates'
    _order = 'name'  # list templates alphabetically by default
def default_get(self, cr, uid, fields, context=None):
res = super(email_template, self).default_get(cr, uid, fields, context)
if res.get('model'):
res['model_id'] = self.pool['ir.model'].search(cr, uid, [('model', '=', res.pop('model'))], context=context)[0]
return res
    def _replace_local_links(self, cr, uid, html, context=None):
        """ Post-processing of html content to replace local links to absolute
        links, using web.base.url as base url.

        :param html: html fragment to rewrite (may be falsy, returned as-is)
        :return: the rewritten html string
        """
        if not html:
            return html
        # form a tree
        root = lxml.html.fromstring(html)
        if not len(root) and root.text is None and root.tail is None:
            # Bare text fragment: wrap it so lxml produces a parseable tree.
            html = '<div>%s</div>' % html
            root = lxml.html.fromstring(html)

        base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
        (base_scheme, base_netloc, bpath, bparams, bquery, bfragment) = urlparse.urlparse(base_url)

        def _process_link(url):
            # Prepend scheme/netloc of web.base.url to scheme-less local URLs.
            new_url = url
            (scheme, netloc, path, params, query, fragment) = urlparse.urlparse(url)
            if not scheme and not netloc:
                new_url = urlparse.urlunparse((base_scheme, base_netloc, path, params, query, fragment))
            return new_url

        # check all nodes, replace :
        # - img src -> check URL
        # - a href -> check URL
        for node in root.iter():
            if node.tag == 'a' and node.get('href'):
                node.set('href', _process_link(node.get('href')))
            elif node.tag == 'img' and not node.get('src', 'data').startswith('data'):
                # default 'data' makes a missing src look like a data: URI,
                # so images without src are skipped too
                node.set('src', _process_link(node.get('src')))

        html = lxml.html.tostring(root, pretty_print=False, method='html')
        # this is ugly, but lxml/etree tostring want to put everything in a 'div' that breaks the editor -> remove that
        if html.startswith('<div>') and html.endswith('</div>'):
            html = html[5:-6]
        return html
def render_post_process(self, cr, uid, html, context=None):
html = self._replace_local_links(cr, uid, html, context=context)
return html
    def render_template_batch(self, cr, uid, template, model, res_ids, context=None, post_process=False):
        """Render the given template text once per record id.

        Replaces mako expressions ``${expr}`` with the result of evaluating
        these expressions with an evaluation context containing:

           * ``user``: browse_record of the current user
           * ``object``: browse_record of the document record this mail is
             related to
           * ``ctx``: the context passed to the mail composition wizard

        :param str template: the template text to render
        :param str model: model name of the document record this mail is
            related to
        :param list res_ids: ids of the document records the mails relate to
        :param bool post_process: when True, additionally rewrite local
            links to absolute URLs in each rendered result
        :return: dict mapping each res_id to its rendered unicode string
            (empty string on load/render failure)
        """
        if context is None:
            context = {}
        res_ids = filter(None, res_ids)         # to avoid browsing [None] below
        results = dict.fromkeys(res_ids, u"")

        # try to load the template
        try:
            template = mako_template_env.from_string(tools.ustr(template))
        except Exception:
            # A broken template renders as empty strings rather than raising.
            _logger.exception("Failed to load template %r", template)
            return results

        # prepare template variables
        user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        records = self.pool[model].browse(cr, uid, res_ids, context=context) or [None]
        variables = {
            'format_tz': lambda dt, tz=False, format=False, context=context: format_tz(self.pool, cr, uid, dt, tz, format, context),
            'user': user,
            'ctx': context,  # context kw would clash with mako internals
        }
        for record in records:
            res_id = record.id if record else None
            variables['object'] = record
            try:
                render_result = template.render(variables)
            except Exception:
                _logger.exception("Failed to render template %r using values %r" % (template, variables))
                render_result = u""
            # A template evaluating to a falsy field renders the literal
            # string "False"; blank it out instead.
            if render_result == u"False":
                render_result = u""
            results[res_id] = render_result

        if post_process:
            for res_id, result in results.iteritems():
                results[res_id] = self.render_post_process(cr, uid, result, context=context)
        return results
def get_email_template_batch(self, cr, uid, template_id=False, res_ids=None, context=None):
if context is None:
context = {}
if res_ids is None:
res_ids = [None]
results = dict.fromkeys(res_ids, False)
if not template_id:
return results
template = self.browse(cr, uid, template_id, context)
langs = self.render_template_batch(cr, uid, template.lang, template.model, res_ids, context)
for res_id, lang in langs.iteritems():
if lang:
# Use translated template if necessary
ctx = context.copy()
ctx['lang'] = lang
template = self.browse(cr, uid, template.id, ctx)
else:
template = self.browse(cr, uid, int(template_id), context)
results[res_id] = template
return results
def onchange_model_id(self, cr, uid, ids, model_id, context=None):
    """Keep the related ``model`` field in sync when ``model_id`` changes."""
    if not model_id:
        return {'value': {'model': False}}
    model_record = self.pool.get('ir.model').browse(cr, uid, model_id, context)
    return {'value': {'model': model_record.model}}
_columns = {
'name': fields.char('Name'),
'model_id': fields.many2one('ir.model', 'Applies to', help="The kind of document with with this template can be used"),
'model': fields.related('model_id', 'model', type='char', string='Related Document Model',
select=True, store=True, readonly=True),
'lang': fields.char('Language',
help="Optional translation language (ISO code) to select when sending out an email. "
"If not set, the english version will be used. "
"This should usually be a placeholder expression "
"that provides the appropriate language, e.g. "
"${object.partner_id.lang}.",
placeholder="${object.partner_id.lang}"),
'user_signature': fields.boolean('Add Signature',
help="If checked, the user's signature will be appended to the text version "
"of the message"),
'subject': fields.char('Subject', translate=True, help="Subject (placeholders may be used here)",),
'email_from': fields.char('From',
help="Sender address (placeholders may be used here). If not set, the default "
"value will be the author's email alias if configured, or email address."),
'use_default_to': fields.boolean(
'Default recipients',
help="Default recipients of the record:\n"
"- partner (using id on a partner or the partner_id field) OR\n"
"- email (using email_from or email field)"),
'email_to': fields.char('To (Emails)', help="Comma-separated recipient addresses (placeholders may be used here)"),
'partner_to': fields.char('To (Partners)',
help="Comma-separated ids of recipient partners (placeholders may be used here)",
oldname='email_recipients'),
'email_cc': fields.char('Cc', help="Carbon copy recipients (placeholders may be used here)"),
'reply_to': fields.char('Reply-To', help="Preferred response address (placeholders may be used here)"),
'mail_server_id': fields.many2one('ir.mail_server', 'Outgoing Mail Server', readonly=False,
help="Optional preferred server for outgoing mails. If not set, the highest "
"priority one will be used."),
'body_html': fields.html('Body', translate=True, sanitize=False, help="Rich-text/HTML version of the message (placeholders may be used here)"),
'report_name': fields.char('Report Filename', translate=True,
help="Name to use for the generated report file (may contain placeholders)\n"
"The extension can be omitted and will then come from the report type."),
'report_template': fields.many2one('ir.actions.report.xml', 'Optional report to print and attach'),
'ref_ir_act_window': fields.many2one('ir.actions.act_window', 'Sidebar action', readonly=True, copy=False,
help="Sidebar action to make this template available on | |
<gh_stars>1-10
import logging
from settings import *
import rascsi_interface_pb2 as proto
def get_server_info():
    """
    Sends a SERVER_INFO command to the server.
    Returns a dict with:
    - boolean status
    - str version (RaSCSI version number)
    - list of str log_levels (the log levels RaSCSI supports)
    - str current_log_level
    - list of int reserved_ids
    - str image_dir (default image directory)
    - 5 distinct lists of strs with file endings recognized by RaSCSI
    """
    command = proto.PbCommand()
    command.operation = proto.PbOperation.SERVER_INFO
    data = send_pb_command(command.SerializeToString())
    result = proto.PbResult()
    result.ParseFromString(data)

    version_info = result.server_info.version_info
    version = "%s.%s.%s" % (version_info.major_version,
                            version_info.minor_version,
                            version_info.patch_version)
    log_levels = result.server_info.log_level_info.log_levels
    current_log_level = result.server_info.log_level_info.current_log_level
    reserved_ids = list(result.server_info.reserved_ids_info.ids)
    image_dir = result.server_info.image_files_info.default_image_folder

    # Group the recognized file endings by the device type they map to;
    # a dict dispatch replaces the previous if/elif chain per ending.
    endings_by_type = {
        proto.PbDeviceType.SAHD: [],
        proto.PbDeviceType.SCHD: [],
        proto.PbDeviceType.SCRM: [],
        proto.PbDeviceType.SCMO: [],
        proto.PbDeviceType.SCCD: [],
    }
    mappings = result.server_info.mapping_info.mapping
    for ending in mappings:
        device_type = mappings[ending]
        # Endings mapped to other device types are ignored, as before.
        if device_type in endings_by_type:
            endings_by_type[device_type].append(ending)

    return {
        "status": result.status,
        "version": version,
        "log_levels": log_levels,
        "current_log_level": current_log_level,
        "reserved_ids": reserved_ids,
        "image_dir": image_dir,
        "sahd": endings_by_type[proto.PbDeviceType.SAHD],
        "schd": endings_by_type[proto.PbDeviceType.SCHD],
        "scrm": endings_by_type[proto.PbDeviceType.SCRM],
        "scmo": endings_by_type[proto.PbDeviceType.SCMO],
        "sccd": endings_by_type[proto.PbDeviceType.SCCD],
    }
def get_network_info():
    """
    Sends a NETWORK_INTERFACES_INFO command to the server.
    Returns a dict with:
    - boolean status
    - list of str ifs (network interfaces detected by RaSCSI)
    """
    command = proto.PbCommand()
    command.operation = proto.PbOperation.NETWORK_INTERFACES_INFO
    reply = proto.PbResult()
    reply.ParseFromString(send_pb_command(command.SerializeToString()))
    return {"status": reply.status, "ifs": reply.network_interfaces_info.name}
def get_device_types():
    """
    Sends a DEVICE_TYPES_INFO command to the server.
    Returns a dict with:
    - boolean status
    - list of str device_types (device types that RaSCSI supports, ex. SCHD, SCCD, etc)
    """
    command = proto.PbCommand()
    command.operation = proto.PbOperation.DEVICE_TYPES_INFO
    reply = proto.PbResult()
    reply.ParseFromString(send_pb_command(command.SerializeToString()))
    device_types = [proto.PbDeviceType.Name(prop.type)
                    for prop in reply.device_types_info.properties]
    return {"status": reply.status, "device_types": device_types}
def get_valid_scsi_ids(devices, reserved_ids):
    """
    Takes a list of dicts devices, and list of ints reserved_ids.
    Returns:
    - list of ints valid_ids: the SCSI ids (0-7) that are not reserved,
      sorted in descending order
    - int recommended_id: the id the Web UI should recommend by default —
      the highest unoccupied id, falling back to the first occupied id,
      or None when every id is reserved and nothing is attached
    """
    occupied_ids = [d["id"] for d in devices]

    unoccupied_ids = sorted(i for i in range(8)
                            if i not in reserved_ids and i not in occupied_ids)
    valid_ids = sorted((i for i in range(8) if i not in reserved_ids),
                       reverse=True)

    if unoccupied_ids:
        recommended_id = unoccupied_ids[-1]
    elif occupied_ids:
        # Previously this popped the list (mutating it); just read it.
        recommended_id = occupied_ids[0]
    else:
        # All 8 ids reserved and no devices attached: the old code raised
        # IndexError here; there is simply nothing to recommend.
        recommended_id = None
    return valid_ids, recommended_id
def attach_image(scsi_id, **kwargs):
    """
    Takes int scsi_id and kwargs containing 0 or more device properties.
    If the currently attached device is a removable device without media
    inserted, this sends an INSERT command to the server.
    If there is no currently attached device, this sends the ATTACH
    command to the server.
    Returns dict with boolean status and str msg.
    """
    command = proto.PbCommand()
    devices = proto.PbDeviceDefinition()
    devices.id = int(scsi_id)

    # kwargs.get() returns None for absent keys, so absent and explicit
    # None/"" are treated the same, as before.
    if kwargs.get("device_type") not in [None, ""]:
        devices.type = proto.PbDeviceType.Value(str(kwargs["device_type"]))
    if kwargs.get("unit") not in [None, ""]:
        devices.unit = kwargs["unit"]
    if kwargs.get("image") not in [None, ""]:
        devices.params["file"] = kwargs["image"]

    # Handling the inserting of media into an attached removable type device
    device_type = kwargs.get("device_type", None)
    currently_attached = list_devices(scsi_id, kwargs.get("unit"))["device_list"]
    if currently_attached:
        current_type = currently_attached[0]["device_type"]
    else:
        current_type = None

    if device_type in REMOVABLE_DEVICE_TYPES and current_type in REMOVABLE_DEVICE_TYPES:
        if current_type != device_type:
            # Fixed: the old line-continuation embedded a run of literal
            # indentation spaces inside the user-visible message.
            return {"status": False,
                    "msg": f"Cannot insert an image for {device_type} "
                           f"into a {current_type} device."}
        command.operation = proto.PbOperation.INSERT
    # Handling attaching a new device
    else:
        command.operation = proto.PbOperation.ATTACH
        # These properties only apply when attaching a brand new device.
        if kwargs.get("interfaces") not in [None, ""]:
            devices.params["interfaces"] = kwargs["interfaces"]
        if kwargs.get("vendor") is not None:
            devices.vendor = kwargs["vendor"]
        if kwargs.get("product") is not None:
            devices.product = kwargs["product"]
        if kwargs.get("revision") is not None:
            devices.revision = kwargs["revision"]
        if kwargs.get("block_size") not in [None, ""]:
            devices.block_size = int(kwargs["block_size"])

    command.devices.append(devices)
    data = send_pb_command(command.SerializeToString())
    result = proto.PbResult()
    result.ParseFromString(data)
    return {"status": result.status, "msg": result.msg}
def detach_by_id(scsi_id, un=None):
    """
    Takes int scsi_id and optional int un.
    Sends a DETACH command to the server.
    Returns dict with boolean status and str msg.
    """
    device = proto.PbDeviceDefinition()
    device.id = int(scsi_id)
    if un is not None:
        device.unit = int(un)
    command = proto.PbCommand()
    command.operation = proto.PbOperation.DETACH
    command.devices.append(device)
    reply = proto.PbResult()
    reply.ParseFromString(send_pb_command(command.SerializeToString()))
    return {"status": reply.status, "msg": reply.msg}
def detach_all():
    """
    Sends a DETACH_ALL command to the server.
    Returns dict with boolean status and str msg.
    """
    command = proto.PbCommand()
    command.operation = proto.PbOperation.DETACH_ALL
    reply = proto.PbResult()
    reply.ParseFromString(send_pb_command(command.SerializeToString()))
    return {"status": reply.status, "msg": reply.msg}
def eject_by_id(scsi_id, un=None):
    """
    Takes int scsi_id and optional int un.
    Sends an EJECT command to the server.
    Returns dict with boolean status and str msg.
    """
    device = proto.PbDeviceDefinition()
    device.id = int(scsi_id)
    if un is not None:
        device.unit = int(un)
    command = proto.PbCommand()
    command.operation = proto.PbOperation.EJECT
    command.devices.append(device)
    reply = proto.PbResult()
    reply.ParseFromString(send_pb_command(command.SerializeToString()))
    return {"status": reply.status, "msg": reply.msg}
def list_devices(scsi_id=None, un=None):
    """
    Takes optional int scsi_id and optional int un.
    Sends a DEVICES_INFO command to the server.
    If no scsi_id is provided, returns a list of dicts of all attached devices.
    If scsi_id is provided, returns a list of one dict for the given device.
    If no attached device is found, returns an empty list.
    Returns dict with boolean status, list of dicts device_list.
    """
    from os import path
    command = proto.PbCommand()
    command.operation = proto.PbOperation.DEVICES_INFO
    # If called with a scsi_id, request info on that device only;
    # otherwise request info on all attached devices.
    if scsi_id is not None:
        device = proto.PbDeviceDefinition()
        device.id = int(scsi_id)
        if un is not None:
            device.unit = int(un)
        command.devices.append(device)
    data = send_pb_command(command.SerializeToString())
    result = proto.PbResult()
    result.ParseFromString(data)

    # Return an empty list if no devices are attached
    if not result.devices_info.devices:
        return {"status": False, "device_list": []}

    device_list = []
    # Replaced the index-based while loop with direct iteration.
    for device in result.devices_info.devices:
        # Building the status string
        # TODO: This formatting should probably be moved elsewhere
        status_flags = []
        if device.properties.read_only:
            status_flags.append("Read-Only")
        if device.status.protected and device.properties.protectable:
            status_flags.append("Write-Protected")
        if device.status.removed and device.properties.removable:
            status_flags.append("No Media")
        if device.status.locked and device.properties.lockable:
            status_flags.append("Locked")
        image_path = device.file.name
        block_size = device.block_size
        device_list.append(
            {
                "id": device.id,
                "un": device.unit,
                "device_type": proto.PbDeviceType.Name(device.type),
                "status": ", ".join(status_flags),
                "image": image_path,
                "file": path.basename(image_path),
                "params": device.params,
                "vendor": device.vendor,
                "product": device.product,
                "revision": device.revision,
                "block_size": block_size,
                "size": int(device.block_count) * int(block_size),
            }
        )
    return {"status": True, "device_list": device_list}
def sort_and_format_devices(devices):
    """
    Takes a list of dicts devices and returns a new list of dicts.
    Sorts by SCSI ID ascending (0 to 7).
    For SCSI IDs where no device is attached, injects a dict with
    placeholder text. The input list is left unmodified (the previous
    implementation appended the padding entries to the caller's list).
    """
    occupied_ids = {d["id"] for d in devices}
    # Work on a copy so the caller's list is not mutated.
    formatted_devices = list(devices)
    # Add padding devices for the unoccupied SCSI ids.
    for scsi_id in range(8):
        if scsi_id not in occupied_ids:
            formatted_devices.append({"id": scsi_id, "device_type": "-",
                                      "status": "-", "file": "-", "product": "-"})
    # Sort list of devices by id (single-digit ids, so string order == numeric order)
    formatted_devices.sort(key=lambda dic: str(dic["id"]))
    return formatted_devices
def set_log_level(log_level):
    """
    Sends a LOG_LEVEL command to the server.
    Takes str log_level as an argument.
    Returns dict with boolean status and str msg.
    """
    command = proto.PbCommand()
    command.operation = proto.PbOperation.LOG_LEVEL
    command.params["level"] = str(log_level)
    reply = proto.PbResult()
    reply.ParseFromString(send_pb_command(command.SerializeToString()))
    return {"status": reply.status, "msg": reply.msg}
def send_pb_command(payload):
"""
Takes a str containing a serialized protobuf as argument.
Establishes a socket connection with RaSCSI.
"""
# Host and port number where rascsi is listening for socket connections
HOST = 'localhost'
PORT = 6868
counter = 0
tries = 100
error_msg = ""
import socket
while counter < tries:
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((HOST, PORT))
return send_over_socket(s, payload)
except socket.error as error:
counter += 1
logging.warning("The RaSCSI service is not responding - attempt " + \
str(counter) + "/" + str(tries))
error_msg = str(error)
logging.error(error_msg)
| |
if header is None:
header = r.dtype.names
justify_pad_prec = [get_justify(header[i],r.__getitem__(colname),precision[i]) for i, colname in enumerate(r.dtype.names)]
justify_pad_prec_spacer = []
for i in range(len(justify_pad_prec)):
just,pad,prec = justify_pad_prec[i]
if i == 0:
justify_pad_prec_spacer.append((just,pad,prec,0))
else:
pjust,ppad,pprec = justify_pad_prec[i-1]
if pjust == 0 and just == 1:
justify_pad_prec_spacer.append((just,pad-padding,prec,0))
elif pjust == 1 and just == 0:
justify_pad_prec_spacer.append((just,pad,prec,padding))
else:
justify_pad_prec_spacer.append((just,pad,prec,0))
def format(item, just_pad_prec_spacer):
just, pad, prec, spacer = just_pad_prec_spacer
if just == 0:
return spacer*' ' + str(item).ljust(pad)
else:
if get_type(item) == float:
item = (prec%float(item))
elif get_type(item) == int:
item = (prec%int(item))
return item.rjust(pad)
textl = []
textl.append(''.join([format(colitem,justify_pad_prec_spacer[j]) for j, colitem in enumerate(header)]))
for i, row in enumerate(r):
textl.append(''.join([format(colitem,justify_pad_prec_spacer[j]) for j, colitem in enumerate(row)]))
if i==0:
textl[0] = textl[0].rstrip()
text = os.linesep.join(textl)
return text
def rec2csv(r, fname, delimiter=',', formatd=None, missing='',
            missingd=None, withheader=True):
    """
    Save the data from numpy recarray *r* into a
    comma-/space-/tab-delimited file. The record array dtype names
    will be used for column headers.
    *fname*: can be a filename or a file handle. Support for gzipped
    files is automatic, if the filename ends in '.gz'
    *withheader*: if withheader is False, do not write the attribute
    names in the first row
    for formatd type FormatFloat, we override the precision to store
    full precision floats in the CSV file
    .. seealso::
        :func:`csv2rec`
        For information about *missing* and *missingd*, which can
        be used to fill in masked values into your CSV file.
    """
    if missingd is None:
        missingd = dict()
    def with_mask(func):
        # Wrap a column formatter so that masked cells emit the column's
        # "missing" placeholder instead of being passed to the formatter.
        def newfunc(val, mask, mval):
            if mask:
                return mval
            else:
                return func(val)
        return newfunc
    if r.ndim != 1:
        raise ValueError('rec2csv only operates on 1 dimensional recarrays')
    # Build one mask-aware tostr formatter per column.
    formatd = get_formatd(r, formatd)
    funcs = []
    for i, name in enumerate(r.dtype.names):
        funcs.append(with_mask(csvformat_factory(formatd[name]).tostr))
    # NOTE(review): 'wb' matches the Python 2 csv API; under Python 3 the
    # csv writer expects a text-mode handle — confirm the target version.
    fh, opened = cbook.to_filehandle(fname, 'wb', return_opened=True)
    writer = csv.writer(fh, delimiter=delimiter)
    header = r.dtype.names
    if withheader:
        writer.writerow(header)
    # Our list of specials for missing values
    mvals = []
    for name in header:
        mvals.append(missingd.get(name, missing))
    # A masked recarray exposes the per-field mask via the private
    # _fieldmask attribute; detect that once from the first row.
    ismasked = False
    if len(r):
        row = r[0]
        ismasked = hasattr(row, '_fieldmask')
    for row in r:
        if ismasked:
            row, rowmask = row.item(), row._fieldmask.item()
        else:
            rowmask = [False] * len(row)
        writer.writerow([func(val, mask, mval) for func, val, mask, mval
                         in zip(funcs, row, rowmask, mvals)])
    # Only close handles this function opened itself.
    if opened:
        fh.close()
def griddata(x,y,z,xi,yi,interp='nn'):
    """
    ``zi = griddata(x,y,z,xi,yi)`` fits a surface of the form *z* =
    *f*(*x*, *y*) to the data in the (usually) nonuniformly spaced
    vectors (*x*, *y*, *z*). :func:`griddata` interpolates this
    surface at the points specified by (*xi*, *yi*) to produce
    *zi*. *xi* and *yi* must describe a regular grid, can be either 1D
    or 2D, but must be monotonically increasing.
    A masked array is returned if any grid points are outside convex
    hull defined by input data (no extrapolation is done).
    If interp keyword is set to '`nn`' (default),
    uses natural neighbor interpolation based on Delaunay
    triangulation. By default, this algorithm is provided by the
    :mod:`matplotlib.delaunay` package, written by <NAME>. The
    triangulation algorithm in this package is known to fail on some
    nearly pathological cases. For this reason, a separate toolkit
    (:mod:`mpl_tookits.natgrid`) has been created that provides a more
    robust algorithm fof triangulation and interpolation. This
    toolkit is based on the NCAR natgrid library, which contains code
    that is not redistributable under a BSD-compatible license. When
    installed, this function will use the :mod:`mpl_toolkits.natgrid`
    algorithm, otherwise it will use the built-in
    :mod:`matplotlib.delaunay` package.
    If the interp keyword is set to '`linear`', then linear interpolation
    is used instead of natural neighbor. In this case, the output grid
    is assumed to be regular with a constant grid spacing in both the x and
    y directions. For regular grids with nonconstant grid spacing, you
    must use natural neighbor interpolation. Linear interpolation is only valid if
    :mod:`matplotlib.delaunay` package is used - :mod:`mpl_tookits.natgrid`
    only provides natural neighbor interpolation.
    The natgrid matplotlib toolkit can be downloaded from
    http://sourceforge.net/project/showfiles.php?group_id=80706&package_id=142792
    """
    # Prefer the optional natgrid toolkit when it is installed; otherwise
    # fall back to the bundled matplotlib.delaunay implementation.
    try:
        from mpl_toolkits.natgrid import _natgrid, __version__
        _use_natgrid = True
    except ImportError:
        import matplotlib.delaunay as delaunay
        from matplotlib.delaunay import __version__
        _use_natgrid = False
    # Report the selected backend once per session (the one-shot flag is
    # initialised on the function object right below this definition).
    if not griddata._reported:
        if _use_natgrid:
            verbose.report('using natgrid version %s' % __version__)
        else:
            verbose.report('using delaunay version %s' % __version__)
        griddata._reported = True
    if xi.ndim != yi.ndim:
        raise TypeError("inputs xi and yi must have same number of dimensions (1 or 2)")
    if xi.ndim != 1 and xi.ndim != 2:
        raise TypeError("inputs xi and yi must be 1D or 2D.")
    if not len(x)==len(y)==len(z):
        raise TypeError("inputs x,y,z must all be 1D arrays of the same length")
    # remove masked points.
    if hasattr(z,'mask'):
        # make sure mask is not a scalar boolean array.
        if z.mask.ndim:
            x = x.compress(z.mask == False)
            y = y.compress(z.mask == False)
            z = z.compressed()
    if _use_natgrid: # use natgrid toolkit if available.
        if interp != 'nn':
            raise ValueError("only natural neighor interpolation"
                             " allowed when using natgrid toolkit in griddata.")
        if xi.ndim == 2:
            # natgrid wants the grid axes as 1D vectors.
            xi = xi[0,:]
            yi = yi[:,0]
        # override default natgrid internal parameters.
        _natgrid.seti('ext',0)
        _natgrid.setr('nul',np.nan)
        # cast input arrays to doubles (this makes a copy)
        # NOTE(review): np.float was removed in NumPy 1.24; this legacy
        # branch only runs when the old natgrid toolkit is installed.
        x = x.astype(np.float)
        y = y.astype(np.float)
        z = z.astype(np.float)
        xo = xi.astype(np.float)
        yo = yi.astype(np.float)
        if min(xo[1:]-xo[0:-1]) < 0 or min(yo[1:]-yo[0:-1]) < 0:
            raise ValueError('output grid defined by xi,yi must be monotone increasing')
        # allocate array for output (buffer will be overwritten by nagridd)
        zo = np.empty((yo.shape[0],xo.shape[0]), np.float)
        _natgrid.natgridd(x,y,z,xo,yo,zo)
    else: # use <NAME>'s delaunay package from scikits (default)
        if xi.ndim != yi.ndim:
            raise TypeError("inputs xi and yi must have same number of dimensions (1 or 2)")
        if xi.ndim != 1 and xi.ndim != 2:
            raise TypeError("inputs xi and yi must be 1D or 2D.")
        if xi.ndim == 1:
            xi,yi = np.meshgrid(xi,yi)
        # triangulate data
        tri = delaunay.Triangulation(x,y)
        # interpolate data
        if interp == 'nn':
            interp = tri.nn_interpolator(z)
            zo = interp(xi,yi)
        elif interp == 'linear':
            # make sure grid has constant dx, dy
            dx = xi[0,1:]-xi[0,0:-1]
            dy = yi[1:,0]-yi[0:-1,0]
            epsx = np.finfo(xi.dtype).resolution
            epsy = np.finfo(yi.dtype).resolution
            if dx.max()-dx.min() > epsx or dy.max()-dy.min() > epsy:
                raise ValueError("output grid must have constant spacing"
                                 " when using interp='linear'")
            interp = tri.linear_interpolator(z)
            zo = interp[yi.min():yi.max():complex(0,yi.shape[0]),
                        xi.min():xi.max():complex(0,xi.shape[1])]
        else:
            raise ValueError("interp keyword must be one of"
                             " 'linear' (for linear interpolation) or 'nn'"
                             " (for natural neighbor interpolation). Default is 'nn'.")
    # mask points on grid outside convex hull of input data.
    if np.any(np.isnan(zo)):
        zo = np.ma.masked_where(np.isnan(zo),zo)
    return zo
# One-shot flag: has the chosen backend (natgrid vs delaunay) been reported?
griddata._reported = False
##################################################
# Linear interpolation algorithms
##################################################
def less_simple_linear_interpolation( x, y, xi, extrap=False ):
    """
    This function provides simple (but somewhat less so than
    :func:`cbook.simple_linear_interpolation`) linear interpolation.
    :func:`simple_linear_interpolation` will give a list of point
    between a start and an end, while this does true linear
    interpolation at an arbitrary set of points.
    This is very inefficient linear interpolation meant to be used
    only for a small number of points in relatively non-intensive use
    cases. For real linear interpolation, use scipy.

    *x*, *y*: sample coordinates; *x* must be sorted ascending.
    *xi*: scalar or sequence of points to interpolate at.
    *extrap*: if True, points outside [x[0], x[-1]] are clamped to the
    end values; otherwise those entries are left as NaN.
    """
    # Accept a bare scalar as well as a sequence of query points.
    # np.iterable performs the same try-iter() test as the previous
    # cbook.is_scalar helper, without the matplotlib.cbook dependency.
    if not np.iterable(xi):
        xi = [xi]
    x = np.asarray(x)
    y = np.asarray(y)
    xi = np.asarray(xi)
    # Output has one entry per query point; start everything at NaN.
    s = list(y.shape)
    s[0] = len(xi)
    yi = np.tile( np.nan, s )
    for ii, xx in enumerate(xi):
        bb = x == xx
        if np.any(bb):
            # Exact hit: take the first matching sample.
            jj, = np.nonzero(bb)
            yi[ii] = y[jj[0]]
        elif xx < x[0]:
            if extrap:
                yi[ii] = y[0]
        elif xx > x[-1]:
            if extrap:
                yi[ii] = y[-1]
        else:
            # Interior point: interpolate between the bracketing samples.
            jj, = np.nonzero(x < xx)
            jj = max(jj)
            yi[ii] = y[jj] + (xx - x[jj]) / (x[jj + 1] - x[jj]) * (y[jj + 1] - y[jj])
    return yi
def slopes(x,y):
"""
:func:`slopes` calculates the slope *y*'(*x*)
The slope is estimated using the slope obtained from that of a
parabola through any three consecutive points.
This method should be superior to that described in the appendix
of A CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by <NAME> (Creative Computing July 1980) in at least one aspect:
Circles for interpolation demand a known aspect ratio between
*x*- and *y*-values. For many functions, however, the abscissa
are given in different dimensions, so an aspect ratio is
completely arbitrary.
The parabola method gives very similar results to the circle
method for most regular cases but behaves much better in special
cases.
<NAME>, Institute of Theoretical Physics, University or
Regensburg, April 2006 Norbert.Nemec at physik.uni-regensburg.de
(inspired by a original implementation by <NAME>,
Icelandic | |
<filename>spring/docgen.py
import math
import random
import time
from hashlib import md5
from typing import Iterator, List, Tuple
import numpy as np
from fastdocgen import build_achievements
from spring.dictionary import (
NUM_STATES,
NUM_STREET_SUFFIXES,
STATES,
STREET_SUFFIX,
)
from spring.settings import WorkloadSettings
PRIME = 971
class Generator:
def __init__(self):
self.prefix = None
def __iter__(self):
return self
def add_prefix(self, key: str) -> str:
if self.prefix:
return '%s-%s' % (self.prefix, key)
return key
class NewOrderedKey(Generator):
    """Generate ordered keys with an optional common prefix.

    These keys are usually used for inserting new documents into the
    database. The suffix is a 12-character, zero-padded decimal string.
    This key pattern is rather uncommon in real-world scenarios.
    """

    def __init__(self, prefix: str):
        self.prefix = prefix

    def next(self, curr_items) -> str:
        # The current item counter, zero-padded to 12 digits.
        return self.add_prefix('%012d' % curr_items)
class KeyForRemoval(Generator):
    """Pick an existing key at the beginning of the key space."""

    def __init__(self, prefix: str):
        self.prefix = prefix

    def next(self, curr_deletes: int) -> str:
        # The oldest not-yet-deleted key is exactly the delete counter.
        return self.add_prefix('%012d' % curr_deletes)
class UniformKey(Generator):
    """Randomly sample an existing key from the entire key space.

    Sampling uses a discrete uniform distribution over
    [curr_deletes, curr_items).

    |<-------------------- key space -------------------->|
    |xxxxxxxxx|...........................................|
    ^         ^
    |         |
    curr_deletes --- curr_items

    This generator should not be used when the key access pattern is
    important.
    """

    def __init__(self, prefix: str):
        self.prefix = prefix

    def next(self, curr_items: int, curr_deletes: int, *args) -> str:
        # np.random.random_integers (inclusive bounds) was deprecated in
        # NumPy 1.11 and removed in 1.25; randint uses a half-open
        # interval, so high=curr_items samples the same range as the old
        # high=curr_items - 1.
        key = np.random.randint(low=curr_deletes, high=curr_items)
        key = '%012d' % key
        return self.add_prefix(key)
class WorkingSetKey(Generator):
    """Extend UniformKey by sampling keys from a fixed working set.

    The working set is a subset of the entire key space, characterized by
    two options:
    * working_set - a percentage (from 0 to 100) of the entire key space
      that should be considered as the working set.
    * working_set_access - a percentage (from 0 to 100) that defines the
      probability at which the keys from the working set are being used.
      This parameter implements a deterministic cache miss ratio.

    |<--------------------------- key space ------------------------->|
    |<----------- cold items ---------->|<---- hot items ---->|
    """

    def __init__(self, ws: WorkloadSettings, prefix: str):
        self.num_hot_items = int(ws.items * ws.working_set / 100)
        self.working_set_access = ws.working_set_access
        self.prefix = prefix

    def next(self, curr_items: int, curr_deletes: int, *args) -> str:
        num_cold_items = curr_items - self.num_hot_items
        if random.randint(0, 100) <= self.working_set_access:  # cache hit
            left_boundary = num_cold_items
            right_boundary = curr_items
        else:  # cache miss
            left_boundary = curr_deletes
            right_boundary = num_cold_items
        # randint's half-open interval replaces the removed
        # np.random.random_integers(low, high - 1) call.
        key = np.random.randint(low=left_boundary, high=right_boundary)
        key = '%012d' % key
        return self.add_prefix(key)
class MovingWorkingSetKey(Generator):
    """Sample keys from a working set whose start slides over time.

    ``current_hot_load_start`` and ``timer_elapse`` are shared values
    (objects with a ``.value`` attribute); when the timer fires, the hot
    window advances by ``working_set_moving_docs``, wrapping around the
    cold portion of the key space.
    """

    def __init__(self, ws: WorkloadSettings, prefix: str):
        self.working_set = ws.working_set
        self.working_set_access = ws.working_set_access
        self.working_set_moving_docs = ws.working_set_moving_docs
        self.prefix = prefix

    def next(self, curr_items: int, curr_deletes: int,
             current_hot_load_start: int, timer_elapse: int) -> str:
        num_existing_items = curr_items - curr_deletes
        num_hot_items = int(num_existing_items * self.working_set / 100)
        if timer_elapse.value:
            timer_elapse.value = 0
            # Create next hot_load_start, add working_set_move_docs and then
            # modulus to prevent going beyond num_docs
            num_items = num_existing_items - num_hot_items
            offset = current_hot_load_start.value + self.working_set_moving_docs
            current_hot_load_start.value = int(offset % num_items)
        left_boundary = curr_deletes + current_hot_load_start.value
        right_boundary = left_boundary + num_hot_items
        # Half-open randint replaces the removed np.random.random_integers.
        key = np.random.randint(low=left_boundary, high=right_boundary)
        key = '%012d' % key
        return self.add_prefix(key)
class ZipfKey(Generator):
    """Sample existing keys with a Zipfian bias towards the newest keys."""

    ALPHA = 1.9

    def __init__(self, prefix: str):
        self.prefix = prefix

    def next(self, curr_items: int, curr_deletes: int, *args) -> str:
        # Offsets drawn from a Zipf distribution favor small values, so
        # recently inserted keys are picked most often.
        key = curr_items - np.random.zipf(a=self.ALPHA)
        if key <= curr_deletes:
            # Zipf samples are unbounded; clamp overshoots to the newest key.
            key = curr_items - 1
        return self.add_prefix('%012d' % key)
class SequentialKey(Generator):
    """Sequentially generate new keys, equally divided between the workers.

    The key space is split across the workers; each worker iterates over
    its own stride of the key space (based on the sequential worker
    identifier). This generator is used for loading data.
    """

    def __init__(self, sid: int, ws: WorkloadSettings, prefix: str):
        self.sid = sid
        self.ws = ws
        self.prefix = prefix

    def __iter__(self) -> Iterator[str]:
        # Worker `sid` covers sid, sid + workers, sid + 2 * workers, ...
        for seq_id in range(self.sid, self.ws.items, self.ws.workers):
            yield self.add_prefix('%012d' % seq_id)
class HotKey(Generator):
    """Generate the existing hot keys, equally divided between the workers.

    The working set (the tail of the key space) is split across the
    workers; each worker iterates over its own stride of it. This
    generator is used for warming up the working set.
    """

    def __init__(self, sid: int, ws: WorkloadSettings, prefix: str):
        self.sid = sid
        self.ws = ws
        self.prefix = prefix

    def __iter__(self):
        num_hot_keys = int(self.ws.items * self.ws.working_set / 100)
        # Hot keys occupy the end of the key space.
        first_hot_key = self.ws.items - num_hot_keys
        for seq_id in range(first_hot_key + self.sid,
                            self.ws.items,
                            self.ws.workers):
            yield self.add_prefix('%012d' % seq_id)
class UnorderedKey(Generator):
    """Improve SequentialKey by randomizing the order of insertions.

    The key space is still the same; only the iteration order within each
    worker's contiguous slice is scrambled.
    """

    def __init__(self, sid: int, ws: WorkloadSettings, prefix: str):
        self.sid = sid
        self.ws = ws
        self.prefix = prefix

    def __iter__(self):
        per_worker = self.ws.items // self.ws.workers
        offset = self.sid * per_worker
        # Stepping by a fixed prime modulo the slice size scrambles the
        # visiting order deterministically.
        key_id = 0
        for _ in range(per_worker):
            key_id = (key_id + PRIME) % per_worker
            yield self.add_prefix('%012d' % (key_id + offset))
class KeyForCASUpdate(Generator):
    """Sample a key from the calling worker's slice of the key space."""

    def __init__(self, total_workers: int, prefix: str):
        self.n1ql_workers = total_workers
        self.prefix = prefix

    def next(self, sid: int, curr_items: int) -> str:
        per_worker_items = curr_items // self.n1ql_workers
        left_boundary = sid * per_worker_items
        right_boundary = left_boundary + per_worker_items
        # Half-open randint replaces np.random.random_integers, which was
        # deprecated in NumPy 1.11 and removed in 1.25.
        key = np.random.randint(low=left_boundary, high=right_boundary)
        return self.add_prefix('%012d' % key)
class FTSKey(Generator):
    """Generate random hexadecimal keys for FTS mutation workloads."""

    def __init__(self, ws: WorkloadSettings):
        # Without an FTS config there is nothing to mutate.
        self.mutate_items = ws.fts_config.mutate_items if ws.fts_config else 0

    def next(self) -> str:
        # Hex representation without the '0x' prefix.
        return '%x' % random.randint(0, self.mutate_items)
class HashKeys:
    """Optionally turn sequential keys into MD5-based hashed keys."""

    def __init__(self, ws: WorkloadSettings):
        self.ws = ws

    def hash_it(self, key: str) -> str:
        """Return the key, hashed and truncated per the workload settings."""
        if not self.ws.hash_keys:
            return key
        digest = md5(key.encode('utf-8')).hexdigest()
        if self.ws.key_length:
            # Repeat the 32-char digest enough times to cover the
            # requested key length, then truncate.
            num_slices = int(math.ceil(self.ws.key_length / 32))
            return (num_slices * digest)[:self.ws.key_length]
        return digest
class String(Generator):
    """Generate deterministic pseudo-random strings of a target size."""

    def __init__(self, avg_size: int):
        self.avg_size = avg_size

    @staticmethod
    def _build_alphabet(key: str) -> str:
        # Two MD5 digests (of the key and of its reverse) yield a
        # deterministic 64-character alphabet per key.
        raw = key.encode('utf-8')
        return md5(raw).hexdigest() + md5(raw[::-1]).hexdigest()

    @staticmethod
    def _build_string(alphabet: str, length: float) -> str:
        # Repeat the alphabet enough times to cover `length`, then truncate.
        num_slices = int(math.ceil(length / 64))  # 64 == len(alphabet)
        return (num_slices * alphabet)[:int(length)]

    def next(self, key) -> str:
        return self._build_string(self._build_alphabet(key), self.avg_size)
class Document(String):
    """Derive a deterministic document (dict) from the per-key alphabet.

    Every field except the body is a fixed slice or hash of the 64-char
    alphabet; the body length varies randomly around ``avg_size``.
    """

    SIZE_VARIATION = 0.25  # 25%
    OVERHEAD = 205  # Minimum size due to static fields, body size is variable

    @classmethod
    def _get_variation_coeff(cls) -> float:
        # Random multiplier in [1 - SIZE_VARIATION, 1 + SIZE_VARIATION].
        return np.random.uniform(1 - cls.SIZE_VARIATION, 1 + cls.SIZE_VARIATION)

    @staticmethod
    def _build_name(alphabet: str) -> str:
        return '%s %s' % (alphabet[:6], alphabet[6:12])  # % is faster than format()

    @staticmethod
    def _build_email(alphabet: str) -> str:
        # NOTE(review): the format string below appears corrupted (redacted
        # email placeholder); preserved verbatim.
        return '%<EMAIL>' % (alphabet[12:18], alphabet[18:24])

    @staticmethod
    def _build_alt_email(alphabet: str) -> str:
        name = random.randint(1, 9)
        domain = random.randint(12, 18)
        # NOTE(review): corrupted/redacted format string preserved verbatim.
        return '%<EMAIL>' % (alphabet[name:name + 6], alphabet[domain:domain + 6])

    @staticmethod
    def _build_city(alphabet: str) -> str:
        return alphabet[24:30]

    @staticmethod
    def _build_realm(alphabet: str) -> str:
        return alphabet[30:36]

    @staticmethod
    def _build_country(alphabet: str) -> str:
        return alphabet[42:48]

    @staticmethod
    def _build_county(alphabet: str) -> str:
        return alphabet[48:54]

    @staticmethod
    def _build_street(alphabet: str) -> str:
        return alphabet[54:62]

    @staticmethod
    def _build_coins(alphabet: str) -> float:
        # Four hex digits scaled down, with a floor of 0.1.
        return max(0.1, int(alphabet[36:40], 16) / 100)

    @staticmethod
    def _build_gmtime(alphabet: str) -> Tuple[int]:
        # Pseudo-random timestamp derived from one hex digit.
        seconds = 396 * 24 * 3600 * (int(alphabet[63], 16) % 12)
        return tuple(time.gmtime(seconds))

    @staticmethod
    def _build_year(alphabet: str) -> int:
        return 1985 + int(alphabet[62], 16)

    @staticmethod
    def _build_state(alphabet: str) -> str:
        idx = alphabet.find('7') % NUM_STATES
        return STATES[idx][0]

    @staticmethod
    def _build_full_state(alphabet: str) -> str:
        idx = alphabet.find('8') % NUM_STATES
        return STATES[idx][1]

    @staticmethod
    def _build_category(alphabet: str) -> int:
        return int(alphabet[41], 16) % 3

    @staticmethod
    def _build_achievements(alphabet: str) -> List[int]:
        return build_achievements(alphabet) or [0]

    def _size(self) -> float:
        # Body length: target size minus the static-field overhead, scaled
        # by the random variation coefficient.
        if self.avg_size <= self.OVERHEAD:
            return 0
        return self._get_variation_coeff() * (self.avg_size - self.OVERHEAD)

    def next(self, key) -> dict:
        alphabet = self._build_alphabet(key)
        size = self._size()
        return {
            'name': self._build_name(alphabet),
            'email': self._build_email(alphabet),
            'alt_email': self._build_alt_email(alphabet),
            'city': self._build_city(alphabet),
            'realm': self._build_realm(alphabet),
            'coins': self._build_coins(alphabet),
            'category': self._build_category(alphabet),
            'achievements': self._build_achievements(alphabet),
            'body': self._build_string(alphabet, size),
            'channels': '123',
        }
class NestedDocument(Document):
OVERHEAD = 450 # Minimum size due to static fields, body size is variable
def __init__(self, avg_size: int):
super().__init__(avg_size)
self.capped_field_value = {} # type: dict
def _size(self) -> float:
if self.avg_size <= self.OVERHEAD:
return 0
if random.random() < 0.975: # Normal distribution, mean=self.avg_size
normal = np.random.normal(loc=1.0, scale=0.17)
return (self.avg_size - self.OVERHEAD) * normal
else: # Outliers - beta distribution, 2KB-2MB range
return 2048 / | |
word_of_perms(sl2z_word_problem(x), self.L(), self.R())
#
# Group stuff
#
def is_normal(self):
    r"""
    Test whether the group is normal.

    EXAMPLES::

        sage: G = Gamma(2).as_permutation_group()
        sage: G.is_normal()
        True
        sage: G = Gamma1(2).as_permutation_group()
        sage: G.is_normal()
        False
    """
    n = self.index()
    G = self.relabel(inplace=False)
    s2 = G._S2
    s3 = G._S3
    # Conjugation by a coset representative is a relabelling of the cosets;
    # the group is normal iff each such relabelling fixes the S2/S3 action.
    for root in (s2[0], s3[0]):
        m = G._canonical_rooted_labels(root)
        conj_s2 = [None] * n
        conj_s3 = [None] * n
        for i in range(n):
            conj_s2[m[i]] = m[s2[i]]
            conj_s3[m[i]] = m[s3[i]]
        if conj_s2 != s2 or conj_s3 != s3:
            return False
    return True
def _conjugate(self, j0):
    r"""
    Return the conjugate of self rooted at ``j0``.

    The conjugate corresponds to the same coset graph with labels permuted
    so that the coset ``j0`` becomes the identity coset.

    EXAMPLES::

        sage: G = ArithmeticSubgroup_Permutation(S2='(1,2)(3,4)',S3='(1,2,3)(4,5,6)')
        sage: G
        Arithmetic subgroup with permutations of right cosets
        S2=(1,2)(3,4)
        S3=(1,2,3)(4,5,6)
        L=(1,4,6,5,3)
        R=(2,4,5,6,3)
        sage: G._conjugate(0) == G
        True
        sage: G._conjugate(4)
        Arithmetic subgroup with permutations of right cosets
        S2=(3,4)(5,6)
        S3=(1,2,3)(4,5,6)
        L=(1,4,5,3,2)
        R=(1,2,4,6,3)
    """
    n = self.index()
    m = self._canonical_rooted_labels(j0)
    # Relabel each of the four defining permutations through the map m.
    relabelled = []
    for perm in (self._S2, self._S3, self._L, self._R):
        new_perm = [None] * n
        for i in range(n):
            new_perm[m[i]] = m[perm[i]]
        relabelled.append(new_perm)
    return self.__class__(relabelled[0], relabelled[1],
                          relabelled[2], relabelled[3], True)
def coset_graph(self,
                right_cosets=False,
                s2_edges=True, s3_edges=True, l_edges=False, r_edges=False,
                s2_label='s2', s3_label='s3', l_label='l', r_label='r'):
    r"""
    Return the right (or left) coset graph.

    INPUT:

    - ``right_cosets`` - bool (default: False) - right or left coset graph

    - ``s2_edges`` - bool (default: True) - put edges associated to s2

    - ``s3_edges`` - bool (default: True) - put edges associated to s3

    - ``l_edges`` - bool (default: False) - put edges associated to l

    - ``r_edges`` - bool (default: False) - put edges associated to r

    - ``s2_label``, ``s3_label``, ``l_label``, ``r_label`` - the labels to
      put on the edges corresponding to the generators action. Use ``None``
      for no label.

    EXAMPLES::

        sage: G = ArithmeticSubgroup_Permutation(S2="(1,2)",S3="()")
        sage: G
        Arithmetic subgroup with permutations of right cosets
        S2=(1,2)
        S3=()
        L=(1,2)
        R=(1,2)
        sage: G.index()
        2
        sage: G.coset_graph()
        Looped multi-digraph on 2 vertices
    """
    from sage.graphs.digraph import DiGraph
    n = self.index()  # hoisted: the index is used many times below
    res = DiGraph(multiedges=True, loops=True)
    res.add_vertices(list(range(n)))
    if right_cosets:  # invert the permutations
        S2 = [None] * n
        S3 = [None] * n
        L = [None] * n
        R = [None] * n
        for i in range(n):
            S2[self._S2[i]] = i
            S3[self._S3[i]] = i
            L[self._L[i]] = i
            R[self._R[i]] = i
    else:
        S2 = self._S2
        S3 = self._S3
        L = self._L
        R = self._R

    def _add_edges(perm, label):
        # Add one edge i -> perm[i] per coset, labelled when requested.
        if label is not None:
            res.add_edges((i, perm[i], label) for i in range(n))
        else:
            res.add_edges((i, perm[i]) for i in range(n))

    if s2_edges:
        _add_edges(S2, s2_label)
    if s3_edges:
        # Bug fix: the original unlabeled branch added (i, S3) — the whole
        # permutation list as target — instead of (i, S3[i]).
        _add_edges(S3, s3_label)
    if l_edges:
        _add_edges(L, l_label)
    if r_edges:
        _add_edges(R, r_label)
    res.plot.options['color_by_label'] = True
    if s2_label or s3_label or l_label or r_label:
        res.plot.options['edge_labels'] = True
    return res
def generalised_level(self):
    r"""
    Return the generalised level of this subgroup.

    The *generalised level* of a subgroup of the modular group is the least
    common multiple of the widths of its cusps. By a theorem of Wohlfart,
    for even congruence subgroups it coincides with the (conventional)
    level; for odd congruence subgroups the level is either the generalised
    level or twice it [KSV2011]_.

    EXAMPLES::

        sage: G = Gamma(2).as_permutation_group()
        sage: G.generalised_level()
        2
        sage: G = Gamma0(3).as_permutation_group()
        sage: G.generalised_level()
        3
    """
    widths = self.cusp_widths()
    return arith.lcm(widths)
def congruence_closure(self):
    r"""
    Return the smallest congruence subgroup containing self, represented as
    a congruence subgroup data type. If self is congruence this is just
    self; otherwise it may be strictly larger.

    In practice the following criterion is used: let `m` be the generalised
    level of self, and let `n = m` if self is even, `n = 2m` otherwise.
    By a generalisation of Wohlfahrt's theorem due to Kiming, Verrill and
    Schuett, every congruence subgroup containing self contains `\Gamma(n)`,
    so we return the preimage of the image of self modulo `n`.

    .. note::

        If you just want to know if the subgroup is congruence or not, it
        is *much* faster to use :meth:`~is_congruence`.

    EXAMPLES::

        sage: Gamma1(3).as_permutation_group().congruence_closure()
        Congruence subgroup of SL(2,Z) of level 3, preimage of:
        Matrix group over Ring of integers modulo 3 with 2 generators (
        [1 1]  [1 2]
        [0 1], [0 1]
        )
        sage: sage.modular.arithgroup.arithgroup_perm.HsuExample10().congruence_closure() # long time (11s on sage.math, 2012)
        Modular Group SL(2,Z)
    """
    level = self.generalised_level()
    if not self.is_even():
        level = 2 * level
    from .congroup_generic import CongruenceSubgroup_constructor as CS
    return CS(level, [x.matrix() for x in self.gens()])
def is_congruence(self):
r"""
Return ``True`` if this is a congruence subgroup, and ``False``
otherwise.
ALGORITHM:
Uses Hsu's algorithm [Hsu1996]_. Adapted from <NAME>'s
implementation in KFarey [Kur2008]_.
For *odd* subgroups, Hsu's algorithm still works with minor
modifications, using the extension of Wohlfarht's theorem due to
Kiming, Schuett and Verrill [KSV2011]_. See [HL2014]_ for details.
The algorithm is as follows. Let `G` be a finite-index subgroup of
`{\rm SL}(2, \ZZ)`, and let `L` and `R` be the permutations of the
cosets of `G` given by the elements `\begin{pmatrix} 1 & 1 \\ 0 & 1
\end{pmatrix}` and `\begin{pmatrix} 1 & 1 \\ 0 & 1 \end{pmatrix}`. Let
`N` be the generalized level of `G` (if `G` is even) or twice the
generalized level (if `G` is odd). Then:
- if `N` is odd, `G` is congruence if and only if the relation
.. MATH::
(L R^{-1} L)^2 = (R^2 L^{1/2})^3
holds, where `1/2` is understood as the multiplicative inverse of 2
modulo N.
- if `N` is a power of 2, then `G` is congruence if and only
if the relations
.. MATH::
\begin{array}{cc}
(L R^{-1} L)^{-1} S (L R^{-1} L) S = 1 & (A1)\\
S^{-1} R S = R^{25} & (A2)\\
(L R^{-1} L)^2 = (S R^5 L R^{-1} L)^3 & (A3) \\
\end{array}
hold, where `S = L^{20} R^{1/5} L^{-4} R^{-1}`, `1/5` being the inverse
of 5 modulo N.
- if `N` is neither odd nor a power of 2, seven relations (B1-7) hold,
for which see [HL2014]_, or the source code of this function.
If the Sage verbosity flag is set (using ``set_verbose()``), then extra
output will be produced indicating which of the relations (A1-3) or
(B1-7) is not satisfied.
EXAMPLES:
Test if `{\rm SL}_2(\ZZ)` is congruence::
sage: a = ArithmeticSubgroup_Permutation(L='',R='')
sage: a.index()
1
sage: a.is_congruence()
True
This example is congruence -- it is `\Gamma_0(3)` in disguise::
sage: S2 = SymmetricGroup(4)
sage: l = S2((2,3,4))
sage: r = S2((1,3,4))
sage: G = ArithmeticSubgroup_Permutation(L=l,R=r)
sage: G
Arithmetic subgroup with permutations of right cosets
S2=(1,2)(3,4)
S3=(1,4,2)
L=(2,3,4)
R=(1,3,4)
sage: G.is_congruence()
True
This one is noncongruence::
sage: import sage.modular.arithgroup.arithgroup_perm as ap
sage: ap.HsuExample10().is_congruence()
False
The following example (taken from [KSV2011]_) shows that a lifting of a
congruence subgroup of `{\rm PSL}(2,\ZZ)` to a subgroup of `{\rm SL}(2,
\ZZ)` need not necessarily be congruence::
sage: S2 = "(1,3,13,15)(2,4,14,16)(5,7,17,19)(6,10,18,22)(8,12,20,24)(9,11,21,23)"
sage: S3 = "(1,14,15,13,2,3)(4,5,6,16,17,18)(7,8,9,19,20,21)(10,11,12,22,23,24)"
sage: G = ArithmeticSubgroup_Permutation(S2=S2,S3=S3)
sage: G.is_congruence()
False
sage: G.to_even_subgroup().is_congruence()
True
In fact `G` is a lifting to `{\rm SL}(2,\ZZ)` of the group
`\bar{\Gamma}_0(6)`::
sage: G.to_even_subgroup() == Gamma0(6)
True
"""
from sage.misc.verbose import verbose
if self.index() == 1: # the group is SL2Z (trivial case)
return True
L = self.L() # action of L
R = self.R() # action of R
if self.is_even():
N = L.order() # generalised level of the group
else:
N = 2 * L.order()
# write N as N = em where e = 2^k and m odd
m = N.odd_part()
e = N // m
if e == 1:
# N is odd
# this only gets called if self is even
onehalf = ZZ(2).inverse_mod(N) # i.e. 2^(-1) mod N
rel = (R*R*L**(-onehalf))**3
return rel.is_one()
elif m == 1:
# N is a power of 2
onefifth = ZZ(5).inverse_mod(N) # i.e. 5^(-1) mod N
S = L**20*R**onefifth*L**(-4)*~R
# congruence if the three below permutations are trivial
rel = (~L*R*~L) * S * | |
<filename>src/son/monitor/son_sp.py
"""
Copyright (c) 2015 SONATA-NFV
ALL RIGHTS RESERVED.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
This work has been performed in the framework of the SONATA project,
funded by the European Commission under Grant number 671517 through
the Horizon 2020 and 5G-PPP programmes. The authors would like to
acknowledge the contributions of their colleagues of the SONATA
partner consortium (www.sonata-nfv.eu).
"""
import logging
from requests import Session, post, get
import websocket
import threading
from subprocess import call, check_output
import json
from son.profile.helper import read_yaml, write_yaml
from prometheus_client import start_http_server, Gauge
import os
import docker
from time import gmtime, strftime
import datetime
"""
This class implements the son-sp commands.
These commands translate to the API's of the SONATA SP
"""
# Module-level logger for the Service Platform monitoring commands.
LOG = logging.getLogger('SP_monitor')
LOG.setLevel(level=logging.INFO)
# Defaults below; several can be overridden via Service_Platform kwargs,
# and the prometheus_* values are mutated globally by Service_Platform.__init__.
prometheus_stream_port = 8082  # local port where streamed metrics are re-exported
prometheus_server_api = 'http://127.0.0.1:9090'  # local Prometheus server
prometheus_config_path = '/tmp/son-monitor/prometheus/prometheus_sdk.yml'
GK_api = 'http://sp.int3.sonata-nfv.eu:32001/api/v2/'  # SONATA Gatekeeper API
monitor_api = 'http://sp.int3.sonata-nfv.eu:8000/api/v1/'  # SP monitoring API
son_access_config_path = "/home/steven/.son-workspace"  # son-access workspace dir
platform_id = 'sp1'  # platform entry in the son-access workspace
class Service_Platform():
def __init__(self, export_port=8082, GK_api=None, **kwargs):
    """Configure the Service Platform client.

    ``monitor_api``, ``GK_api``, ``son_access_config_path``, ``platform_id``,
    ``prometheus_server_api`` and ``prometheus_config_path`` can all be
    overridden through ``kwargs``.
    """
    self.monitor_api = kwargs.get('monitor_api', monitor_api)
    # NOTE(review): the fallback here is the *parameter* GK_api (which
    # shadows the module-level constant), so omitting both leaves it None.
    self.GK_api = kwargs.get('GK_api', GK_api)
    self.son_access_config_path = kwargs.get('son_access_config_path', son_access_config_path)
    self.platform_id = kwargs.get('platform_id', platform_id)
    # Build up our session
    self.session = Session()
    self.session.headers = {
        "Accept": "application/json; charset=UTF-8"
    }
    # global parameters needed for the SP_websocket Class
    # (SP_websocket reads these module globals, so they are mutated here)
    global prometheus_stream_port
    prometheus_stream_port = export_port
    global prometheus_server_api
    prometheus_server_api = kwargs.get('prometheus_server_api', prometheus_server_api)
    global prometheus_config_path
    prometheus_config_path = kwargs.get('prometheus_config_path', prometheus_config_path)
    self.ws_thread = None
    # websocket in the SP
    self.ws = None
    # access token to auth the SDK user
    self.access_token = None
def list(self, **kwargs):
    """Return metrics from the SP monitoring API.

    With a ``metric`` kwarg, return the list of VNFs exporting that metric;
    otherwise return the list of all available metric names.
    """
    metric = kwargs.get('metric')
    if not metric:
        resp = self.session.get(self.monitor_api + 'prometheus/metrics/list')
        return resp.json().get('metrics')
    url = self.monitor_api + 'prometheus/metrics/name/' + metric
    return self.session.get(url).json().get("metrics").get("result")
def query(self, **kwargs):
    """Query monitoring metrics of a deployed VNF via the SP monitoring API.

    Resolves the service descriptor by name, finds a matching VNF
    instance/VDU/VNFC and fetches the metric values between ``since`` and
    ``until``.

    :param kwargs: service, vnf_name, vdu_id, vnfc_id, metric, since,
        until, verbose
    :return: list of metric values (empty when nothing matched)
    """
    verbose = kwargs.get("verbose", False)
    LOG.setLevel(level=logging.INFO)
    if verbose:
        LOG.setLevel(level=logging.DEBUG)
    # periodically refresh token
    self._get_token()
    service_name = kwargs.get("service")
    vnf_name = kwargs.get("vnf_name")
    vdu_id = kwargs.get("vdu_id")
    vnfc_id = kwargs.get("vnfc_id")
    metric = kwargs.get("metric")
    since = kwargs.get("since")
    until = kwargs.get("until")
    metric_list = []
    service_desc_uuid = self._get_service_descriptor_uuid(service_name)
    vnf_instances = self._get_vnf_instances(service_desc_uuid)
    if len(vnf_instances) <= 0:
        LOG.warning("found no VNF instances for this service descriptor uuid: {0}".format(service_desc_uuid))
    else:
        vnf_descriptor_uuid = self._get_VNF_descriptor_uuid(vnf_name)
        for vnf_instance_uuid in vnf_instances:
            # Bug fix: _check_VNF_instance returns False (or an error string)
            # when nothing matches; unpacking that directly raised TypeError.
            match = self._check_VNF_instance(vnf_instance_uuid, vnf_descriptor_uuid, vdu_id, vnfc_id)
            if not isinstance(match, tuple):
                continue
            vdu_id, vc_id = match
            if vc_id:
                LOG.info("found VNF: {0} with instance uuid: {2}, vdu_id: {3} vnfc_id: {4} in service: {1} ".format(
                    vnf_name, service_name, vnf_instance_uuid, vdu_id, vc_id))
                metric_list = self._get_async_metric(vnf_instance_uuid, vdu_id, vc_id, metric, since, until)
                break
    return metric_list
def stream_test(self, **kwargs):
    """Start (or stop) a metric websocket stream from the SP (test variant,
    no authentication).

    :param kwargs: metric, vnf_name, action ('start'|'stop')
    :return: status string
    """
    metric = kwargs.get('metric')
    vnf_name = kwargs.get('vnf_name')
    action = kwargs.get('action', 'start')
    if action == 'stop':
        SP_websocket._config_prometheus(remove=True)
        if self.ws:
            self.ws.close()
        # kill all running websocket streams
        call(['pkill', '-f', 'son-monitor stream'])
        return 'websocket closed'
    # create the websocket with a filter eg: {"metric":"vm_cpu_perc","filters":["exported_instance":"vtc-vnf"]}
    url = self.monitor_api + 'ws/new'
    # Bug fix: the original str(list("exported_instance=...")) split the
    # filter string into a list of single characters; the API expects a
    # list with one "key=value" entry.
    data = {'metric': str(metric),
            'filters': str(["exported_instance={}".format(vnf_name)])}
    response = self.session.post(url, json=data)
    code = response.status_code
    if code == 200:
        ws_url = response.json().get('ws_url')
        LOG.info('ws_url: {}'.format(ws_url))
        self.ws = SP_websocket(ws_url, vnf_name=vnf_name, metric=metric)
        self.ws_thread = threading.Thread(target=self.ws.run_forever)
        self.ws_thread.daemon = True
        self.ws_thread.start()
        self.ws_thread.join()
    return 'websocket thread started'
def stream_auth(self, **kwargs):
    """
    call the SONATA Gatekeeper API to request monitoring metrics
    (authenticated variant; streams them over a websocket)

    :param kwargs: service, vnf_name, vdu_id, vnfc_id, metric, action,
        verbose
    :return: status string
    """
    verbose = kwargs.get("verbose", False)
    LOG.setLevel(level=logging.INFO)
    if verbose:
        LOG.setLevel(level=logging.DEBUG)
    action = kwargs.get('action', 'start')
    if action == 'stop':
        SP_websocket._config_prometheus(remove=True)
        if self.ws:
            self.ws.close()
        # kill all running websocket streams
        LOG.info('closing websocket')
        call(['pkill', '-f', 'son-monitor stream'])
        LOG.info('websocket closed')
        return 'websocket closed'
    # periodically refresh token
    self._get_token()
    service_name = kwargs.get("service", "sonata-demo-12")
    vnf_name = kwargs.get("vnf_name", "vtc-vnf2")
    vdu_id = kwargs.get("vdu_id")
    vnfc_id = kwargs.get("vnfc_id")
    metric = kwargs.get("metric")
    ws_url = None
    # first lookup if the service name is instantiated
    service_desc_uuid = self._get_service_descriptor_uuid(service_name)
    # then check if the service has an instance of this VNF
    vnf_instances = self._get_vnf_instances(service_desc_uuid)
    if len(vnf_instances) <= 0:
        LOG.warning("found no VNF instances for this service descriptor uuid: {0}".format(service_desc_uuid))
    else:
        # get the descriptor uuid of this vnf
        vnf_descriptor_uuid = self._get_VNF_descriptor_uuid(vnf_name)
        for vnf_instance_uuid in vnf_instances:
            # check if this VNF instance has the correct vdu and vnfc
            # Bug fix: _check_VNF_instance returns False (or an error string)
            # when nothing matches; unpacking that directly raised TypeError.
            match = self._check_VNF_instance(vnf_instance_uuid, vnf_descriptor_uuid, vdu_id, vnfc_id)
            if not isinstance(match, tuple):
                continue
            vdu_id, vnfc_id = match
            if vnfc_id:
                LOG.info("found VNF: {0} with instance uuid: {2}, vdu_id: {3} vnfc_id: {4} in service: {1} ".format(
                    vnf_name, service_name, vnf_instance_uuid, vdu_id, vnfc_id))
                ws_url = self._get_ws_url(vnf_instance_uuid, vdu_id, vnfc_id, metric)
                break
    if not vnfc_id:
        return 'No vnfc_id found in the record'
    if not ws_url:
        return 'No websocket url received'
    #ws_url = 'ws://10.30.0.112:8002/ws/98adab175fd64cc4bbe50ae9505fecf6'
    self.ws = SP_websocket(ws_url, vnf_name=vnf_name, metric=metric, vm_id=vnfc_id)
    self.ws_thread = threading.Thread(target=self.ws.run_forever)
    self.ws_thread.daemon = True
    self.ws_thread.start()
    self.ws_thread.join()
    return 'websocket thread started'
# TODO: start background thread to refresh token
def _get_token(self):
    """Fetch a fresh access token via son-access and cache it on self.

    The credentials are handled by the son-access tool; we only point it at
    the configured workspace and platform, then read the token file it wrote.
    """
    check_output(['son-access', '-w', self.son_access_config_path,
                  '-p', self.platform_id, 'auth'])
    token_path = os.path.join(self.son_access_config_path, 'platforms', 'token.txt')
    with open(token_path, 'r') as token_file:
        self.access_token = token_file.read()
def _get_VNF_descriptor_uuid(self, vnf_name):
    """Return the descriptor uuid of the VNF with the given name, looked up
    via the Gatekeeper functions API; None when not exactly one match."""
    headers = {'Authorization': "Bearer %s" % self.access_token}
    resp = get(self.GK_api + "functions", headers=headers)
    if resp.status_code >= 400:
        return 'error: {}'.format(resp.status_code)
    matches = [function.get("uuid") for function in resp.json()
               if function["vnfd"]["name"] == vnf_name]
    if len(matches) != 1:
        LOG.warning("found {0} functions with name: {1}".format(len(matches), vnf_name))
        return None
    uuid = matches[0]
    LOG.info("found function descriptor of {0} with uuid: {1}".format(vnf_name, uuid))
    return uuid
def _check_VNF_instance(self, vnf_instance_uuid, vnf_descriptor_uuid, vdu_id=None, vnfc_id=None):
    """Check that the given VNF instance record matches the descriptor and
    contains the requested VDU/VNFC.

    When ``vdu_id`` / ``vnfc_id`` are omitted, the first VDU/VNFC of the
    record is used.

    :return: ``(vdu_id, vc_id)`` on success, ``False`` when no match, or an
        ``'error: <code>'`` string on an HTTP error.
    """
    headers = {'Authorization': "Bearer %s" % self.access_token}
    url = self.GK_api + "records/functions"
    resp = get(url, headers=headers)
    if resp.status_code >= 400:
        return 'error: {}'.format(resp.status_code)
    LOG.debug('request VNF record, url:{0} json:{1}'.format(url, json.dumps(resp.json(), indent=2)))
    vnf_list = resp.json()
    vnf_list = [vnf for vnf in vnf_list if vnf.get("descriptor_reference") == vnf_descriptor_uuid and vnf.get("uuid") == vnf_instance_uuid]
    if len(vnf_list) > 1 :
        LOG.info("found multiple VNF instances with matching uuid: {0}".format(vnf_list))
        return False
    elif len(vnf_list) == 0 :
        LOG.info("found no VNF instance with matching uuid: {0}".format(vnf_instance_uuid))
        return False
    # we found 1 matching vnf instance, now check if it has a vdu
    LOG.info("found VNF instance with matching uuid: {0}".format(vnf_instance_uuid))
    vnf_record = vnf_list[0]
    vdu_list = vnf_record["virtual_deployment_units"]
    if vdu_id:
        vdu_list = [vdu for vdu in vdu_list if vdu.get("id") == vdu_id]
        # Bug fix: validate the filtered list BEFORE indexing element 0; the
        # original accessed vdu_list[0] first and raised IndexError on no match.
        if len(vdu_list) > 1:
            LOG.info("found multiple vdu_ids with matching id: {0} list: {1}".format(vdu_id, vdu_list))
            return False
        elif len(vdu_list) == 0:
            LOG.info("found no VDUs with matching id: {0}".format(vdu_id))
            return False
    elif len(vdu_list) == 0:
        LOG.info("found no VDUs with matching id: {0}".format(vdu_id))
        return False
    # pick by default the first vdu when no vdu_id was requested
    vdu = vdu_list[0]
    vdu_id = vdu["id"]
    # we found 1 matching vdu id, now check if it has a vdu instance(vnfc)
    LOG.info("found VDU with matching id: {0}".format(vdu_id))
    vnfc_list = vdu["vnfc_instance"]
    if vnfc_id:
        vnfc_list = [vnfc for vnfc in vnfc_list if vnfc.get("id") == vnfc_id]
        # Same ordering fix as for the VDU list above.
        if len(vnfc_list) > 1:
            LOG.info("found multiple vnfc_ids with matching id: {0} list: {1}".format(vnfc_id, vnfc_list))
            return False
        elif len(vnfc_list) == 0:
            LOG.info("found no VNFCs with matching id: {0}".format(vnfc_id))
            return False
    elif len(vnfc_list) == 0:
        LOG.info("found no VNFCs with matching id: {0}".format(vnfc_id))
        return False
    # pick by default the first vnfc when no vnfc_id was requested
    vnfc = vnfc_list[0]
    vnfc_id = vnfc["id"]
    vc_id = vnfc["vc_id"]
    LOG.info("found VNFC with matching id: {0} and vc_id: {1}".format(vnfc_id, vc_id))
    return vdu_id, vc_id
def _get_service_instance_list(self):
    """Return the raw Gatekeeper response text listing all registered
    service instance records."""
    url = self.GK_api + "records/services"
    resp = get(url, headers={'Authorization': "Bearer %s" % self.access_token})
    LOG.info('request service instance uuid list, url:{0} json:{1}'.format(url, json.dumps(resp.json(), indent=2)))
    return resp.text
# Gets a registered service instance
def _get_vnf_instances(self, service_descriptor_uuid):
headers = {'Authorization': "Bearer %s" % self.access_token}
url = self.GK_api + "records/services"
resp = get(url, headers=headers)
if resp.status_code >= 400:
return 'error: {}'.format(resp.status_code)
LOG.debug('request service instances, url:{0} json:{1}'.format(url, json.dumps(resp.json(), indent=2)))
services_list = resp.json()
found_services = [service for service in services_list if service["descriptor_reference"] == service_descriptor_uuid]
if len(found_services) > 1 or len(found_services) == 0 :
LOG.warning("found {0} service instances with descriptor uuid: {1}". format(len(found_services), service_descriptor_uuid))
return []
else:
service = found_services[0]
service_instance_uuid = service["uuid"]
vnfr_list = [vnf.get("vnfr_id") for vnf in service["network_functions"]]
LOG.info("found VNF descriptors: {}".format(json.dumps(vnfr_list,indent=2)))
| |
= self.getEmpty(1)
cv.EqualizeHist(self._getGrayscaleBitmap(), self._equalizedgraybitmap)
return self._equalizedgraybitmap
def equalize(self):
    """
    **SUMMARY**

    Perform a histogram equalization on the image.

    **RETURNS**

    A grayscale SimpleCV image.

    **EXAMPLE**

    >>> img = Image("lenna")
    >>> img = img.equalize()
    """
    equalized = self._getEqualizedGrayscaleBitmap()
    return Image(equalized)
def getPGSurface(self):
    """
    **SUMMARY**

    Returns the image as a pygame surface. This is used for rendering the
    display. The surface is built once and cached.

    **RETURNS**

    A pygame surface object used for rendering.
    """
    if self._pgsurface:
        return self._pgsurface
    # pygame expects RGB byte order; grayscale bitmaps can be passed as-is
    # while everything else is converted first.
    source = self if self.isGray() else self.toRGB()
    self._pgsurface = pg.image.fromstring(source.getBitmap().tostring(), self.size(), "RGB")
    return self._pgsurface
def toString(self):
    """
    **SUMMARY**

    Serialize the image to a raw byte string, useful for moving data around.

    **RETURNS**

    The image, converted to RGB, as a binary string.
    """
    rgb = self.toRGB()
    return rgb.getBitmap().tostring()
def save(self, filehandle_or_filename="", mode="", verbose=False, temp=False, path=None, fname=None, **params):
    """
    **SUMMARY**
    Save the image to the specified filename. If no filename is provided
    then it will use the filename the Image was loaded from or the last
    place it was saved to. You can save to lots of places, not just files.
    For example you can save to the Display, a JpegStream, VideoStream,
    temporary file, or Ipython Notebook.
    Save will implicitly render the image's layers before saving, but the layers are
    not applied to the Image itself.
    **PARAMETERS**
    * *filehandle_or_filename* - the filename to which to store the file. The method will infer the file type.
    * *mode* - This flag is used for saving using PIL.
    * *verbose* - If this flag is true we return the path where we saved the file.
    * *temp* - If temp is True we save the image as a temporary file and return the path
    * *path* - path where temporary files needed to be stored
    * *fname* - name(Prefix) of the temporary file.
    * *params* - This object is used for overloading the PIL save methods. In particular
      this method is useful for setting the jpeg compression level. For JPG see this documentation:
      http://www.pythonware.com/library/pil/handbook/format-jpeg.htm
    **EXAMPLES**
    To save as a temporary file just use:
    >>> img = Image('simplecv')
    >>> img.save(temp=True)
    It will return the path that it saved to.
    Save also supports IPython Notebooks when passing it a Display object
    that has been instantiated with the notebook flag.
    To do this just use:
    >>> disp = Display(displaytype='notebook')
    >>> img.save(disp)
    .. Note::
      You must have IPython notebooks installed for this to work.
      path and fname are valid if and only if temp is set to True.
    .. attention::
      We need examples for all save methods as they are unintuitive.
    """
    #TODO, we use the term mode here when we mean format
    #TODO, if any params are passed, use PIL
    # Temporary file in a caller-supplied directory: pick the next free
    # "<fname><N>.png" in that directory and save there.
    if temp and path!=None :
        import glob
        if fname==None :
            fname = 'Image'
        if glob.os.path.exists(path):
            path = glob.os.path.abspath(path)
            imagefiles = glob.glob(glob.os.path.join(path,fname+"*.png"))
            num = [0]
            # Collect the numeric suffixes of existing files so the new one
            # continues the sequence.
            for img in imagefiles :
                num.append(int(glob.re.findall('[0-9]+$',img[:-4])[-1]))
            num.sort()
            fnum = num[-1]+1
            fname = glob.os.path.join(path,fname+str(fnum)+".png")
            self._tempFiles.append(fname)
            self.save(self._tempFiles[-1])
            return self._tempFiles[-1]
        else :
            print "Path does not exist!"
    #if it's a temporary file
    elif temp :
        self._tempFiles.append(tempfile.NamedTemporaryFile(suffix=".png"))
        self.save(self._tempFiles[-1].name)
        return self._tempFiles[-1].name
    # No destination given: fall back to wherever the image came from or
    # was last saved.
    if (not filehandle_or_filename):
        if (self.filename):
            filehandle_or_filename = self.filename
        else:
            filehandle_or_filename = self.filehandle
    # Render drawing layers into a copy; the image itself stays untouched.
    if (len(self._mLayers)):
        saveimg = self.applyLayers()
    else:
        saveimg = self
    if self._colorSpace != ColorSpace.BGR and self._colorSpace != ColorSpace.GRAY:
        saveimg = saveimg.toBGR()
    # Non-string destination: a filehandle-like object (JpegStreamer,
    # VideoStream, Display, or a real file handle).
    if not isinstance(filehandle_or_filename, basestring):
        fh = filehandle_or_filename
        if (not PIL_ENABLED):
            logger.warning("You need the python image library to save by filehandle")
            return 0
        if (type(fh) == InstanceType and fh.__class__.__name__ == "JpegStreamer"):
            fh.jpgdata = StringIO()
            saveimg.getPIL().save(fh.jpgdata, "jpeg", **params) #save via PIL to a StringIO handle
            fh.refreshtime = time.time()
            self.filename = ""
            self.filehandle = fh
        elif (type(fh) == InstanceType and fh.__class__.__name__ == "VideoStream"):
            self.filename = ""
            self.filehandle = fh
            fh.writeFrame(saveimg)
        elif (type(fh) == InstanceType and fh.__class__.__name__ == "Display"):
            # Notebook displays need a real file on disk for IPython to show.
            if fh.displaytype == 'notebook':
                try:
                    from IPython.core.display import Image as IPImage
                except ImportError:
                    print "You need IPython Notebooks to use this display mode"
                    return
                from IPython.core import display as Idisplay
                tf = tempfile.NamedTemporaryFile(suffix=".png")
                loc = '/tmp/' + tf.name.split('/')[-1]
                tf.close()
                self.save(loc)
                Idisplay.display(IPImage(filename=loc))
                return
            else:
                self.filename = ""
                self.filehandle = fh
                fh.writeFrame(saveimg)
        else:
            # Plain file handle: save via PIL, defaulting to jpeg.
            if (not mode):
                mode = "jpeg"
            saveimg.getPIL().save(fh, mode, **params)
            self.filehandle = fh #set the filename for future save operations
            self.filename = ""
        # NOTE(review): self.filename was just cleared above, so this prints
        # an empty string — confirm intent.
        if verbose:
            print self.filename
        return 1
    #make a temporary file location if there isn't one
    if not filehandle_or_filename:
        filename = tempfile.mkstemp(suffix=".png")[-1]
    else:
        filename = filehandle_or_filename
    #allow saving in webp format
    if re.search('\.webp$', filename):
        try:
            #newer versions of PIL support webp format, try that first
            self.getPIL().save(filename, **params)
        except:
            #if PIL doesn't support it, maybe we have the python-webm library
            try:
                from webm import encode as webmEncode
                from webm.handlers import BitmapHandler, WebPHandler
            except:
                logger.warning('You need the webm library to save to webp format. You can download from: https://github.com/ingenuitas/python-webm')
                return 0
            #PNG_BITMAP_DATA = bytearray(Image.open(PNG_IMAGE_FILE).tostring())
            PNG_BITMAP_DATA = bytearray(self.toString())
            IMAGE_WIDTH = self.width
            IMAGE_HEIGHT = self.height
            image = BitmapHandler(
                PNG_BITMAP_DATA, BitmapHandler.RGB,
                IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_WIDTH * 3
            )
            result = webmEncode.EncodeRGB(image)
            # NOTE(review): uses the Py2 file() builtin and never closes the
            # handle; also filename.format("RGB") is a no-op on plain names.
            file(filename.format("RGB"), "wb").write(result.data)
            return 1
    #if the user is passing kwargs use the PIL save method.
    if( params ): #usually this is just the compression rate for the image
        if (not mode):
            mode = "jpeg"
        saveimg.getPIL().save(filename, mode, **params)
        return 1
    # Default path: save with OpenCV to the given (or remembered) filename.
    if (filename):
        cv.SaveImage(filename, saveimg.getBitmap())
        self.filename = filename #set the filename for future save operations
        self.filehandle = ""
    elif (self.filename):
        cv.SaveImage(self.filename, saveimg.getBitmap())
    else:
        return 0
    if verbose:
        print self.filename
    if temp:
        return filename
    else:
        return 1
def copy(self):
    """
    **SUMMARY**

    Return a full copy of the Image's bitmap. Note that this is different
    from using python's implicit copy function in that only the bitmap
    itself is copied. This method essentially performs a deep copy.

    **RETURNS**

    A copy of this SimpleCV image.

    **EXAMPLE**

    >>> img = Image("logo")
    >>> img2 = img.copy()
    """
    bitmap_copy = self.getEmpty()
    cv.Copy(self.getBitmap(), bitmap_copy)
    return Image(bitmap_copy, colorSpace=self._colorSpace)
def upload(self,dest,api_key=None,api_secret=None, verbose = True):
"""
**SUMMARY**
Uploads image to imgur or flickr. In verbose mode URL values are printed.
**PARAMETERS**
* *api_key* - a string of the API key.
* *api_secret* (required only for flickr) - a string of the API secret.
* *verbose* - If verbose is true all values are printed to the
screen
**RETURNS**
if uploading is successful,
- Imgur return the original image URL on success and None if it fails.
- Flick returns True on success, else returns False.
**EXAMPLE**
TO upload image to imgur
>>> img = Image("lenna")
>>> result = img.upload( 'imgur',"MY_API_KEY1234567890" )
>>> print "Uploaded To: " + result[0]
To upload image to flickr
>>> img.upload('flickr','api_key','api_secret')
>>> img.invert().upload('flickr') #Once the api keys and secret keys are cached.
**NOTES**
.. Warning::
This method requires two packages to be installed
-PyCurl
-flickr api.
.. Warning::
You must supply your own API key. See here:
- http://imgur.com/register/api_anon
- http://www.flickr.com/services/api/misc.api_keys.html
"""
if ( dest=='imgur' ) :
try:
import pycurl
except ImportError:
print "PycURL Library not installed."
return
response = StringIO()
c = pycurl.Curl()
values = [("key", api_key),
("image", (c.FORM_FILE, self.filename))]
c.setopt(c.URL, "http://api.imgur.com/2/upload.xml")
c.setopt(c.HTTPPOST, values)
c.setopt(c.WRITEFUNCTION, response.write)
c.perform()
c.close()
match = re.search(r'<hash>(\w+).*?<deletehash>(\w+).*?<original>(http://[\w.]+/[\w.]+)', response.getvalue() , re.DOTALL)
if match:
if(verbose):
print "Imgur page: http://imgur.com/" + match.group(1)
print "Original image: " + match.group(3)
print "Delete page: http://imgur.com/delete/" + match.group(2)
return [match.group(1),match.group(3),match.group(2)]
else :
if(verbose):
print "The API Key given is not valid"
return None
elif (dest=='flickr'):
global temp_token
flickr = None
try :
import flickrapi
except ImportError:
print "Flickr API is not installed. Please install it from http://pypi.python.org/pypi/flickrapi"
return False
try :
if (not(api_key==None and api_secret==None)):
self.flickr = flickrapi.FlickrAPI(api_key,api_secret,cache=True)
self.flickr.cache = flickrapi.SimpleCache(timeout=3600, max_entries=200)
self.flickr.authenticate_console('write')
temp_token = (api_key,api_secret)
else :
try :
self.flickr = flickrapi.FlickrAPI(temp_token[0],temp_token[1],cache=True)
self.flickr.authenticate_console('write')
except | |
North Carolina",3851),
("Mountain View CDP, North Carolina",3715),
("Mount Airy city, North Carolina",10227),
("Mount Gilead town, North Carolina",1161),
("Mount Holly city, North Carolina",15305),
("Mount Olive town, North Carolina",4731),
("Mount Pleasant town, North Carolina",1904),
("Moyock CDP, North Carolina",3717),
("Mulberry CDP, North Carolina",2327),
("Murfreesboro town, North Carolina",3069),
("Murphy town, North Carolina",1920),
("Murraysville CDP, North Carolina",15831),
("Myrtle Grove CDP, North Carolina",10563),
("Nags Head town, North Carolina",2894),
("Nashville town, North Carolina",5523),
("Navassa town, North Carolina",1677),
("Neuse Forest CDP, North Carolina",1377),
("New Bern city, North Carolina",29958),
("Newland town, North Carolina",856),
("New London town, North Carolina",683),
("Newport town, North Carolina",4624),
("Newton city, North Carolina",13005),
("Newton Grove town, North Carolina",546),
("Norlina town, North Carolina",1101),
("Norman town, North Carolina",98),
("Northchase CDP, North Carolina",3678),
("Northlakes CDP, North Carolina",1586),
("North Topsail Beach town, North Carolina",995),
("Northwest city, North Carolina",922),
("North Wilkesboro town, North Carolina",4201),
("Norwood town, North Carolina",2800),
("Oakboro town, North Carolina",1872),
("Oak City town, North Carolina",272),
("Oak Island town, North Carolina",7612),
("Oak Ridge town, North Carolina",6829),
("Ocean Isle Beach town, North Carolina",705),
("Ocracoke CDP, North Carolina",564),
("Ogden CDP, North Carolina",7692),
("Old Fort town, North Carolina",900),
("Old Hundred CDP, North Carolina",420),
("Oriental town, North Carolina",996),
("Orrum town, North Carolina",63),
("Ossipee town, North Carolina",488),
("Oxford city, North Carolina",8648),
("Pantego town, North Carolina",211),
("Parkton town, North Carolina",507),
("Parmele town, North Carolina",249),
("Patterson Springs town, North Carolina",882),
("Peachland town, North Carolina",496),
("Peletier town, North Carolina",821),
("Pembroke town, North Carolina",3044),
("Pikeville town, North Carolina",649),
("Pilot Mountain town, North Carolina",1483),
("Pinebluff town, North Carolina",1471),
("Pinehurst village, North Carolina",15821),
("Pine Knoll Shores town, North Carolina",1368),
("Pine Level town, North Carolina",2132),
("Pinetops town, North Carolina",1273),
("Pinetown CDP, North Carolina",110),
("Pineville town, North Carolina",8574),
("Piney Green CDP, North Carolina",14061),
("Pink Hill town, North Carolina",437),
("Pinnacle CDP, North Carolina",1010),
("Pittsboro town, North Carolina",4098),
("Plain View CDP, North Carolina",1657),
("Pleasant Garden town, North Carolina",4814),
("Pleasant Hill CDP, North Carolina",851),
("Plymouth town, North Carolina",3542),
("Polkton town, North Carolina",3187),
("Polkville city, North Carolina",527),
("Pollocksville town, North Carolina",485),
("Porters Neck CDP, North Carolina",7802),
("Potters Hill CDP, North Carolina",632),
("Powellsville town, North Carolina",249),
("Princeton town, North Carolina",944),
("Princeville town, North Carolina",2357),
("Proctorville town, North Carolina",142),
("Prospect CDP, North Carolina",858),
("Pumpkin Center CDP, North Carolina",1708),
("Raeford city, North Carolina",4882),
("Raemon CDP, North Carolina",182),
("Raleigh city, North Carolina",457159),
("Ramseur town, North Carolina",1738),
("Randleman city, North Carolina",4111),
("Ranlo town, North Carolina",3556),
("Raynham town, North Carolina",53),
("Red Cross town, North Carolina",759),
("Red Oak town, North Carolina",3411),
("Red Springs town, North Carolina",3419),
("Reidsville city, North Carolina",13909),
("Rennert town, North Carolina",386),
("Rex CDP, North Carolina",0),
("Rhodhiss town, North Carolina",1009),
("Richfield town, North Carolina",549),
("Richlands town, North Carolina",2524),
("Rich Square town, North Carolina",898),
("Riegelwood CDP, North Carolina",712),
("River Bend town, North Carolina",3065),
("River Road CDP, North Carolina",3352),
("Roanoke Rapids city, North Carolina",14937),
("Robbins town, North Carolina",1743),
("Robbinsville town, North Carolina",656),
("Robersonville town, North Carolina",1647),
("Rockfish CDP, North Carolina",3619),
("Rockingham city, North Carolina",8994),
("Rockwell town, North Carolina",1811),
("Rocky Mount city, North Carolina",54982),
("Rocky Point CDP, North Carolina",1772),
("Rodanthe CDP, North Carolina",125),
("Rolesville town, North Carolina",6957),
("Ronda town, North Carolina",589),
("Roper town, North Carolina",646),
("Roseboro town, North Carolina",1249),
("Rose Hill town, North Carolina",2547),
("Rosman town, North Carolina",487),
("Rougemont CDP, North Carolina",695),
("Rowland town, North Carolina",1119),
("Roxboro city, North Carolina",8283),
("Roxobel town, North Carolina",375),
("Royal Pines CDP, North Carolina",4921),
("Ruffin CDP, North Carolina",359),
("Rural Hall town, North Carolina",3137),
("Ruth town, North Carolina",531),
("Rutherford College town, North Carolina",1253),
("Rutherfordton town, North Carolina",4044),
("St. Helena village, North Carolina",388),
("St. James town, North Carolina",5133),
("St. Pauls town, North Carolina",1925),
("St. Stephens CDP, North Carolina",9254),
("Salem CDP, North Carolina",1872),
("Salemburg town, North Carolina",415),
("Salisbury city, North Carolina",33652),
("Saluda city, North Carolina",864),
("Salvo CDP, North Carolina",70),
("Sandy Creek town, North Carolina",224),
("Sandyfield town, North Carolina",371),
("Sanford city, North Carolina",29274),
("Saratoga town, North Carolina",502),
("Sawmills town, North Carolina",5159),
("Saxapahaw CDP, North Carolina",1355),
("Scotch Meadows CDP, North Carolina",419),
("Scotland Neck town, North Carolina",1380),
("Seaboard town, North Carolina",736),
("Sea Breeze CDP, North Carolina",1861),
("Seagrove town, North Carolina",348),
("Sedalia town, North Carolina",513),
("Selma town, North Carolina",6577),
("Seven Devils town, North Carolina",304),
("Seven Lakes CDP, North Carolina",4404),
("Seven Springs town, North Carolina",70),
("Severn town, North Carolina",237),
("Shallotte town, North Carolina",3961),
("Shannon CDP, North Carolina",331),
("Sharpsburg town, North Carolina",2158),
("Shelby city, North Carolina",20053),
("Siler City town, North Carolina",8106),
("Silver City CDP, North Carolina",912),
("Silver Lake CDP, North Carolina",6330),
("Simpson village, North Carolina",428),
("Sims town, North Carolina",510),
("Skippers Corner CDP, North Carolina",2583),
("Smithfield town, North Carolina",12117),
("Sneads Ferry CDP, North Carolina",2797),
("Snow Hill town, North Carolina",1604),
("Southern Pines town, North Carolina",13694),
("Southern Shores town, North Carolina",2850),
("South Henderson CDP, North Carolina",956),
("South Mills CDP, North Carolina",499),
("Southmont CDP, North Carolina",978),
("Southport city, North Carolina",3585),
("South Rosemary CDP, North Carolina",2288),
("South Weldon CDP, North Carolina",878),
("Sparta town, North Carolina",1851),
("Speed town, North Carolina",73),
("Spencer town, North Carolina",3231),
("Spencer Mountain town, North Carolina",0),
("Spindale town, North Carolina",4231),
("Spivey's Corner CDP, North Carolina",511),
("Spring Hope town, North Carolina",1603),
("Spring Lake town, North Carolina",12284),
("Spruce Pine town, North Carolina",2204),
("Staley town, North Carolina",297),
("Stallings town, North Carolina",15240),
("Stanfield town, North Carolina",1498),
("Stanley town, North Carolina",3643),
("Stantonsburg town, North Carolina",683),
("Star town, North Carolina",830),
("Statesville city, North Carolina",26263),
("Stedman town, North Carolina",1325),
("Stem town, North Carolina",1048),
("Stokes CDP, North Carolina",716),
("Stokesdale town, North Carolina",5367),
("Stoneville town, North Carolina",1300),
("Stonewall town, North Carolina",279),
("Stony Point CDP, North Carolina",2301),
("Stovall town, North Carolina",362),
("Sugar Mountain village, North Carolina",570),
("Summerfield town, North Carolina",11044),
("Sunbury CDP, North Carolina",224),
("Sunset Beach town, North Carolina",3826),
("Surf City town, North Carolina",3264),
("Swannanoa CDP, North Carolina",4288),
("Swan Quarter CDP, North Carolina",474),
("Swansboro town, North Carolina",3145),
("Swepsonville town, North Carolina",1844),
("Sylva town, North Carolina",2672),
("Tabor City town, North Carolina",3990),
("Tarboro town, North Carolina",11045),
("Tar Heel town, North Carolina",228),
("Taylorsville town, North Carolina",2278),
("Taylortown town, North Carolina",782),
("Teachey town, North Carolina",392),
("Thomasville city, North Carolina",26839),
("Toast CDP, North Carolina",1410),
("Tobaccoville village, North Carolina",2617),
("Topsail Beach town, North Carolina",376),
("Trenton town, North Carolina",408),
("Trent Woods town, North Carolina",4081),
("Trinity city, North Carolina",6621),
("Troutman town, North Carolina",2634),
("Troy town, North Carolina",3348),
("Tryon town, North Carolina",1578),
("Turkey town, North Carolina",260),
("Tyro CDP, North Carolina",3903),
("Unionville town, North Carolina",6826),
("Valdese town, North Carolina",4418),
("Valle Crucis CDP, North Carolina",223),
("Valley Hill CDP, North Carolina",2148),
("Vanceboro town, North Carolina",1291),
("Vandemere town, North Carolina",271),
("Vander CDP, North Carolina",1218),
("Vann Crossroads CDP, North Carolina",285),
("Varnamtown town, North Carolina",476),
("Vass town, North Carolina",658),
("Waco town, North Carolina",300),
("Wade town, North Carolina",729),
("Wadesboro town, North Carolina",5351),
("Wagram town, North Carolina",777),
("Wake Forest town, North Carolina",40159),
("Wakulla CDP, North Carolina",90),
("Walkertown town, North Carolina",5043),
("Wallace town, North Carolina",3922),
("Wallburg town, North Carolina",3029),
("Walnut Cove town, North Carolina",1599),
("Walnut Creek village, North Carolina",1130),
("Walstonburg town, North Carolina",182),
("Wanchese CDP, North Carolina",1832),
("Warrenton town, North Carolina",1056),
("Warsaw town, North Carolina",3131),
("Washington city, North Carolina",9649),
("Washington Park town, North Carolina",584),
("Watha town, North Carolina",178),
("Waves CDP, North Carolina",54),
("Waxhaw town, North Carolina",14383),
("Waynesville town, North Carolina",9898),
("Weaverville town, North Carolina",3887),
("Webster town, North Carolina",444),
("Weddington town, North Carolina",10574),
("Welcome CDP, North Carolina",4274),
("Weldon town, North Carolina",1679),
("Wendell town, North Carolina",6828),
("Wentworth town, North Carolina",2724),
("Wesley Chapel village, North Carolina",8633),
("West Canton CDP, North Carolina",1257),
("West Jefferson town, North Carolina",1485),
("West Marion CDP, North Carolina",1403),
("Westport CDP, North Carolina",4119),
("Whispering Pines village, North Carolina",3221),
("Whitakers town, North Carolina",921),
("White Lake town, North Carolina",1072),
("White Oak CDP, North Carolina",260),
("White Plains CDP, North Carolina",1150),
("Whiteville city, North Carolina",5450),
("Whitsett town, North Carolina",1018),
("Wilkesboro town, North Carolina",3505),
("Williamston town, North Carolina",5375),
("Wilmington city, North Carolina",118094),
("Wilson city, North Carolina",49230),
("Wilson's Mills town, North Carolina",2552),
("Windsor town, North Carolina",3523),
("Winfall town, North Carolina",653),
("Wingate town, North Carolina",4078),
("Winston-Salem city, North Carolina",242125),
("Winterville town, North Carolina",9546),
("Winton town, North Carolina",849),
("Woodfin town, North Carolina",6487),
("Woodland town, North Carolina",880),
("Woodlawn CDP, North Carolina",804),
("Wrightsboro CDP, North Carolina",5233),
("Wrightsville Beach town, North Carolina",2536),
("Yadkinville town, North Carolina",2908),
("Yanceyville town, North Carolina",2248),
("Youngsville town, North Carolina",1766),
("Zebulon town, North Carolina",5166),
("Abercrombie city, North Dakota",216),
("Adams city, North Dakota",165),
("Alamo city, North Dakota",44),
("Alexander city, North Dakota",316),
("Alice city, North Dakota",29),
("Almont city, North Dakota",117),
("Alsen city, North Dakota",52),
("Ambrose city, North Dakota",11),
("Amenia city, North Dakota",91),
("Amidon city, North Dakota",20),
("Anamoose city, North Dakota",257),
("Aneta city, North Dakota",278),
("Antler city, North Dakota",30),
("Ardoch city, North Dakota",49),
("Argusville city, North Dakota",521),
("Arnegard city, North Dakota",55),
("Arthur city, North Dakota",326),
("Ashley city, North Dakota",604),
("Auburn CDP, North Dakota",19),
("Ayr city, North Dakota",11),
("Balfour city, North Dakota",23),
("Balta city, North Dakota",128),
("Bantry city, North Dakota",8),
("Barney city, North Dakota",53),
("Barton CDP, North Dakota",0),
("Bathgate city, North Dakota",66),
("Beach city, North Dakota",1015),
("Belcourt CDP, North Dakota",2038),
("Belfield city, North Dakota",975),
("Benedict city, North Dakota",77),
("Bergen city, North Dakota",11),
("Berlin city, North Dakota",42),
("Berthold city, North Dakota",459),
("Beulah city, North Dakota",3296),
("Binford city, North Dakota",178),
("Bisbee city, North Dakota",112),
("Bismarck city, North Dakota",71731),
("Blanchard CDP, North Dakota",6),
("Bottineau city, North Dakota",2199),
("Bowbells city, North Dakota",347),
("Bowdon city, North Dakota",168),
("Bowman city, North Dakota",1552),
("Braddock city, North Dakota",12),
("Briarwood city, North Dakota",47),
("Brinsmade city, North Dakota",30),
("Brocket city, North Dakota",53),
("Brooktree Park CDP, North Dakota",106),
("Buchanan city, North Dakota",73),
("Bucyrus city, North Dakota",10),
("Buffalo city, North Dakota",226),
("Burlington city, North Dakota",1477),
("Butte city, North Dakota",91),
("Buxton city, North Dakota",426),
("Caledonia CDP, North Dakota",33),
("Calio city, North Dakota",3),
("Calvin city, North Dakota",8),
("Cando city, North Dakota",1110),
("Cannon Ball CDP, North Dakota",945),
("Canton City city, North Dakota",24),
("Carpio city, North Dakota",139),
("Carrington city, North Dakota",2020),
("Carson city, North Dakota",239),
("Casselton city, North Dakota",2508),
("Cathay city, North Dakota",29),
("Cavalier city, North Dakota",1191),
("Cayuga city, North Dakota",56),
("Center city, North Dakota",604),
("Christine city, North Dakota",175),
("Churchs Ferry city, North Dakota",8),
("Cleveland city, North Dakota",88),
("Clifford city, North Dakota",25),
("Cogswell city, North Dakota",71),
("Coleharbor city, North Dakota",63),
("Colfax city, North Dakota",180),
("Columbus city, North Dakota",127),
("Conway city, North Dakota",17),
("Cooperstown city, North Dakota",1104),
("Courtenay city, North Dakota",58),
("Crary city, North Dakota",78),
("Crosby city, North Dakota",1056),
("Crystal city, North Dakota",109),
("Dahlen CDP, North Dakota",46),
("Davenport city, North Dakota",291),
("Dawson city, North Dakota",39),
("Dazey city, North Dakota",60),
("Deering city, North Dakota",114),
("De Lamere CDP, North Dakota",27),
("Denhoff CDP, North Dakota",25),
("Des Lacs city, North Dakota",220),
("Devils Lake city, North Dakota",7317),
("Dickey city, North Dakota",38),
("Dickinson city, North Dakota",22707),
("Dodge city, North Dakota",145),
("Donnybrook city, North Dakota",70),
("Douglas city, North Dakota",59),
("Drake city, North Dakota",429),
("Drayton city, North | |
+ (256*mckin*(-234 + 9*np.pi**2 + 81*np.log(2/mus)))/
(243*mbkin) - (256*mckin**2*(-234 + 9*np.pi**2 + 81*np.log(2/mus)))/
(243*mbkin**2) + (128*mckin**3*(-234 + 9*np.pi**2 + 81*np.log(2/mus)))/
(81*mbkin**3) - (128*mckin**4*(-234 + 9*np.pi**2 + 81*np.log(2/mus)))/
(81*mbkin**4) - (256*mckin**5*(-234 + 9*np.pi**2 + 81*np.log(2/mus)))/
(81*mbkin**5) + (256*mckin**6*(-234 + 9*np.pi**2 + 81*np.log(2/mus)))/
(81*mbkin**6) + (128*mckin**7*(-234 + 9*np.pi**2 + 81*np.log(2/mus)))/
(243*mbkin**7) - (128*mckin**8*(-234 + 9*np.pi**2 + 81*np.log(2/mus)))/
(243*mbkin**8) + (256*mckin**3*np.log(mckin**2/mbkin**2)*
(-234 + 9*np.pi**2 + 81*np.log(2/mus)))/(81*mbkin**3) -
(256*mckin**4*np.log(mckin**2/mbkin**2)*(-234 + 9*np.pi**2 + 81*np.log(2/mus)))/
(81*mbkin**4) + (32*(1 - (8*mckin**2)/mbkin**2 + (8*mckin**6)/mbkin**6 -
mckin**8/mbkin**8 - (12*mckin**4*np.log(mckin**2/mbkin**2))/mbkin**4)*
(-234 + 9*np.pi**2 + 81*np.log(2/mus)))/243 -
(40*(1 - (8*mckin**2)/mbkin**2 + (8*mckin**6)/mbkin**6 -
mckin**8/mbkin**8 - (12*mckin**4*np.log(mckin**2/mbkin**2))/mbkin**4)*
((2*(-234 + 9*np.pi**2 + 81*np.log(2/mus)))/27 + (2*np.log(mus**2/mckin**2))/
9))/9 + (496*np.log(mus**2/mckin**2))/27 +
(512*mbkin*np.log(mus**2/mckin**2))/(81*mckin) +
(7424*mckin*np.log(mus**2/mckin**2))/(81*mbkin) -
(6016*mckin**2*np.log(mus**2/mckin**2))/(81*mbkin**2) -
(1024*mckin**3*np.log(mus**2/mckin**2))/(9*mbkin**3) +
(1792*mckin**5*np.log(mus**2/mckin**2))/(81*mbkin**5) +
(1664*mckin**6*np.log(mus**2/mckin**2))/(27*mbkin**6) -
(512*mckin**7*np.log(mus**2/mckin**2))/(81*mbkin**7) -
(464*mckin**8*np.log(mus**2/mckin**2))/(81*mbkin**8) +
(512*mckin*np.log(mckin**2/mbkin**2)*np.log(mus**2/mckin**2))/(9*mbkin) +
(512*mckin**2*np.log(mckin**2/mbkin**2)*np.log(mus**2/mckin**2))/(27*mbkin**2) +
(1024*mckin**3*np.log(mckin**2/mbkin**2)*np.log(mus**2/mckin**2))/
(27*mbkin**3) - (320*mckin**4*np.log(mckin**2/mbkin**2)*
np.log(mus**2/mckin**2))/(3*mbkin**4) -
(608*(1 - (8*mckin**2)/mbkin**2 + (8*mckin**6)/mbkin**6 -
mckin**8/mbkin**8 - (12*mckin**4*np.log(mckin**2/mbkin**2))/mbkin**4)*
np.log(mus**2/mckin**2))/81 - (2*(-17 + (16*mckin**2)/mbkin**2 +
(12*mckin**4)/mbkin**4 - (16*mckin**6)/mbkin**6 +
(5*mckin**8)/mbkin**8 - 12*np.log(mckin**2/mbkin**2))*
((-126695.04872806392 + 306504*np.log(2/mus) - 11664*np.pi**2*np.log(
2/mus) - 52488*np.log(2/mus)**2)/2916 -
(2*(-147 + 6*np.pi**2 + 54*np.log(2/mus))*np.log(mus**2/mckin**2))/81 +
(21 + 57*np.log(mus**2/mckin**2) - 2*np.log(mus**2/mckin**2)**2)/81))/3))/
mbkin**3 + (api4*(-20/3 - (112*mckin**2)/(3*mbkin**2) +
(48*mckin**4)/mbkin**4 - (16*mckin**6)/(3*mbkin**6) +
(4*mckin**8)/(3*mbkin**8) - (32*mckin**2*np.log(mckin**2/mbkin**2))/
mbkin**2 - (16*mckin**4*np.log(mckin**2/mbkin**2))/mbkin**4) +
api4**2*(-6273112628327/15320660355 + (2649349310527609*mbkin)/
(92373368167080*mckin) + (511262019577991257*mckin)/
(423377937432450*mbkin) - (8110322966684879*mckin**2)/
(1387871585100*mbkin**2) + (14471367571252478657*mckin**3)/
(802189776187800*mbkin**3) - (324339584899681999*mckin**4)/
(7864605648900*mbkin**4) + (90948914036368172*mckin**5)/
(1179690847335*mbkin**5) - (30666968564167367*mckin**6)/
(280878773175*mbkin**6) + (47194525665047*mckin**7)/
(420319900*mbkin**7) - (282219995801063*mckin**8)/
(3878010675*mbkin**8) - (3597890666443*mckin**9)/
(41247931725*mbkin**9) + (401662703898359*mckin**10)/
(5499724230*mbkin**10) - (18530565542904137*mckin**11)/
(164991726900*mbkin**11) + (1277483849769067349*mckin**12)/
(11796908473350*mbkin**12) - (151400853599994512*mckin**13)/
(1966151412225*mbkin**13) + (35484451873152599*mckin**14)/
(842636319525*mbkin**14) - (18702555837085577*mckin**15)/
(1048614086520*mbkin**15) + (64399203016910624*mckin**16)/
(11141524669275*mbkin**16) - (5468757565122931*mckin**17)/
(3932302824450*mbkin**17) + (162592894134003649*mckin**18)/
(692800261253100*mbkin**18) - (22978828361473*mckin**19)/
(925247723400*mbkin**19) + (4916068298621*mckin**20)/
(3958858635732*mbkin**20) + (16*np.pi**2)/3 + (8*mckin**2*np.pi**2)/
(3*mbkin**2) - (24*mckin**4*np.pi**2)/mbkin**4 + (56*mckin**6*np.pi**2)/
(3*mbkin**6) - (8*mckin**8*np.pi**2)/(3*mbkin**8) -
(5792*mckin**2*np.log(mckin**2/mbkin**2))/(9*mbkin**2) -
(4096*mckin**3*np.log(mckin**2/mbkin**2))/(27*mbkin**3) +
(21472*mckin**4*np.log(mckin**2/mbkin**2))/(27*mbkin**4) +
(16*mckin**2*np.pi**2*np.log(mckin**2/mbkin**2))/mbkin**2 -
(16*mckin**4*np.pi**2*np.log(mckin**2/mbkin**2))/mbkin**4 +
(6070*(1 - (8*mckin**2)/mbkin**2 + (8*mckin**6)/mbkin**6 -
mckin**8/mbkin**8 - (12*mckin**4*np.log(mckin**2/mbkin**2))/mbkin**4))/
81 - (5*np.pi**2*(1 - (8*mckin**2)/mbkin**2 + (8*mckin**6)/mbkin**6 -
mckin**8/mbkin**8 - (12*mckin**4*np.log(mckin**2/mbkin**2))/mbkin**4))/3 +
(8192*(1 + 7*np.log(2) + 7*np.log(1 - mckin/mbkin)))/315 -
(4096*mbkin*(1 + 7*np.log(2) + 7*np.log(1 - mckin/mbkin)))/(945*mckin) -
(8192*mckin*(1 + 7*np.log(2) + 7*np.log(1 - mckin/mbkin)))/(135*mbkin) +
(8192*mckin**2*(1 + 7*np.log(2) + 7*np.log(1 - mckin/mbkin)))/
(135*mbkin**2) - (8192*mckin**4*(1 + 7*np.log(2) +
7*np.log(1 - mckin/mbkin)))/(135*mbkin**4) +
(8192*mckin**5*(1 + 7*np.log(2) + 7*np.log(1 - mckin/mbkin)))/
(135*mbkin**5) - (8192*mckin**6*(1 + 7*np.log(2) +
7*np.log(1 - mckin/mbkin)))/(315*mbkin**6) +
(4096*mckin**7*(1 + 7*np.log(2) + 7*np.log(1 - mckin/mbkin)))/
(945*mbkin**7) - (2048*(1 + 8*np.log(2) + 8*np.log(1 - mckin/mbkin)))/
135 + (2048*mbkin*(1 + 8*np.log(2) + 8*np.log(1 - mckin/mbkin)))/
(945*mckin) + (8192*mckin*(1 + 8*np.log(2) + 8*np.log(1 - mckin/mbkin)))/
(189*mbkin) - (8192*mckin**2*(1 + 8*np.log(2) +
8*np.log(1 - mckin/mbkin)))/(135*mbkin**2) +
(4096*mckin**3*(1 + 8*np.log(2) + 8*np.log(1 - mckin/mbkin)))/
(135*mbkin**3) + (4096*mckin**4*(1 + 8*np.log(2) +
8*np.log(1 - mckin/mbkin)))/(135*mbkin**4) -
(8192*mckin**5*(1 + 8*np.log(2) + 8*np.log(1 - mckin/mbkin)))/
(135*mbkin**5) + (8192*mckin**6*(1 + 8*np.log(2) +
8*np.log(1 - mckin/mbkin)))/(189*mbkin**6) -
(2048*mckin**7*(1 + 8*np.log(2) + 8*np.log(1 - mckin/mbkin)))/
(135*mbkin**7) + (2048*mckin**8*(1 + 8*np.log(2) +
8*np.log(1 - mckin/mbkin)))/(945*mbkin**8) +
(8192*(1 + 9*np.log(2) + 9*np.log(1 - mckin/mbkin)))/2835 -
(1024*mbkin*(1 + 9*np.log(2) + 9*np.log(1 - mckin/mbkin)))/(2835*mckin) -
(1024*mckin*(1 + 9*np.log(2) + 9*np.log(1 - mckin/mbkin)))/(105*mbkin) +
(16384*mckin**2*(1 + 9*np.log(2) + 9*np.log(1 - mckin/mbkin)))/
(945*mbkin**2) - (2048*mckin**3*(1 + 9*np.log(2) +
9*np.log(1 - mckin/mbkin)))/(135*mbkin**3) +
(2048*mckin**5*(1 + 9*np.log(2) + 9*np.log(1 - mckin/mbkin)))/
(135*mbkin**5) - (16384*mckin**6*(1 + 9*np.log(2) +
9*np.log(1 - mckin/mbkin)))/(945*mbkin**6) +
(1024*mckin**7*(1 + 9*np.log(2) + 9*np.log(1 - mckin/mbkin)))/
(105*mbkin**7) - (8192*mckin**8*(1 + 9*np.log(2) +
9*np.log(1 - mckin/mbkin)))/(2835*mbkin**8) +
(1024*mckin**9*(1 + 9*np.log(2) + 9*np.log(1 - mckin/mbkin)))/
(2835*mbkin**9) + (512*(1 + 10*np.log(2) + 10*np.log(1 - mckin/mbkin)))/
315 - (512*mbkin*(1 + 10*np.log(2) + 10*np.log(1 - mckin/mbkin)))/
(2835*mckin) - (512*mckin*(1 + 10*np.log(2) +
10*np.log(1 - mckin/mbkin)))/(81*mbkin) +
(2560*mckin**2*(1 + 10*np.log(2) + 10*np.log(1 - mckin/mbkin)))/
(189*mbkin**2) - (1024*mckin**3*(1 + 10*np.log(2) +
10*np.log(1 - mckin/mbkin)))/(63*mbkin**3) +
(1024*mckin**4*(1 + 10*np.log(2) + 10*np.log(1 - mckin/mbkin)))/
(135*mbkin**4) + (1024*mckin**5*(1 + 10*np.log(2) +
10*np.log(1 - mckin/mbkin)))/(135*mbkin**5) -
(1024*mckin**6*(1 + 10*np.log(2) + 10*np.log(1 - mckin/mbkin)))/
(63*mbkin**6) + (2560*mckin**7*(1 + 10*np.log(2) +
10*np.log(1 - mckin/mbkin)))/(189*mbkin**7) -
(512*mckin**8*(1 + 10*np.log(2) + 10*np.log(1 - mckin/mbkin)))/
(81*mbkin**8) + (512*mckin**9*(1 + 10*np.log(2) +
10*np.log(1 - mckin/mbkin)))/(315*mbkin**9) -
(512*mckin**10*(1 + 10*np.log(2) + 10*np.log(1 - mckin/mbkin)))/
(2835*mbkin**10) + (9728*(1 + 11*np.log(2) + 11*np.log(1 - mckin/mbkin)))/
6237 - (4864*mbkin*(1 + 11*np.log(2) + 11*np.log(1 - mckin/mbkin)))/
(31185*mckin) - (19456*mckin*(1 + 11*np.log(2) +
11*np.log(1 - mckin/mbkin)))/(2835*mbkin) +
(9728*mckin**2*(1 + 11*np.log(2) + 11*np.log(1 - mckin/mbkin)))/
(567*mbkin**2) - (4864*mckin**3*(1 + 11*np.log(2) +
11*np.log(1 - mckin/mbkin)))/(189*mbkin**3) +
(19456*mckin**4*(1 + 11*np.log(2) + 11*np.log(1 - mckin/mbkin)))/
(945*mbkin**4) - (19456*mckin**6*(1 + 11*np.log(2) +
11*np.log(1 - mckin/mbkin)))/(945*mbkin**6) +
(4864*mckin**7*(1 + 11*np.log(2) + 11*np.log(1 - mckin/mbkin)))/
(189*mbkin**7) - (9728*mckin**8*(1 + 11*np.log(2) +
11*np.log(1 - mckin/mbkin)))/(567*mbkin**8) +
(19456*mckin**9*(1 + 11*np.log(2) + 11*np.log(1 - mckin/mbkin)))/
(2835*mbkin**9) - (9728*mckin**10*(1 + 11*np.log(2) +
11*np.log(1 - mckin/mbkin)))/(6237*mbkin**10) +
(4864*mckin**11*(1 + 11*np.log(2) + 11*np.log(1 - mckin/mbkin)))/
(31185*mbkin**11) + (128*(1 + 12*np.log(2) + 12*np.log(1 - mckin/mbkin)))/
81 - (128*mbkin*(1 + 12*np.log(2) + 12*np.log(1 - mckin/mbkin)))/
(891*mckin) - (256*mckin*(1 + 12*np.log(2) + 12*np.log(1 - mckin/mbkin)))/
(33*mbkin) + (1792*mckin**2*(1 + 12*np.log(2) +
12*np.log(1 - mckin/mbkin)))/(81*mbkin**2) -
(3200*mckin**3*(1 + 12*np.log(2) + 12*np.log(1 - mckin/mbkin)))/
(81*mbkin**3) + (128*mckin**4*(1 + 12*np.log(2) +
12*np.log(1 - mckin/mbkin)))/(3*mbkin**4) -
(512*mckin**5*(1 + 12*np.log(2) + 12*np.log(1 - mckin/mbkin)))/
(27*mbkin**5) - (512*mckin**6*(1 + 12*np.log(2) +
12*np.log(1 - mckin/mbkin)))/(27*mbkin**6) +
(128*mckin**7*(1 + 12*np.log(2) + 12*np.log(1 - mckin/mbkin)))/
(3*mbkin**7) - (3200*mckin**8*(1 + 12*np.log(2) +
12*np.log(1 - mckin/mbkin)))/(81*mbkin**8) +
(1792*mckin**9*(1 + 12*np.log(2) + 12*np.log(1 - mckin/mbkin)))/
(81*mbkin**9) - (256*mckin**10*(1 + 12*np.log(2) +
12*np.log(1 - mckin/mbkin)))/(33*mbkin**10) +
(128*mckin**11*(1 + 12*np.log(2) + 12*np.log(1 - mckin/mbkin)))/
(81*mbkin**11) - (128*mckin**12*(1 + 12*np.log(2) +
12*np.log(1 - mckin/mbkin)))/(891*mbkin**12) +
(2816*(1 + 13*np.log(2) + 13*np.log(1 - mckin/mbkin)))/1755 -
(704*mbkin*(1 + 13*np.log(2) + 13*np.log(1 - mckin/mbkin)))/(5265*mckin) -
(704*mckin*(1 + 13*np.log(2) + 13*np.log(1 - mckin/mbkin)))/(81*mbkin) +
(11264*mckin**2*(1 + 13*np.log(2) + 13*np.log(1 - mckin/mbkin)))/
(405*mbkin**2) - (7744*mckin**3*(1 + 13*np.log(2) +
13*np.log(1 - mckin/mbkin)))/(135*mbkin**3) +
(30976*mckin**4*(1 + 13*np.log(2) + 13*np.log(1 - mckin/mbkin)))/
(405*mbkin**4) - (7744*mckin**5*(1 + 13*np.log(2) +
13*np.log(1 - mckin/mbkin)))/(135*mbkin**5) +
(7744*mckin**7*(1 + 13*np.log(2) + 13*np.log(1 - mckin/mbkin)))/
(135*mbkin**7) - (30976*mckin**8*(1 + 13*np.log(2) +
13*np.log(1 - mckin/mbkin)))/(405*mbkin**8) +
(7744*mckin**9*(1 + 13*np.log(2) + 13*np.log(1 - mckin/mbkin)))/
(135*mbkin**9) - (11264*mckin**10*(1 + 13*np.log(2) +
13*np.log(1 - mckin/mbkin)))/(405*mbkin**10) +
(704*mckin**11*(1 + 13*np.log(2) + 13*np.log(1 - mckin/mbkin)))/
(81*mbkin**11) - (2816*mckin**12*(1 + 13*np.log(2) +
13*np.log(1 - mckin/mbkin)))/(1755*mbkin**12) +
(704*mckin**13*(1 + 13*np.log(2) + 13*np.log(1 - mckin/mbkin)))/
(5265*mbkin**13) + (50656*(1 + 14*np.log(2) + 14*np.log(1 - mckin/mbkin)))/
31185 - (50656*mbkin*(1 + 14*np.log(2) + 14*np.log(1 - mckin/mbkin)))/
(405405*mckin) - (50656*mckin*(1 + 14*np.log(2) +
14*np.log(1 - mckin/mbkin)))/(5265*mbkin) +
(50656*mckin**2*(1 + 14*np.log(2) + 14*np.log(1 - mckin/mbkin)))/
(1485*mbkin**2) - (354592*mckin**3*(1 + 14*np.log(2) +
14*np.log(1 - mckin/mbkin)))/(4455*mbkin**3) +
(50656*mckin**4*(1 + 14*np.log(2) + 14*np.log(1 - mckin/mbkin)))/
(405*mbkin**4) - (50656*mckin**5*(1 + 14*np.log(2) +
14*np.log(1 - mckin/mbkin)))/(405*mbkin**5) +
(50656*mckin**6*(1 + 14*np.log(2) + 14*np.log(1 - mckin/mbkin)))/
(945*mbkin**6) + (50656*mckin**7*(1 + 14*np.log(2) +
14*np.log(1 - mckin/mbkin)))/(945*mbkin**7) -
(50656*mckin**8*(1 + 14*np.log(2) + 14*np.log(1 - mckin/mbkin)))/
(405*mbkin**8) + (50656*mckin**9*(1 + 14*np.log(2) +
14*np.log(1 - mckin/mbkin)))/(405*mbkin**9) -
(354592*mckin**10*(1 + 14*np.log(2) + 14*np.log(1 - mckin/mbkin)))/
(4455*mbkin**10) + (50656*mckin**11*(1 + 14*np.log(2) +
14*np.log(1 - mckin/mbkin)))/(1485*mbkin**11) -
(50656*mckin**12*(1 + 14*np.log(2) + 14*np.log(1 - mckin/mbkin)))/
(5265*mbkin**12) + (50656*mckin**13*(1 + 14*np.log(2) +
14*np.log(1 - mckin/mbkin)))/(31185*mbkin**13) -
(50656*mckin**14*(1 + 14*np.log(2) + 14*np.log(1 - mckin/mbkin)))/
(405405*mbkin**14) + (94976*(1 + 15*np.log(2) +
15*np.log(1 - mckin/mbkin)))/57915 -
(6784*mbkin*(1 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/
(57915*mckin) - (13568*mckin*(1 + 15*np.log(2) +
15*np.log(1 - mckin/mbkin)))/(1287*mbkin) +
(474880*mckin**2*(1 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/
(11583*mbkin**2) - (94976*mckin**3*(1 + 15*np.log(2) +
15*np.log(1 - mckin/mbkin)))/(891*mbkin**3) +
(94976*mckin**4*(1 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/
(495*mbkin**4) - (94976*mckin**5*(1 + 15*np.log(2) +
15*np.log(1 - mckin/mbkin)))/(405*mbkin**5) +
(13568*mckin**6*(1 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/
(81*mbkin**6) - (13568*mckin**8*(1 + 15*np.log(2) +
15*np.log(1 - mckin/mbkin)))/(81*mbkin**8) +
(94976*mckin**9*(1 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/
(405*mbkin**9) - (94976*mckin**10*(1 + 15*np.log(2) +
15*np.log(1 - mckin/mbkin)))/(495*mbkin**10) +
(94976*mckin**11*(1 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/
(891*mbkin**11) - (474880*mckin**12*(1 + 15*np.log(2) +
15*np.log(1 - mckin/mbkin)))/(11583*mbkin**12) +
(13568*mckin**13*(1 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/
(1287*mbkin**13) - (94976*mckin**14*(1 + 15*np.log(2) +
15*np.log(1 - mckin/mbkin)))/(57915*mbkin**14) +
(6784*mckin**15*(1 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/
(57915*mbkin**15) + (44656*(1 + 16*np.log(2) +
16*np.log(1 - mckin/mbkin)))/27027 -
(44656*mbkin*(1 + 16*np.log(2) + 16*np.log(1 - mckin/mbkin)))/
(405405*mckin) - (357248*mckin*(1 + 16*np.log(2) +
16*np.log(1 - mckin/mbkin)))/(31185*mbkin) +
(357248*mckin**2*(1 + | |
# Source repository: JuanFMontesinos/TorchTree
from collections import OrderedDict
def _addindent(s_, numSpaces):
s = s_.split('\n')
# don't do anything for single-line stuff
if len(s) == 1:
return s_
first = s.pop(0)
s = [(numSpaces * ' ') + line for line in s]
s = '\n'.join(s)
s = first + '\n' + s
return s
class Tree(object):
    def __init__(self):
        # NOTE(review): these string sentinels ('abrete sesamo' — Spanish for
        # "open sesame") look like magic values meant to be intercepted by an
        # overridden __setattr__ (not visible in this chunk) that swaps them
        # for real OrderedDict containers, mirroring torch.nn.Module.__init__
        # — TODO confirm. If no such interception exists, the set_level(0)
        # call below would fail with AttributeError ('str' has no 'update').
        self._parameters = 'abrete sesamo'
        self._state_dict_hooks = 'abrete sesamo'
        self._load_state_dict_pre_hooks = 'abrete sesamo'
        self._modules = 'abrete sesamo'
        self._tree_properties = 'abrete sesamo'
        self.set_level(0)  # every node starts at tree depth 0
def level(self):
return self._tree_properties.get('level')
def set_level(self, value):
self._tree_properties.update({'level': value})
def register_parameter(self, name, param):
r"""Adds a parameter to the module.
The parameter can be accessed as an attribute using given name.
Args:
name (string): name of the parameter. The parameter can be accessed
from this module using the given name
param (Parameter): parameter to be added to the module.
"""
if '_parameters' not in self.__dict__:
raise AttributeError(
"cannot assign parameter before Module.__init__() call")
elif not isinstance(name, str):
raise TypeError("parameter name should be a string. "
"Got {}".format(type(name)))
elif '.' in name:
raise KeyError("parameter name can't contain \".\"")
elif name == '':
raise KeyError("parameter name can't be empty string \"\"")
elif hasattr(self, name) and name not in self._parameters:
raise KeyError("attribute '{}' already exists".format(name))
if isinstance(param, Tree):
raise TypeError("parameter cannot be a Tree object. Use add_module to add nodes.")
else:
self._parameters[name] = param
def add_module(self, name, module):
r"""Adds a child module to the current module.
The module can be accessed as an attribute using the given name.
Args:
name (string): name of the child module. The child module can be
accessed from this module using the given name
module (Module): child module to be added to the module.
"""
if not isinstance(module, Tree) and module is not None:
raise TypeError("{} is not a Module subclass".format(
type(module)))
elif not isinstance(name, str):
raise TypeError("module name should be a string. Got {}".format(
type(name)))
elif hasattr(self, name) and name not in self._modules:
raise KeyError("attribute '{}' already exists".format(name))
elif '.' in name:
raise KeyError("module name can't contain \".\"")
elif name == '':
raise KeyError("module name can't be empty string \"\"")
module.set_level(self.level() + 1)
self._modules[name] = module
def _apply(self, fn):
for module in self.children():
module._apply(fn)
for key, buf in self._parameters.items():
if buf is not None:
self._parameters[key] = fn(buf)
return self
def apply(self, fn):
r"""Applies ``fn`` recursively to every submodule (as returned by ``.children()``)
as well as self. Typical use includes initializing the parameters of a model
(see also :ref:`torch-nn-init`).
Args:
fn (:class:`Module` -> None): function to be applied to each submodule
Returns:
Module: self
"""
for module in self.children():
module.apply(fn)
fn(self)
return self
    def _tracing_name(self, tracing_state):
        # Best-effort lookup of the attribute name under which *self* is
        # registered in the module currently being traced; returns None when
        # it cannot be determined.
        # NOTE(review): ``tracing_state`` is expected to expose
        # ``_traced_module_stack`` (torch JIT internals) -- not defined in
        # this file; confirm against the caller.
        if not tracing_state._traced_module_stack:
            return None
        module = tracing_state._traced_module_stack[-1]
        for name, child in module.named_children():
            if child is self:
                return name
        return None
def __setstate__(self, state):
self.__dict__.update(state)
# Support loading old checkpoints that don't have the following attrs:
if '_state_dict_hooks' not in self.__dict__:
self._state_dict_hooks = OrderedDict()
if '_load_state_dict_pre_hooks' not in self.__dict__:
self._load_state_dict_pre_hooks = OrderedDict()
def __getattr__(self, name):
if '_parameters' in self.__dict__:
_parameters = self.__dict__['_parameters']
if name in _parameters:
return _parameters[name]
if '_modules' in self.__dict__:
modules = self.__dict__['_modules']
if name in modules:
return modules[name]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, name))
    def __setattr__(self, name, value):
        # Route attribute writes into the right registry:
        #   * the sentinel string 'abrete sesamo' (used by __init__) installs
        #     a fresh OrderedDict directly on the instance;
        #   * Tree instances go into self._modules;
        #   * everything else is registered as a parameter.
        def remove_from(*dicts):
            # Drop any previous binding of `name` so that a name lives in
            # exactly one registry at a time.
            for d in dicts:
                if name in d:
                    del d[name]
        if value == 'abrete sesamo':
            # Bypass the registries entirely for the bookkeeping dicts.
            object.__setattr__(self, name, OrderedDict())
        elif isinstance(value, Tree):
            modules = self.__dict__.get('_modules')
            if modules is None:
                raise AttributeError(
                    "cannot assign module before Module.__init__() call")
            remove_from(self.__dict__, self._parameters)
            modules[name] = value
        else:
            params = self.__dict__.get('_parameters')
            if params is None:
                raise AttributeError(
                    "cannot assign parameters before Module.__init__() call")
            remove_from(self.__dict__, self._modules)
            # Delegates validation (name type, dots, clashes) to
            # register_parameter.
            self.register_parameter(name, value)
def __delattr__(self, name):
if name in self._parameters:
del self._parameters[name]
elif name in self._modules:
del self._modules[name]
else:
object.__delattr__(self, name)
def __call__(self, *args):
tmp = self
for param in args:
tmp = tmp.__getattr__(param)
return tmp
    def state_dict(self, destination=None, prefix='', keep_vars=False):
        r"""Returns a dictionary containing a whole state of the module.
        Both parameters and persistent buffers (e.g. running averages) are
        included. Keys are corresponding parameter and buffer names.

        Note:
            ``keep_vars`` is accepted for API compatibility but not used
            here; values are stored as-is.

        Returns:
            dict:
                a dictionary containing a whole state of the module
        Example::
            >>> module.state_dict().keys()
            ['bias', 'weight']
        """
        if destination is None:
            destination = OrderedDict()
            # _metadata is an attribute stuck onto the OrderedDict itself,
            # keyed by dotted module path.
            destination._metadata = OrderedDict()
        # Record this node's depth under its (prefix minus trailing dot) key.
        destination._metadata[prefix[:-1]] = local_metadata = dict(level=self.level())
        for name, param in self._parameters.items():
            if param is not None:
                destination[prefix + name] = param
        # Recurse into children, extending the dotted key prefix.
        for name, module in self._modules.items():
            if module is not None:
                module.state_dict(destination, prefix + name + '.', keep_vars=keep_vars)
        # Post-processing hooks may replace the destination wholesale.
        for hook in self._state_dict_hooks.values():
            hook_result = hook(self, destination, prefix, local_metadata)
            if hook_result is not None:
                destination = hook_result
        return destination
def parameters(self, recurse=True):
r"""Returns an iterator over module parameters.
This is typically passed to an optimizer.
Args:
recurse (bool): if True, then yields parameters of this module
and all submodules. Otherwise, yields only parameters that
are direct members of this module.
Yields:
Parameter: module parameter
"""
for name, param in self.named_parameters(recurse=recurse):
yield param
def named_parameters(self, prefix='', recurse=True):
r"""Returns an iterator over module parameters, yielding both the
name of the parameter as well as the parameter itself.
Args:
prefix (str): prefix to prepend to all parameter names.
recurse (bool): if True, then yields parameters of this module
and all submodules. Otherwise, yields only parameters that
are direct members of this module.
Yields:
(string, Parameter): Tuple containing the name and parameter
"""
gen = self._named_members(
lambda module: module._parameters.items(),
prefix=prefix, recurse=recurse)
for elem in gen:
yield elem
def named_children(self):
r"""Returns an iterator over immediate children modules, yielding both
the name of the module as well as the module itself.
Yields:
(string, Module): Tuple containing a name and child module
"""
memo = set()
for name, module in self._modules.items():
if module is not None and module not in memo:
memo.add(module)
yield name, module
def modules(self):
r"""Returns an iterator over all modules in the network.
Yields:
Module: a module in the network
Note:
Duplicate modules are returned only once. In the following
example, ``l`` will be returned only once.
"""
for name, module in self.named_modules():
yield module
def children(self):
r"""Returns an iterator over immediate children modules.
Yields:
Module: a child module
"""
for name, module in self.named_children():
yield module
def _named_members(self, get_members_fn, prefix='', recurse=True):
r"""Helper method for yielding various names + members of modules."""
memo = set()
modules = self.named_modules(prefix=prefix) if recurse else [(prefix, self)]
for module_prefix, module in modules:
members = get_members_fn(module)
for k, v in members:
if v is None or v in memo:
continue
memo.add(v)
name = module_prefix + ('.' if module_prefix else '') + k
yield name, v
def named_modules(self, memo=None, prefix=''):
r"""Returns an iterator over all modules in the network, yielding
both the name of the module as well as the module itself.
Yields:
(string, Module): Tuple of name and module
Note:
Duplicate modules are returned only once. In the following
example, ``l`` will be returned only once.
"""
if memo is None:
memo = set()
if self not in memo:
memo.add(self)
yield prefix, self
for name, module in self._modules.items():
if module is None:
continue
submodule_prefix = prefix + ('.' if prefix else '') + name
for m in module.named_modules(memo, submodule_prefix):
yield m
    def __repr__(self):
        # Build "Name(extra\n  (child): repr...)" like torch.nn.Module.
        # We treat the extra repr like the sub-module, one item per line
        extra_lines = []
        extra_repr = self.extra_repr()
        # empty string will be split into list ['']
        if extra_repr:
            extra_lines = extra_repr.split('\n')
        child_lines = []
        # One "(name): repr" entry per registered child, indented two spaces.
        for key, module in self._modules.items():
            mod_str = repr(module)
            mod_str = _addindent(mod_str, 2)
            child_lines.append('(' + key + '): ' + mod_str)
        lines = extra_lines + child_lines
        main_str = self._get_name() + '('
        if lines:
            # simple one-liner info, which most builtin Modules will use
            if len(extra_lines) == 1 and not child_lines:
                main_str += extra_lines[0]
            else:
                main_str += '\n  ' + '\n  '.join(lines) + '\n'
        main_str += ')'
        return main_str
def extra_repr(self):
r"""Set the extra representation of the module
To print customized extra information, you should reimplement
this method in your own modules. Both single-line and multi-line
strings are acceptable.
"""
return ''
def _get_name(self):
return | |
<reponame>padfoot27/merlin
import click
import tmdbsimple as tmdb
import time
# NOTE(review): hard-coded API key committed to source control -- should be
# loaded from an environment variable or a config file instead.
tmdb.API_KEY = 'deb5d2f55e284931baf4f7e7020cfe44'
# TMDb genre name -> TMDb numeric genre id (used to build discover queries).
genreDict = {u'Action': 28,
u'Adventure': 12,
u'Animation': 16,
u'Comedy': 35,
u'Crime': 80,
u'Documentary': 99,
u'Drama': 18,
u'Family': 10751,
u'Fantasy': 14,
u'Foreign': 10769,
u'History': 36,
u'Horror': 27,
u'Music': 10402,
u'Mystery': 9648,
u'Romance': 10749,
u'Science Fiction': 878,
u'TV Movie': 10770,
u'Thriller': 53,
u'War': 10752,
u'Western': 37}
# Menu position (1-20) -> genre name, for the interactive genre picker.
numToGenre = {1: u'Action',
2: u'Adventure',
3: u'Animation',
4: u'Comedy',
5: u'Crime',
6: u'Documentary',
7: u'Drama',
8: u'Family',
9: u'Fantasy',
10: u'Foreign',
11: u'History',
12: u'Horror',
13: u'Music',
14: u'Mystery',
15: u'Romance',
16: u'Science Fiction',
17: u'TV Movie',
18: u'Thriller',
19: u'War',
20: u'Western'}
# Movie class
class Movie:
    """Lightweight wrapper around one TMDb movie result.

    Holds the fields taken from a search/discover response and fetches
    trailers, credits and keywords on demand via extra API calls.
    NOTE(review): this module is Python 2 code (``xrange``, ``encode``
    producing byte strings concatenated with str).
    """
    def __init__(self,title,ID,lan,genres,overview,rating,release_date):
        self.title = title
        self.ID = ID
        self.lan = lan
        self.genres = genres
        self.overview = overview
        self.rating = rating
        self.release_date = release_date
    def get_title(self):
        return self.title
    def get_id(self):
        return self.ID
    def get_lan(self):
        return self.lan
    def get_genres(self):
        return self.genres
    def get_overview(self):
        return self.overview
    def get_rating(self):
        return self.rating
    def get_release_date(self):
        return self.release_date
    def findTrailer(self):
        """Return YouTube watch URLs for this movie's videos; empty list
        when every attempt fails."""
        movie = tmdb.Movies(self.get_id())
        tries = 0
        trailer = []
        # Network calls are flaky: retry the request up to three times.
        while tries < 3:
            tries += 1
            try:
                trailer = movie.videos()
                trailer = trailer['results']
                break
            except:
                continue
        result = []
        for i in xrange(len(trailer)):
            result.append('https://www.youtube.com/watch?v=' + trailer[i]['key'].encode('ascii','ignore'))
        return result
    def findCastAndCrew(self):
        """Return ``(cast, crew)``: cast as 'Actor As Character' strings,
        crew as ``(name, job)`` tuples. Both empty on repeated failure."""
        movie = tmdb.Movies(self.get_id())
        tries = 0
        cast = []
        crew = []
        while tries < 3:
            tries += 1
            try:
                # NOTE: shadows the `credits` builtin; harmless locally.
                credits = movie.credits()
                cast = credits['cast']
                crew = credits['crew']
                break
            except:
                continue
        ca = []
        cr = []
        for i in xrange(len(cast)):
            ca.append(cast[i]['name'].encode('ascii','ignore') + ' As ' + cast[i]['character'].encode('ascii','ignore'))
        for i in xrange(len(crew)):
            cr.append((crew[i]['name'].encode('ascii','ignore'),crew[i]['job'].encode('ascii','ignore')))
        return ca,cr
    def findKeywords(self):
        """Return the movie's keywords as one comma-separated string
        ('' when none were fetched)."""
        movie = tmdb.Movies(self.get_id())
        keywords = []
        tries = 0
        while tries < 3:
            tries += 1
            try:
                keywords = movie.keywords()
                keywords = keywords['keywords']
                break
            except:
                continue
        result = ''
        for i in xrange(len(keywords)):
            result += keywords[i]['name'].encode('ascii','ignore') + ','
        # Drop the trailing comma (slicing '' is safe).
        return result[:-1]
    def __repr__(self):
        return self.title
# Helper Function
def findPerson(name):
    """Search TMDb for a person by (approximate) name.

    Returns a dict mapping ``(rank, matched_name)`` tuples (rank starts at
    1) to TMDb person ids for up to the first three matches; empty dict when
    all attempts fail.
    """
    search = tmdb.Search()
    nID = {}
    tries = 0
    # Network calls are flaky: retry the request up to three times.
    while tries < 3:
        tries += 1
        try:
            response = search.person(query=name)
        except Exception:
            # BUG FIX: was a bare ``except:`` that also swallowed
            # KeyboardInterrupt/SystemExit.
            continue
        # BUG FIX: the original indexed results[0..2] unconditionally, so a
        # query with fewer than three matches raised IndexError, which the
        # bare except turned into pointless retries returning stale partial
        # data. Slice to at most three instead.
        for i, person in enumerate(response['results'][:3]):
            nID[(i + 1, person['name'])] = person['id']
        break
    return nID
# Find movies based on the given data
def discoverMovie(genre,cast,crew,language,year,page):
    """Query the TMDb discover endpoint with the collected filters.

    Returns ``(result, total_pages, total_results)`` where ``result`` maps
    a 1-based rank to a Movie; ``(result, 0, 0)`` when every attempt failed.
    """
    discover = tmdb.Discover()
    tries = 0
    response = None
    date = ''
    if year:
        # Only movies released on/after Jan 1st of the given year.
        date = year + '-01-01'
    kwargs = {'with_cast':cast,'with_crew':crew,'with_genres':genre,'language':language,'primary_release_date.gte':date,'page':page}
    # Network calls are flaky: retry the request up to three times.
    while tries < 3:
        tries += 1
        try:
            response = discover.movie(**kwargs)
            break
        except:
            continue
    r = None
    if response:
        r = response['results']
    result = {}
    if response:
        # Wrap each raw result dict in a Movie, keyed by 1-based rank.
        for i in xrange(len(r)):
            m = Movie(r[i]['title'],r[i]['id'],r[i]['original_language'],r[i]['genre_ids'],r[i]['overview'],r[i]['vote_average'],r[i]['release_date'])
            result[i + 1] = m
    if response:
        return result,response['total_pages'],response['total_results']
    else:
        return result,0,0
# Search for a movie
def findMovie(name,page):
    """Search TMDb for movies matching ``name`` (one results page).

    Returns ``(result, total_pages, total_results)`` where ``result`` maps
    a 1-based rank to a Movie; ``(result, 0, 0)`` when every attempt failed.
    """
    search = tmdb.Search()
    tries = 0
    response = None
    kwargs = {'query':name,'page':page}
    # Network calls are flaky: retry the request up to three times.
    while tries < 3:
        tries += 1
        try:
            response = search.movie(**kwargs)
            break
        except:
            continue
    r = None
    if response:
        r = response['results']
    result = {}
    if response:
        # Wrap each raw result dict in a Movie, keyed by 1-based rank.
        for i in xrange(len(r)):
            m = Movie(r[i]['title'],r[i]['id'],r[i]['original_language'],r[i]['genre_ids'],r[i]['overview'],r[i]['vote_average'],r[i]['release_date'])
            result[i + 1] = m
    if response:
        return result,response['total_pages'],response['total_results']
    else:
        return result,0,0
# Display the movies
def displayMovie(movies,wantQuit):
    """Interactively show each movie in rank order, optionally with credits,
    keywords and trailers.

    ``wantQuit`` is only returned unchanged when ``movies`` is empty; for
    every shown movie it is overwritten by the user's "look at more movies"
    answer, and the loop stops on the first False.
    """
    for key in sorted(movies.iterkeys()):
        # NOTE(review): trailing comma makes this statement a 1-tuple --
        # harmless leftover, likely from a Python 2 ``print x,``.
        click.echo(movies[key].get_title() + ' ' + str(movies[key].get_rating())),
        click.echo(movies[key].get_release_date())
        click.echo(movies[key].get_overview())
        click.echo('\n')
        wantDetails = click.confirm('Do you want more details regarding this movie ')
        click.echo('\n')
        if wantDetails:
            click.echo('Wait up\n')
            cast,crew = movies[key].findCastAndCrew()
            click.echo('Cast')
            # Show at most the twelve top-billed cast members.
            for i in xrange(min(len(cast),12)):
                click.echo(cast[i])
            click.echo('\n')
            click.echo('Crew')
            # Only the key production roles are listed.
            for i in xrange(len(crew)):
                if (crew[i][1] in ['Director','Screenplay','Editor','Producer','Writer','Original Music Composer']):
                    click.echo(crew[i][0] + ' ... ' + crew[i][1])
            click.echo('\n')
            click.echo('Getting the keywords for the movie\n')
            keywords = movies[key].findKeywords()
            if keywords:
                click.echo(keywords)
            else:
                click.echo('Nothing Found')
            click.echo('\n')
            wantTrailer = click.confirm('Would you like to see the trailer')
            click.echo('\n')
            if wantTrailer:
                trailer = movies[key].findTrailer()
                if trailer:
                    for i in xrange(len(trailer)):
                        click.echo(trailer[i])
                else:
                    click.echo('Nothing Found')
                click.echo('\n')
        wantQuit = click.confirm('Do you want to look at more movies')
        click.echo('\n')
        if wantQuit == False:
            break
    return wantQuit
# Pick the movie
def pickMovie(movies):
    """Ask the user to confirm each candidate in rank order.

    Returns ``(True, movie_id)`` for the first confirmed movie, or
    ``(False, -1)`` when none was confirmed.
    """
    picked = False
    for key in sorted(movies.iterkeys()):
        picked = click.confirm('Are you looking for ' + movies[key].get_title() + ' ' + str(movies[key].get_release_date()))
        click.echo('\n')
        if picked:
            return picked,movies[key].get_id()
    return picked,-1
# Find similar Movies
def similarMovie(id,page):
    """Fetch one page of movies similar to the TMDb movie ``id``.

    Returns ``(result, total_pages, total_results)`` where ``result`` maps
    a 1-based rank to a Movie; ``(result, 0, 0)`` when every attempt failed.
    """
    movie = tmdb.Movies(id)
    tries = 0
    response = None
    # Removed the dead local ``kwargs = {'page':page}`` -- it was built but
    # never passed to the API call.
    # Network calls are flaky: retry the request up to three times.
    while tries < 3:
        tries += 1
        try:
            response = movie.similar_movies(page=page)
            break
        except Exception:
            # Was a bare ``except:``; narrowed so Ctrl-C still works.
            continue
    result = {}
    if not response:
        return result,0,0
    r = response['results']
    # Wrap each raw result dict in a Movie, keyed by 1-based rank.
    for i in xrange(len(r)):
        m = Movie(r[i]['title'],r[i]['id'],r[i]['original_language'],r[i]['genre_ids'],r[i]['overview'],r[i]['vote_average'],r[i]['release_date'])
        result[i + 1] = m
    return result,response['total_pages'],response['total_results']
# Starts from here
@click.group(invoke_without_command = True)
@click.pass_context
def discover(ctx):
    '''
    This is Merlin
    He is google's dumb cousin who can search for movies
    He is a tad slow, so please be patient with him
    He is dumb too, so while telling him about names of the cast or crew, make sure to avoid spelling mistakes
    For example, you are looking for <NAME>, you just tell him arnold and he will find him
    If you are looking for <NAME>, you just tell him anne, and so on
    If you will misspell a name he won't be able to find the intended person
    So, just follow the instructions and let Merlin weave his spell
    '''
    # NOTE: the docstring above is user-facing click help text -- do not
    # edit it casually.
    # Run the interactive flow only when no subcommand (e.g. `similar`)
    # was invoked.
    if ctx.invoked_subcommand is None:
        click.echo("Hi, I am Merlin, your personal movie recommender\n")
        time.sleep(1)
        click.echo("So,let's find you a Movie\n")
        # Get the Language ('' means no language filter, i.e. all languages)
        language = 'en'
        wantLanguage = click.confirm('Do you want to pick the language,default is english')
        click.echo('\n')
        if wantLanguage:
            justEN = click.confirm('All languages')
            click.echo('\n')
            if justEN:
                language = ''
        # Get the Genre (comma-separated TMDb genre ids)
        genre = ''
        wantGenre = click.confirm('Do you want to a pick a genre ')
        click.echo('\n')
        if wantGenre:
            for key in sorted(numToGenre.iterkeys()):
                click.echo(str(key) + '. ' + numToGenre[key].encode('ascii','ignore'))
            g = click.prompt('\nPick a Genre(comma separated)')
            click.echo('\n')
            gList = g.split(',')
            # Map each picked menu number to its TMDb genre id.
            for i in xrange(len(gList)):
                genre += str(genreDict[numToGenre[int(gList[i])]])
                genre += ','
            genre = genre[:-1]
        # Get the year (lower bound on the release date)
        year = ''
        wantYear = click.confirm('Do you want to pick a year, only movies after this year will be picked')
        click.echo('\n')
        if wantYear:
            year = click.prompt('Pick a year (YYYY)')
            click.echo('\n')
        # Get the Cast (comma-separated TMDb person ids)
        cast = ''
        wantCast = click.confirm('Do you want to specify the cast ')
        click.echo('\n')
        if wantCast:
            click.echo('Pick the cast, Be as specific as you can and avoid spelling mistakes\n')
            while True:
                # NOTE(review): this Search() instance is unused --
                # findPerson builds its own.
                search = tmdb.Search()
                name = click.prompt('Give me a name')
                click.echo('\n')
                result = findPerson(name)
                if (result):
                    # Offer each candidate; take the first confirmed one.
                    for key in sorted(result.iterkeys()):
                        add = click.confirm('Are you looking for ' + key[1].encode('ascii','ignore'))
                        click.echo('\n')
                        if add:
                            cast += str(result[key])
                            cast += ','
                            break;
                else:
                    click.echo('Sorry, try again')
                    click.echo('\n')
                confirm = click.confirm('You want to add more people')
                click.echo('\n')
                if (confirm == False):
                    break
            # Drop the trailing comma.
            cast = cast[:-1]
        # Get the crew (same flow as the cast, comma-separated person ids)
        crew = ''
        wantCrew = click.confirm('Do you want to specify the crew ')
        click.echo('\n')
        if wantCrew:
            click.echo('Pick the crew, Be as specific as you can and avoid spelling mistakes\n')
            while True:
                search = tmdb.Search()
                name = click.prompt('Give me a name')
                click.echo('\n')
                result = findPerson(name)
                if (result):
                    for key in sorted(result.iterkeys()):
                        add = click.confirm('Are you looking for ' + key[1].encode('ascii','ignore'))
                        click.echo('\n')
                        if add:
                            crew += str(result[key])
                            crew += ','
                            break;
                else:
                    click.echo('Sorry, try again')
                    click.echo('\n')
                confirm = click.confirm('You want to add more people')
                click.echo('\n')
                if (confirm == False):
                    break
            crew = crew[:-1]
        # Get results, paging until the user quits or results run out
        click.echo('Sit back and Relax\n')
        page = 1
        castSearch = cast
        crewSearch = crew
        tPages = 0
        tResults = 0
        while True:
            if page > 1:
                click.echo('Looking for more results\n')
            movies,tPages,tResults = discoverMovie(genre,castSearch,crewSearch,language,year,page)
            wantQuit = False
            if movies:
                page += 1
                wantQuit = displayMovie(movies,wantQuit)
                click.echo('\n')
                if wantQuit == False:
                    break
            else:
                if tResults == 0:
                    click.echo('Nothing found\n')
                    break
                elif page > tPages:
                    click.echo('End of the results\n')
                    break
                else:
                    click.echo('Sorry, try again')
        #if wantQuit == False:
        #    break
        click.echo("That's all Folks,Merlin says goodbye\n")
@discover.command()
def similar():
# Get the name of the movie
click.echo('\n')
name = click.prompt('Give me the name of the Movie')
click.echo('\n')
click.echo('Fetching the list of movies...\n')
page = 1
id = | |
1px solid\
#31363B;\x0a bo\
rder-radius: 4px\
;\x0a}\x0a\x0aQCalendarWi\
dget:disabled {\x0a\
background-c\
olor: #232629;\x0a \
color: #4D545\
B;\x0a}\x0a\x0a/* QLCDNum\
ber ------------\
----------------\
----------------\
----------------\
- */\x0a\x0aQLCDNumber\
{\x0a backgroun\
d-color: #232629\
;\x0a color: #EF\
F0F1;\x0a}\x0a\x0aQLCDNum\
ber:disabled {\x0a \
background-co\
lor: #232629;\x0a \
color: #4D545B\
;\x0a}\x0a\x0a/* QProgres\
sBar -----------\
----------------\
----------------\
----------------\
*/\x0a\x0aQProgressBa\
r {\x0a backgrou\
nd-color: #23262\
9;\x0a border: 1\
px solid #31363B\
;\x0a color: #EF\
F0F1;\x0a border\
-radius: 4px;\x0a \
text-align: ce\
nter;\x0a}\x0a\x0aQProgre\
ssBar:disabled {\
\x0a background-\
color: #232629;\x0a\
border: 1px \
solid #31363B;\x0a \
color: #4D545\
B;\x0a border-ra\
dius: 4px;\x0a t\
ext-align: cente\
r;\x0a}\x0a\x0aQProgressB\
ar::chunk {\x0a \
background-color\
: #3375A3;\x0a c\
olor: #232629;\x0a \
border-radius\
: 4px;\x0a}\x0a\x0aQProgr\
essBar::chunk:di\
sabled {\x0a bac\
kground-color: #\
18465D;\x0a colo\
r: #4D545B;\x0a \
border-radius: 4\
px;\x0a}\x0a\x0a\x0a/* -----\
----------------\
----------------\
----------------\
----------------\
--- */\x0a/* BUTTON\
S --------------\
----------------\
----------------\
----------------\
-- */\x0a/* -------\
----------------\
----------------\
----------------\
----------------\
- */\x0a\x0a/* QPushBu\
tton -----------\
----------------\
----------------\
----------------\
- */\x0a\x0aQPushButto\
n {\x0a backgrou\
nd-color: #4D545\
B;\x0a border: 1\
px solid #31363B\
;\x0a color: #EF\
F0F1;\x0a border\
-radius: 4px;\x0a \
padding: 3px;\x0a\
outline: non\
e;\x0a}\x0a\x0aQPushButto\
n:disabled {\x0a \
background-colo\
r: #31363B;\x0a \
border: 1px soli\
d #31363B;\x0a c\
olor: #4D545B;\x0a \
border-radius\
: 4px;\x0a paddi\
ng: 3px;\x0a}\x0a\x0a\x0aQPu\
shButton:checked\
{\x0a backgroun\
d-color: #31363B\
;\x0a border: 1p\
x solid #31363B;\
\x0a border-radi\
us: 4px;\x0a pad\
ding: 3px;\x0a o\
utline: none;\x0a}\x0a\
\x0aQPushButton:che\
cked:disabled {\x0a\
background-c\
olor: #232629;\x0a \
border: 1px s\
olid #31363B;\x0a \
color: #4D545B\
;\x0a border-rad\
ius: 4px;\x0a pa\
dding: 3px;\x0a \
outline: none;\x0a}\
\x0a\x0aQPushButton::m\
enu-indicator {\x0a\
subcontrol-o\
rigin: padding;\x0a\
subcontrol-p\
osition: bottom \
right;\x0a botto\
m: 4px;\x0a}\x0a\x0aQPush\
Button:pressed {\
\x0a background-\
color: #232629;\x0a\
border: 1px \
solid #232629;\x0a}\
\x0a\x0aQPushButton:ho\
ver,\x0aQPushButton\
:checked:hover{\x0a\
border: 1px \
solid #179AE0;\x0a \
color: #EFF0F\
1;\x0a}\x0a\x0aQPushButto\
n:selected,\x0aQPus\
hButton:checked:\
selected{\x0a ba\
ckground: #3375A\
3;\x0a color: #3\
1363B;\x0a}\x0a\x0a/* QTo\
olButton -------\
----------------\
----------------\
----------------\
----- */\x0a\x0aQToolB\
utton {\x0a back\
ground-color: #3\
1363B;\x0a borde\
r: 1px solid #31\
363B;\x0a border\
-radius: 4px;\x0a \
margin: 0px;\x0a \
padding: 2px;\
\x0a}\x0a\x0aQToolButton:\
checked {\x0a ba\
ckground-color: \
#232629;\x0a bor\
der: 1px solid #\
232629;\x0a}\x0a\x0aQTool\
Button:disabled \
{\x0a background\
-color: #31363B;\
\x0a}\x0a\x0aQToolButton:\
hover,\x0aQToolButt\
on:checked:hover\
{\x0a border: 1p\
x solid #179AE0;\
\x0a}\x0a\x0a/* the subco\
ntrols below are\
used only in th\
e MenuButtonPopu\
p mode */\x0a\x0aQTool\
Button[popupMode\
=\x221\x22] {\x0a padd\
ing-right: 12px;\
/* only for\
MenuButtonPopup\
*/\x0a border: \
1px #31363B; /\
* make way for t\
he popup button \
*/\x0a border-ra\
dius: 4px;\x0a}\x0a\x0a/*\
The subcontrol \
below is used on\
ly in the Instan\
tPopup or Delaye\
dPopup mode */\x0a\x0a\
QToolButton[popu\
pMode=\x222\x22] {\x0a \
padding-right: \
12px; /* on\
ly for InstantPo\
pup */\x0a borde\
r: 1px #A9A9A9; \
/* make way f\
or the popup but\
ton */\x0a}\x0a\x0aQToolB\
utton::menu-butt\
on {\x0a border-\
radius: 4px;\x0a \
border: 1px tra\
nsparent #31363B\
;\x0a border-top\
-right-radius: 4\
px;\x0a border-b\
ottom-right-radi\
us: 4px;\x0a /* \
16px width + 4px\
for border = 20\
px allocated abo\
ve */\x0a width:\
16px;\x0a outli\
ne: none;\x0a}\x0a\x0aQTo\
olButton::menu-b\
utton:hover,\x0aQTo\
olButton::menu-b\
utton:checked:ho\
ver {\x0a border\
: 1px solid #179\
AE0;\x0a}\x0a\x0aQToolBut\
ton::menu-indica\
tor {\x0a image:\
url(:/qss_icons\
/rc/down_arrow.p\
ng);\x0a top: -8\
px; /* shift\
it a bit */\x0a \
left: -4px; \
/* shift it a bi\
t */\x0a}\x0a\x0aQToolBut\
ton::menu-arrow \
{\x0a image: url\
(:/qss_icons/rc/\
down_arrow.png);\
\x0a}\x0a\x0aQToolButton:\
:menu-arrow:open\
{\x0a border: 1\
px solid #31363B\
;\x0a}\x0a\x0a/* QCommand\
LinkButton -----\
----------------\
----------------\
----------------\
*/\x0a\x0aQCommandLin\
kButton {\x0a ba\
ckground-color: \
#31363B;\x0a bor\
der: 1px solid #\
31363B;\x0a bord\
er-radius: 4px;\x0a\
padding: 0px\
;\x0a margin:0px\
;\x0a}\x0a\x0a/* --------\
----------------\
----------------\
----------------\
----------------\
*/\x0a/* INPUTS - \
NO FIELDS ------\
----------------\
----------------\
--------------- \
*/\x0a/* ----------\
----------------\
----------------\
----------------\
-------------- *\
/\x0a\x0a/* QCombobox \
----------------\
----------------\
----------------\
-------------- *\
/\x0a\x0aQComboBox {\x0a \
border: 1px s\
olid #31363B;\x0a \
border-radius:\
4px;\x0a select\
ion-background-c\
olor: #3375A3;\x0a \
padding-top: \
2px; /* This\
fix #103, #111\
*/\x0a padding-b\
ottom: 2px; /* \
This fix #103, \
#111*/\x0a paddi\
ng-left: 4px;\x0a \
padding-right:\
4px;\x0a /* min\
-width: 75px; r\
emoved to fix 10\
9 */\x0a}\x0a\x0aQComboBo\
x:disabled {\x0a \
background-colo\
r: #232629;\x0a \
color: #4D545B;\x0a\
}\x0a\x0aQComboBox:hov\
er{\x0a border: \
1px solid #179AE\
0;\x0a}\x0a\x0aQComboBox:\
on {\x0a selecti\
on-background-co\
lor: #232629;\x0a}\x0a\
\x0aQComboBox QAbst\
ractItemView {\x0a \
background-co\
lor: #232629;\x0a \
border-radius:\
4px;\x0a border\
: 1px solid #313\
63B;\x0a selecti\
on-color: #179AE\
0;\x0a selection\
-background-colo\
r: #31363B;\x0a}\x0a\x0aQ\
ComboBox::drop-d\
own {\x0a subcon\
trol-origin: pad\
ding;\x0a subcon\
trol-position: t\
op right;\x0a wi\
dth: 20px;\x0a b\
order-left-width\
: 0px;\x0a borde\
r-left-color: #3\
1363B;\x0a borde\
r-left-style: so\
lid;\x0a border-\
top-right-radius\
: 3px;\x0a borde\
r-bottom-right-r\
adius: 3px;\x0a}\x0a\x0aQ\
ComboBox::down-a\
rrow {\x0a image\
: url(:/qss_icon\
s/rc/down_arrow_\
disabled.png);\x0a}\
\x0a\x0aQComboBox::dow\
n-arrow:on,\x0aQCom\
boBox::down-arro\
w:hover,\x0aQComboB\
ox::down-arrow:f\
ocus {\x0a image\
: url(:/qss_icon\
s/rc/down_arrow.\
png);\x0a}\x0a\x0a/* QSli\
der ------------\
----------------\
----------------\
----------------\
---- */\x0a\x0aQSlider\
:disabled {\x0a \
background: #232\
629;\x0a}\x0a\x0aQSlider:\
focus {\x0a bord\
er: none;\x0a}\x0a\x0aQSl\
ider::groove:hor\
izontal {\x0a ba\
ckground: #4D545\
B;\x0a border: 1\
px solid #31363B\
;\x0a height: 4p\
x;\x0a margin: 0\
px;\x0a border-r\
adius: 4px;\x0a}\x0a\x0aQ\
Slider::sub-page\
:horizontal {\x0a \
background: #3\
375A3;\x0a borde\
r: 1px solid #31\
363B;\x0a height\
: 4px;\x0a margi\
n: 0px;\x0a bord\
er-radius: 4px;\x0a\
}\x0a\x0aQSlider::sub-\
page:horizontal:\
disabled {\x0a b\
ackground: #1846\
5D;\x0a}\x0a\x0aQSlider::\
handle:horizonta\
l {\x0a backgrou\
nd: #4D545B;\x0a \
border: 1px sol\
id #31363B;\x0a \
width: 8px;\x0a \
height: 8px;\x0a \
margin: -8px 0;\
\x0a border-radi\
us: 4px;\x0a}\x0a\x0aQSli\
der::handle:hori\
zontal:hover {\x0a \
background: #\
179AE0;\x0a bord\
er: 1px solid #1\
79AE0;\x0a}\x0a\x0aQSlide\
r::groove:vertic\
al {\x0a backgro\
und: #31363B;\x0a \
border: 1px so\
lid #31363B;\x0a \
width: 4px;\x0a \
margin: 0px;\x0a \
border-radius:\
4px;\x0a}\x0a\x0aQSlider\
::sub-page:verti\
cal {\x0a backgr\
ound: #3375A3;\x0a \
border: 1px s\
olid #31363B;\x0a \
width: 4px;\x0a \
margin: 0px;\x0a \
border-radius\
: 4px;\x0a}\x0a\x0aQSlide\
r::sub-page:vert\
ical:disabled {\x0a\
background: \
#18465D;\x0a}\x0a\x0aQSli\
der::handle:vert\
ical {\x0a backg\
round: #4D545B;\x0a\
border: 1px \
solid #31363B;\x0a \
width: 8px;\x0a \
height: 8px;\x0a\
margin: 0 -8\
px;\x0a border-r\
adius: 4px;\x0a}\x0a\x0aQ\
Slider::handle:v\
ertical:hover {\x0a\
background: \
#179AE0;\x0a bor\
der: 1px solid #\
179AE0;\x0a}\x0a\x0a/* QL\
ine ------------\
----------------\
----------------\
----------------\
------ */\x0a\x0aQLine\
Edit {\x0a backg\
round-color: #23\
2629;\x0a paddin\
g-top: 2px; \
/* This QLineEdi\
t fix 103, 111 \
*/\x0a padding-b\
ottom: 2px; /* \
This QLineEdit f\
ix 103, 111 */\x0a\
padding-left\
: 4px;\x0a paddi\
ng-right: 4px;\x0a \
border-style:\
solid;\x0a bord\
er: 1px solid #3\
1363B;\x0a borde\
r-radius: 4px;\x0a \
color: #EFF0F\
1;\x0a}\x0a\x0aQLineEdit:\
disabled {\x0a b\
ackground-color:\
#232629;\x0a co\
lor: #4D545B;\x0a}\x0a\
\x0aQLineEdit:hover\
{\x0a border: 1p\
x solid #179AE0;\
\x0a color: #EFF\
0F1;\x0a}\x0a\x0aQLineEdi\
t:selected{\x0a \
background: #337\
5A3;\x0a color: \
#31363B;\x0a}\x0a\x0a/* Q\
TabWiget -------\
----------------\
----------------\
----------------\
------- */\x0a\x0aQTab\
Widget {\x0a pad\
ding: 2px;\x0a b\
order: 1px solid\
#31363B;\x0a se\
lection-backgrou\
nd-color: #31363\
B;\x0a}\x0a\x0aQTabWidget\
::focus QWidget{\
\x0a border: non\
e;\x0a}\x0a\x0aQTabWidget\
::pane {\x0a bor\
der: 1px solid #\
31363B;\x0a padd\
ing: 2px;\x0a ma\
rgin: 0px;\x0a}\x0a\x0a\x0aQ\
TabWidget:select\
ed {\x0a backgro\
und-color: #3136\
3B;\x0a border: \
1px solid #31363\
B;\x0a}\x0a\x0a\x0aQTabWidge\
t:focus {\x0a bo\
rder: none;\x0a}\x0a\x0a/\
* QTabBar ------\
----------------\
----------------\
----------------\
---------- */\x0a\x0aQ\
TabBar {\x0a qpr\
operty-drawBase:\
0;\x0a border-r\
adius: 4px;\x0a \
border: 0px soli\
d #31363B;\x0a /\
* left: 5px; mov\
e to the right b\
y 5px - removed \
for fix */\x0a }\
\x0a\x0a\x0aQTabBar::clos\
e-button {\x0a b\
order-radius: 4p\
x;\x0a border: n\
one;\x0a padding\
: 4px;\x0a image\
: url(:/qss_icon\
s/rc/close.png);\
\x0a background:\
transparent;\x0a}\x0a\
\x0aQTabBar::close-\
button:hover {\x0a \
border-radius\
: 4px;\x0a borde\
r: none;\x0a pad\
ding: 5px;\x0a i\
mage: url(:/qss_\
icons/rc/close-h\
over.png);\x0a b\
ackground: trans\
parent;\x0a}\x0a\x0aQTabB\
ar::close-button\
:pressed {\x0a b\
order-radius: 4p\
x;\x0a border: n\
one;\x0a padding\
: 4px;\x0a image\
: url(:/qss_icon\
s/rc/close-press\
ed.png);\x0a bac\
kground: transpa\
rent;\x0a}\x0a\x0a\x0aQTabBa\
r::tab:top:selec\
ted:disabled {\x0a \
border-bottom\
: 2px solid #184\
65D;\x0a color: \
#4D545B;\x0a bac\
kground-color: #\
31363B;\x0a}\x0a\x0aQTabB\
ar::tab:bottom:s\
elected:disabled\
{\x0a border-to\
p: 2px solid #18\
465D;\x0a color:\
#4D545B;\x0a ba\
ckground-color: \
#31363B;\x0a}\x0a\x0aQTab\
Bar::tab:left:se\
lected:disabled \
{\x0a border-lef\
t: 2px solid #18\
465D;\x0a color:\
#4D545B;\x0a ba\
ckground-color: \
#31363B;\x0a}\x0a\x0aQTab\
Bar::tab:right:s\
elected:disabled\
{\x0a border-ri\
ght: 2px solid #\
18465D;\x0a colo\
r: #4D545B;\x0a \
background-color\
: #31363B;\x0a}\x0a\x0aQT\
abBar::tab:top:!\
selected:disable\
d {\x0a border-b\
ottom: 2px solid\
#31363B;\x0a co\
lor: #4D545B;\x0a}\x0a\
\x0aQTabBar::tab:bo\
ttom:!selected:d\
isabled {\x0a bo\
rder-top: 2px so\
lid #31363B;\x0a \
color: #4D545B;\
\x0a}\x0a\x0aQTabBar::tab\
:left:!selected:\
disabled {\x0a b\
order-left: 2px \
solid #31363B;\x0a \
color: #4D545\
B;\x0a}\x0a\x0aQTabBar::t\
ab:right:!select\
ed:disabled {\x0a \
border-right: \
2px solid #31363\
B;\x0a color: #4\
D545B;\x0a}\x0a\x0aQTabBa\
r::tab:top:!sele\
cted {\x0a borde\
r-bottom: 2px so\
lid #31363B;\x0a}\x0a\x0a\
QTabBar::tab:bot\
tom:!selected {\x0a\
border-top: \
2px solid #31363\
B;\x0a}\x0a\x0aQTabBar::t\
ab:left:!selecte\
d {\x0a border-l\
eft: 2px solid #\
31363B;\x0a}\x0a\x0aQTabB\
ar::tab:right:!s\
elected {\x0a bo\
rder-right: 2px \
solid #31363B;\x0a}\
\x0a\x0aQTabBar::tab:t\
op {\x0a backgro\
und-color: #2326\
29;\x0a border: \
1px solid #31363\
B;\x0a color: #E\
FF0F1;\x0a paddi\
ng: 2px;\x0a pad\
ding-left: 4px;\x0a\
padding-righ\
t: 4px;\x0a min-\
width: 5px;\x0a \
border-bottom: 1\
px solid #31363B\
;\x0a border-top\
-left-radius: 3p\
x;\x0a border-to\
p-right-radius: \
3px;\x0a}\x0a\x0aQTabBar:\
:tab:top:selecte\
d {\x0a backgrou\
nd-color: #4D545\
B;\x0a border: 1\
px solid #31363B\
;\x0a color: #EF\
F0F1;\x0a border\
-bottom: 2px sol\
id #3375A3;\x0a \
border-top-left-\
radius: 3px;\x0a \
border-top-righ\
t-radius: 3px;\x0a}\
\x0a\x0aQTabBar::tab:t\
op:!selected:hov\
er {\x0a border:\
1px solid #179A\
E0;\x0a}\x0a\x0aQTabBar::\
tab:bottom {\x0a \
color: #EFF0F1;\
\x0a border: 1px\
solid #31363B;\x0a\
border-top: \
1px solid #31363\
B;\x0a backgroun\
d-color: #232629\
;\x0a padding: 2\
px;\x0a padding-\
left: 4px;\x0a p\
adding-right: 4p\
x;\x0a border-bo\
ttom-left-radius\
: 3px;\x0a borde\
r-bottom-right-r\
adius: 3px;\x0a \
min-width: 5px;\x0a\
}\x0a\x0aQTabBar::tab:\
bottom:selected \
{\x0a color: #EF\
F0F1;\x0a backgr\
ound-color: #4D5\
45B;\x0a border:\
1px solid #3136\
3B;\x0a border-t\
op: 2px solid #3\
375A3;\x0a borde\
r-bottom-left-ra\
dius: 3px;\x0a b\
order-bottom-rig\
ht-radius: 3px;\x0a\
}\x0a\x0aQTabBar::tab:\
bottom:!selected\
:hover {\x0a bor\
der: 1px solid #\
179AE0;\x0a}\x0a\x0aQTabB\
ar::tab:left {\x0a \
color: #EFF0F\
1;\x0a border: 1\
px solid #31363B\
;\x0a border-lef\
t: 1px solid #31\
363B;\x0a backgr\
ound-color: #232\
629;\x0a padding\
: 2px;\x0a paddi\
ng-top: 4px;\x0a \
padding-bottom:\
4px;\x0a border\
-top-right-radiu\
s: 3px;\x0a bord\
er-bottom-right-\
radius: 3px;\x0a \
min-height: 5px\
;\x0a}\x0a\x0aQTabBar::ta\
b:left:selected \
{\x0a color: #EF\
F0F1;\x0a backgr\
ound-color: #4D5\
45B;\x0a border:\
1px solid #3136\
3B;\x0a border-l\
eft: 2px solid #\
3375A3;\x0a bord\
er-top-right-rad\
ius: 3px;\x0a bo\
rder-bottom-righ\
t-radius: 3px;\x0a}\
\x0a\x0aQTabBar::tab:l\
eft:!selected:ho\
ver {\x0a border\
: 1px solid #179\
AE0;\x0a}\x0a\x0aQTabBar:\
:tab:right {\x0a \
color: #EFF0F1;\
\x0a border: 1px\
solid #31363B;\x0a\
border-right\
: 1px solid #313\
63B;\x0a backgro\
und-color: #2326\
29;\x0a padding:\
2px;\x0a paddin\
g-top: 4px;\x0a \
padding-bottom: \
4px;\x0a border-\
top-left-radius:\
3px;\x0a border\
-bottom-left-rad\
ius: 3px;\x0a mi\
n-height: 5px;\x0a}\
\x0a\x0aQTabBar::tab:r\
ight:selected {\x0a\
color: #EFF0\
F1;\x0a backgrou\
nd-color: #4D545\
B;\x0a border: 1\
px solid #31363B\
;\x0a border-rig\
ht: 2px solid #3\
375A3;\x0a borde\
r-top-left-radiu\
s: 3px;\x0a bord\
er-bottom-left-r\
adius: 3px;\x0a}\x0a\x0aQ\
TabBar::tab:righ\
t:!selected:hove\
r {\x0a border: \
1px solid #179AE\
0;\x0a}\x0a\x0aQTabBar QT\
oolButton::right\
-arrow:enabled {\
\x0a image: url(\
:/qss_icons/rc/r\
ight_arrow.png);\
\x0a}\x0a\x0aQTabBar QToo\
lButton::left-ar\
row:enabled {\x0a \
image: url(:/q\
ss_icons/rc/left\
_arrow.png);\x0a}\x0a\x0a\
QTabBar QToolBut\
ton::right-arrow\
:disabled {\x0a \
image: url(:/qss\
_icons/rc/right_\
arrow_disabled.p\
ng);\x0a}\x0a\x0aQTabBar \
QToolButton::lef\
t-arrow:disabled\
{\x0a image: ur\
l(:/qss_icons/rc\
/left_arrow_disa\
bled.png);\x0a}\x0a\x0a\x0a/\
* Some examples\
from internet t\
o check\x0a\x0aQTabBar\
::tabButton() an\
d QTabBar::tabIc\
on()\x0aQTabBar::te\
ar {width: 0px; \
border: none;}\x0aQ\
TabBar::tear {im\
age: url(tear_in\
dicator.png);}\x0aQ\
TabBar::scroller\
{width:85pix;}\x0aQ\
TabBar QToolbutt\
on{background-co\
lor:\x22light blue\x22\
;}\x0a\x0aBut that lef\
t the buttons tr\
ansparant.\x0aLooke\
d confusing as t\
he tab buttons m\
igrated behind t\
he scroller butt\
ons.\x0aSo we had t\
o color the back\
ground of the s\
croller buttons\x0a\
*/\x0a\x0a/* QDockWige\
t --------------\
----------------\
----------------\
--------------- \
*/\x0a\x0aQDockWidget \
{\x0a outline: 1\
px solid #31363B\
;\x0a background\
-color: #232629;\
\x0a border: 1px\
solid #31363B;\x0a\
border-radiu\
s: 4px;\x0a titl\
ebar-close-icon:\
url(:/qss_icons\
/rc/close.png);\x0a\
titlebar-nor\
mal-icon: url(:/\
qss_icons/rc/und\
ock.png);\x0a}\x0a\x0aQDo\
ckWidget::title \
{\x0a padding: 6\
px; /* better \
size for title b\
ar */\x0a border\
: none;\x0a back\
ground-color: #3\
1363B;\x0a}\x0a\x0aQDockW\
idget::close-but\
ton {\x0a backgr\
ound-color: #313\
63B;\x0a border-\
radius: 4px;\x0a \
border: none;\x0a}\
\x0a\x0aQDockWidget::c\
lose-button:hove\
r {\x0a border: \
1px solid #31363\
B;\x0a}\x0a\x0aQDockWidge\
t::close-button:\
pressed {\x0a bo\
rder: 1px solid \
#31363B;\x0a}\x0a\x0aQDoc\
kWidget::float-b\
utton {\x0a back\
ground-color: #3\
1363B;\x0a borde\
r-radius: 4px;\x0a \
border: none;\
\x0a}\x0a\x0aQDockWidget:\
:float-button:ho\
ver {\x0a border\
: 1px solid #313\
63B;\x0a}\x0a\x0aQDockWid\
get::float-butto\
n:pressed {\x0a \
border: 1px soli\
d #31363B;\x0a}\x0a\x0a/*\
QTreeView QTabl\
eView QListView \
----------------\
----------------\
--------- */\x0a\x0aQT\
reeView:branch:s\
elected,\x0aQTreeVi\
ew:branch:hover \
{\x0a background\
: url(:/qss_icon\
s/rc/transparent\
.png);\x0a}\x0a\x0aQTreeV\
iew::branch:has-\
siblings:!adjoin\
s-item {\x0a bor\
der-image: url(:\
/qss_icons/rc/tr\
ansparent.png);\x0a\
}\x0a\x0aQTreeView::br\
anch:has-sibling\
s:adjoins-item {\
\x0a border-imag\
e: url(:/qss_ico\
ns/rc/transparen\
t.png);\x0a}\x0a\x0aQTree\
View::branch:!ha\
s-children:!has-\
siblings:adjoins\
-item {\x0a bord\
er-image: url(:/\
qss_icons/rc/tra\
nsparent.png);\x0a}\
\x0a\x0aQTreeView::bra\
nch:has-children\
:!has-siblings:c\
losed,\x0aQTreeView\
::branch:closed:\
has-children:has\
-siblings {\x0a \
image: url(:/qss\
_icons/rc/branch\
_closed.png);\x0a}\x0a\
\x0aQTreeView::bran\
ch:open:has-chil\
dren:!has-siblin\
gs,\x0aQTreeView::b\
ranch:open:has-c\
hildren:has-sibl\
ings {\x0a image\
: url(:/qss_icon\
s/rc/branch_open\
.png);\x0a}\x0a\x0aQTreeV\
iew::branch:has-\
children:!has-si\
blings:closed:ho\
ver,\x0aQTreeView::\
branch:closed:ha\
s-children:has-s\
iblings:hover {\x0a\
image: url(:\
/qss_icons/rc/br\
anch_closed-on.p\
ng);\x0a}\x0a\x0aQTreeVie\
w::branch:open:h\
as-children:!has\
-siblings:hover,\
\x0aQTreeView::bran\
ch:open:has-chil\
dren:has-sibling\
s:hover {\x0a im\
age: url(:/qss_i\
cons/rc/branch_o\
pen-on.png);\x0a}\x0a\x0a\
QListView::item:\
!selected:hover,\
\x0aQTreeView::item\
:!selected:hover\
,\x0aQTableView::it\
em:!selected:hov\
er,\x0aQColumnView:\
:item:!selected:\
hover {\x0a outl\
ine: 0;\x0a colo\
r: #179AE0;\x0a \
background-color\
: #31363B;\x0a}\x0a\x0aQL\
istView::item:se\
lected:hover,\x0aQT\
reeView::item:se\
lected:hover,\x0aQT\
ableView::item:s\
elected:hover,\x0aQ\
ColumnView::item\
:selected:hover \
{\x0a background\
: #3375A3;\x0a c\
olor: #232629;\x0a\
}\x0a\x0aQTreeView::in\
dicator:checked,\
\x0aQListView::indi\
cator:checked {\x0a\
image: url(:\
/qss_icons/rc/ch\
eckbox_checked.p\
ng);\x0a}\x0a\x0aQTreeVie\
w::indicator:unc\
hecked,\x0aQListVie\
w::indicator:unc\
hecked {\x0a ima\
ge: url(:/qss_ic\
ons/rc/checkbox_\
unchecked.png);\x0a\
}\x0a\x0aQTreeView::in\
dicator:checked:\
hover,\x0aQTreeView\
::indicator:chec\
ked:focus,\x0aQTree\
View::indicator:\
checked:pressed,\
\x0aQListView::indi\
cator:checked:ho\
ver,\x0aQListView::\
indicator:checke\
d:focus,\x0aQListVi\
ew::indicator:ch\
ecked:pressed {\x0a\
image: url(:\
/qss_icons/rc/ch\
eckbox_checked_f\
ocus.png);\x0a}\x0a\x0aQT\
reeView::indicat\
or:unchecked:hov\
er,\x0aQTreeView::i\
ndicator:uncheck\
ed:focus,\x0aQTreeV\
iew::indicator:u\
nchecked:pressed\
,\x0aQListView::ind\
icator:unchecked\
:hover,\x0aQListVie\
w::indicator:unc\
hecked:focus,\x0aQL\
istView::indicat\
or:unchecked:pre\
ssed {\x0a image\
: url(:/qss_icon\
s/rc/checkbox_un\
checked_focus.pn\
g);\x0a}\x0a\x0aQTreeView\
::indicator:inde\
terminate:hover,\
\x0aQTreeView::indi\
cator:indetermin\
ate:focus,\x0aQTree\
View::indicator:\
indeterminate:pr\
essed,\x0aQListView\
::indicator:inde\
terminate:hover,\
\x0aQListView::indi\
cator:indetermin\
ate:focus,\x0aQList\
View::indicator:\
indeterminate:pr\
essed {\x0a imag\
e: url(:/qss_ico\
ns/rc/checkbox_i\
ndeterminate_foc\
us.png);\x0a}\x0a\x0aQTre\
eView::indicator\
:indeterminate,\x0a\
QListView::indic\
ator:indetermina\
te {\x0a image: \
url(:/qss_icons/\
rc/checkbox_inde\
terminate.png);\x0a\
}\x0a\x0aQListView,\x0aQT\
reeView,\x0aQTableV\
iew,\x0aQColumnView\
{\x0a backgroun\
d-color: #232629\
;\x0a border: 1p\
x solid #31363B;\
\x0a color: #EFF\
0F1;\x0a gridlin\
e-color: #31363b\
;\x0a border-rad\
ius: 4px;\x0a}\x0a\x0aQLi\
stView:disabled,\
\x0aQTreeView:disab\
led,\x0aQTableView:\
disabled,\x0aQColum\
nView:disabled {\
\x0a background-\
color: #232629;\x0a\
color: #4D54\
5B;\x0a}\x0a\x0aQListView\
:selected,\x0aQTree\
View:selected,\x0aQ\
TableView:select\
ed,\x0aQColumnView:\
selected {\x0a b\
ackground: #3375\
A3;\x0a color: #\
31363B;\x0a}\x0a\x0aQList\
View:hover,\x0aQTre\
eView::hover,\x0aQT\
ableView::hover,\
\x0aQColumnView::ho\
ver {\x0a backgr\
ound-color: #232\
629;\x0a border:\
1px solid #179A\
E0;\x0a}\x0a\x0aQListView\
::item:pressed,\x0a\
QTreeView::item:\
pressed,\x0aQTableV\
iew::item:presse\
d,\x0aQColumnView::\
item:pressed {\x0a \
background-co\
lor: #3375A3;\x0a}\x0a\
\x0aQListView::item\
:selected:active\
,\x0aQTreeView::ite\
m:selected:activ\
e,\x0aQTableView::i\
tem:selected:act\
ive,\x0aQColumnView\
::item:selected:\
active {\x0a bac\
kground-color: #\
3375A3;\x0a}\x0a\x0aQTabl\
eCornerButton::s\
ection {\x0a bac\
kground-color: #\
232629;\x0a bord\
er: 1px transpar\
ent #31363B;\x0a \
border-radius: \
0px;\x0a}\x0a\x0a/* QHead\
erView ---------\
----------------\
----------------\
----------------\
--- */\x0a\x0aQHeaderV\
iew {\x0a backgr\
ound-color: #313\
63B;\x0a border:\
0px transparent\
#31363B;\x0a pa\
dding: 0px;\x0a \
margin: 0px;\x0a \
border-radius: \
0px;\x0a}\x0a\x0aQHeaderV\
iew:disabled {\x0a \
background-co\
lor: #31363B;\x0a \
border: 1px tr\
ansparent #31363\
B;\x0a padding: \
2px;\x0a}\x0a\x0aQHeaderV\
iew::section {\x0a \
background-co\
lor: #31363B;\x0a \
color: #EFF0F1\
;\x0a padding: 2\
px;\x0a border-r\
adius: | |
__all__ = ['ButtonBehavior', 'ToggleButtonBehavior', 'FloatBehavior']
from kivy.clock import Clock
from kivy.config import Config
from weakref import ref
from time import time
from kivy.app import App
from kivy.uix.image import Image
from utils import image, icon
from kivy.metrics import dp
from kivy.properties import (
OptionProperty, ObjectProperty,
BooleanProperty, NumericProperty,
ListProperty,
)
from kivy.graphics import Color, Line, Rectangle
class FloatBehavior(object):
    """Mixin that lets a widget float over the gamepad's pad area.

    Position is stored as fractional hints (:attr:`hint_x`/:attr:`hint_y`)
    relative to ``pad_widget`` so it survives pad resizes.  While the root's
    ``move_layout`` mode is active, a single tap selects the widget for
    dragging and a double tap toggles resize mode; alignment guide lines are
    drawn against the other floating widgets, and the final geometry is
    persisted to the 'position' json on touch-up.
    """
    # Fractional position of this widget inside pad_widget (0.0 .. 1.0).
    hint_x = NumericProperty(0.5)
    hint_y = NumericProperty(0.5)
    # Key under which this widget's geometry is stored in the 'position' json.
    name = ''
    # True while this widget is being dragged/edited in layout-edit mode.
    move_layout = False
    # True after a single tap in layout-edit mode (drag state).
    selected = False
    # True after a double tap in layout-edit mode (resize state).
    resized = False
    # Size of the four corner resize handles drawn in resize mode.
    resize_icon_size = ListProperty([dp(20), dp(20)])
    # Gamepad root widget; resolved in binds() once the app tree exists.
    root = ObjectProperty(None)
    # The pad container this widget floats over; resolved in binds().
    pad_widget = ObjectProperty(None)
    # Last touch position seen; used to compute resize deltas.
    last_touch_pos = ListProperty([0, 0])
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Defer binding one frame so App.get_running_app().root exists.
        Clock.schedule_once(self.binds)
    def binds(self, *args):
        """Hook position updates to the hints and to the pad widget."""
        self.bind(hint_x=self.update_pos)
        self.bind(hint_y=self.update_pos)
        self.root = App.get_running_app().root.ids.gamepad
        self.pad_widget = self.root.ids.content_pad
        self.pad_widget.bind(pos=self.update_pos)
        self.pad_widget.bind(size=self.update_pos)
        Clock.schedule_once(self.update_pos)
    def update_pos(self, *args):
        """Recompute absolute x/y from the fractional hints."""
        if not self.get_root_window():
            # Not displayed yet; nothing to position.
            return None
        self.x = self.pad_widget.x+(self.pad_widget.width*self.hint_x)
        self.y = self.pad_widget.y+(self.pad_widget.height*self.hint_y)
        if (self.selected or self.resized) and not self.move_layout:
            # Refresh alignment guides, then clear them after 2 seconds.
            self.find_equals()
            Clock.schedule_once(self.remove_equals, 2)
    def collid_quad(self, rx, ry, tx, ty, width, height, *args):
        """Return True if point (tx, ty) lies inside the axis-aligned
        rectangle (rx, ry, width, height); extra args are ignored."""
        if tx < rx or tx > (rx+width) or ty < ry or ty > (ry+height):
            return False
        return True
    def on_touch_down(self, touch):
        self.last_touch_pos = touch.pos
        if self.collide_point(*touch.pos):
            if self.root.move_layout:
                # Layout-edit mode: this touch edits the widget.
                self.move_layout = True
                if touch.is_double_tap:
                    # Double tap switches from drag to resize mode.
                    self.resized = True
                    self.selected = False
                    self.clear_background_selected()
                    self.draw_background_resized()
                else:
                    if self.resized:
                        # Already resizing: a tap outside all four corner
                        # handles leaves resize mode.
                        tx, ty = touch.pos
                        w, h = self.resize_icon_size
                        if not self.collid_quad(self.x, self.y, tx, ty, w, h) and\
                           not self.collid_quad(self.x, self.y+self.height-h, tx, ty, w, h) and\
                           not self.collid_quad(self.x+self.width-w, self.y+self.height-h, tx, ty, w, h) and\
                           not self.collid_quad(self.x+self.width-w, self.y, tx, ty, w, h):
                            self.resized = False
                    if not self.resized:
                        # Single tap selects the widget for dragging.
                        self.selected = True
                        self.clear_background_resized()
                        self.draw_background_selected()
                # Swallow the touch so children don't react while editing.
                return False
        else:
            # Touch outside this widget: drop edit state and overlays.
            self.selected = False
            self.resized = False
            self.move_layout = False
            self.clear_background_selected()
            self.clear_background_resized()
        return super().on_touch_down(touch)
    def on_touch_up(self, touch):
        if self.move_layout:
            # Edit gesture finished: persist geometry to the position json.
            self.move_layout = False
            json_pos = self.root.get_json(name='position')
            json_pos[self.name]['hint_x'] = self.hint_x
            json_pos[self.name]['hint_y'] = self.hint_y
            json_pos[self.name]['width'] = self.width
            json_pos[self.name]['height'] = self.height
            self.root.update_json(json_pos, name='position')
            # Fade out the helper lines shortly after the drop.
            Clock.schedule_once(self.root.clear_middle_line, 2)
            Clock.schedule_once(self.remove_equals, 2)
        return super().on_touch_up(touch)
    def on_touch_move(self, touch):
        if self.root.move_layout and self.move_layout:
            tx, ty = touch.pos
            if self.selected:
                # Dragging: move the widget by updating its hints.
                self.clear_background_selected()
                s_center = self.x+(self.width/2)
                p_center = self.pad_widget.x+(self.pad_widget.width/2)
                if self.precision(s_center, p_center):
                    # Near the pad's horizontal centre: snap to it while the
                    # touch stays within half a widget-width of the centre.
                    if self.precision(tx, p_center, (self.width/2)):
                        tx = (p_center-(self.width/2))
                        self.root.add_middle_line()
                        Clock.unschedule(self.root.clear_middle_line)
                else:
                    Clock.schedule_once(self.root.clear_middle_line, 1)
                # NOTE(review): tx/ty are window coordinates divided by the
                # pad size without subtracting pad_widget.x/y -- this assumes
                # the pad's origin is at (0, 0); confirm against update_pos().
                self.hint_x = round(tx/self.pad_widget.width, 2)
                self.hint_y = round(ty/self.pad_widget.height, 2)
                self.find_equals()
                self.draw_background_selected()
                self.last_touch_pos = touch.pos
                return False
            elif self.resized:
                # Resizing: grow/shrink uniformly by the mean of the x and y
                # touch deltas since the last event.
                lx, ly = self.last_touch_pos
                vl = ((tx-lx) + (ty-ly)) / 2
                self.size = (round(self.width+vl, 2), round(self.height+vl, 2))
                self.draw_background_resized()
                self.last_touch_pos = touch.pos
                self.find_equals()
                return False
        self.last_touch_pos = touch.pos
        return super().on_touch_move(touch)
    def get_floats_config(self, *args):
        """Yield (name, x, y, w, h, center_x, center_y) for every *other*
        floating widget, computed from the stored 'position' json."""
        json_pos = self.root.get_json(name='position')
        # Skip ourselves; we only align against the other widgets.
        del json_pos[self.name]
        px, py = self.pad_widget.pos
        pw, ph = self.pad_widget.size
        for name, values in json_pos.items():
            x = px+(pw*values['hint_x'])
            y = py+(ph*values['hint_y'])
            w, h = values['width'], values['height']
            center_x, center_y = (x+(w*0.5)), (y+(h*0.5))
            yield (name, x, y, w, h, center_x, center_y)
    def precision(self, n1, n2, marg=dp(5)):
        """Return True when n1 lies strictly within +/- marg of n2."""
        return (n1 > (n2-marg) and n1 < (n2+marg))
    def find_equals(self, *args):
        """Draw alignment guide lines where our edges/centre line up with
        another floating widget's edges/centre (within precision())."""
        sw, sh = self.size
        sx, sy = self.pos
        cx, cy = self.center_x, self.center_y
        for name, x, y, w, h, center_x, center_y in self.get_floats_config():
            # Vertical guides: left edge, centre, right edge of the other.
            self.root.remove_line(f'{name}_x')
            if self.precision(x, sx) or self.precision(x, (sx+sw)):
                self.root.add_line(x, f'{name}_x')
            if self.precision(center_x, cx):
                self.root.add_line(center_x, f'{name}_x')
            if self.precision((x+w), sx) or self.precision((x+w), (sx+sw)):
                self.root.add_line((x+w), f'{name}_x')
            # Horizontal guides: bottom edge, centre, top edge of the other.
            self.root.remove_line(f'{name}_y')
            if self.precision(y, sy) or self.precision(y, (sy+sh)):
                self.root.add_line(y, f'{name}_y')
            if self.precision(center_y, cy):
                self.root.add_line(center_y, f'{name}_y')
            if self.precision((y+h), (sy+sh)) or self.precision((y+h), sy):
                self.root.add_line((y+h), f'{name}_y')
    def remove_equals(self, *args):
        """Remove guide lines for widgets we are still aligned with."""
        for name, x, y, w, h, center_x, center_y in self.get_floats_config():
            toph = (self.y+self.height)
            leftw = (self.x+self.width)
            if self.precision(x, self.x) or self.precision(center_x, self.center_x):
                self.root.remove_line(f'{name}_x')
            elif self.precision((x+w), leftw) or self.precision((x+w), self.x):
                self.root.remove_line(f'{name}_x')
            if self.precision(y, self.y) or self.precision(center_y, self.center_y):
                self.root.remove_line(f'{name}_y')
            elif self.precision((y+h), toph) or self.precision((y+h), self.y):
                self.root.remove_line(f'{name}_y')
    def update_background_selected(self, *args):
        # Redraw the selection overlay without touching the pos/size binds.
        self.clear_background_selected(unbind=False)
        self.draw_background_selected(bind=False)
    def draw_background_selected(self, bind=True, *args):
        """Draw the drag overlay: a 'move' icon at the bottom-left corner
        plus a green outline around the widget (canvas group 'selected')."""
        add = self.canvas.before.add
        img = Image(size_hint=(None, None), size=(dp(35), dp(35)), source=image('move'))
        add(Color(rgba=[1, 1, 1, 1], group='selected'))
        add(Rectangle(
            pos=(self.x-(img.width*0.5), self.y-(img.height*0.5)),
            size=img.size, texture=img.texture, group='selected'))
        add(Color(rgba=[0, 1, 0, 1], group='selected'))
        add(Line(rectangle=[*self.pos, *self.size], group='selected'))
        if bind:
            # Keep the overlay glued to the widget while it moves/resizes.
            self.bind(size=self.update_background_selected)
            self.bind(pos=self.update_background_selected)
    def clear_background_selected(self, unbind=True, *args):
        # Remove all 'selected' canvas instructions in one call.
        self.canvas.before.remove_group('selected')
        if unbind:
            self.unbind(size=self.update_background_selected)
            self.unbind(pos=self.update_background_selected)
    def update_background_resized(self, *args):
        # Redraw the resize overlay without touching the pos/size binds.
        self.clear_background_resized(unbind=False)
        self.draw_background_resized(bind=False)
    def draw_background_resized(self, bind=True, *args):
        """Draw the resize overlay: a handle icon in each of the four
        corners plus a red outline (canvas group 'resized')."""
        add = self.canvas.before.add
        img = Image(size_hint=(None, None), size=self.resize_icon_size, source=icon('resize'))
        add(Color(rgba=[1, 1, 1, 1], group='resized'))
        add(Rectangle(
            pos=(self.x, self.y),
            size=img.size, texture=img.texture, group='resized'))
        add(Rectangle(
            pos=(self.x, self.y+self.height-img.height),
            size=img.size, texture=img.texture, group='resized'))
        add(Rectangle(
            pos=(self.x+self.width-img.width, self.y+self.height-img.height),
            size=img.size, texture=img.texture, group='resized'))
        add(Rectangle(
            pos=(self.x+self.width-img.width, self.y),
            size=img.size, texture=img.texture, group='resized'))
        add(Color(rgba=[1, 0, 0, 1], group='resized'))
        add(Line(rectangle=[*self.pos, *self.size], group='resized'))
        if bind:
            # Keep the overlay glued to the widget while it moves/resizes.
            self.bind(size=self.update_background_resized)
            self.bind(pos=self.update_background_resized)
    def clear_background_resized(self, unbind=True, *args):
        # Remove all 'resized' canvas instructions in one call.
        self.canvas.before.remove_group('resized')
        if unbind:
            self.unbind(size=self.update_background_resized)
            self.unbind(pos=self.update_background_resized)
class ButtonBehavior(object):
    '''
    This `mixin <https://en.wikipedia.org/wiki/Mixin>`_ class provides
    :class:`~kivy.uix.button.Button` behavior. Please see the
    :mod:`button behaviors module <kivy.uix.behaviors.button>` documentation
    for more information.
    :Events:
        `on_press`
            Fired when the button is pressed.
        `on_release`
            Fired when the button is released (i.e. the touch/click that
            pressed the button goes away).
    '''
    state = OptionProperty('normal', options=('normal', 'down'))
    '''The state of the button, must be one of 'normal' or 'down'.
    The state is 'down' only when the button is currently touched/clicked,
    otherwise its 'normal'.
    :attr:`state` is an :class:`~kivy.properties.OptionProperty` and defaults
    to 'normal'.
    '''
    last_touch = ObjectProperty(None)
    '''Contains the last relevant touch received by the Button. This can
    be used in `on_press` or `on_release` in order to know which touch
    dispatched the event.
    .. versionadded:: 1.8.0
    :attr:`last_touch` is a :class:`~kivy.properties.ObjectProperty` and
    defaults to `None`.
    '''
    min_state_time = NumericProperty(0)
    '''The minimum period of time which the widget must remain in the
    `'down'` state.
    .. versionadded:: 1.9.1
    :attr:`min_state_time` is a float. The property declaration defaults to
    0, but unless a value is passed to the constructor it is replaced at
    init time with the ('graphics', 'min_state_time') value taken from
    :class:`~kivy.config.Config`.
    '''
    always_release = BooleanProperty(False)
    '''This determines whether or not the widget fires an `on_release` event if
    the touch_up is outside the widget.
    .. versionadded:: 1.9.0
    .. versionchanged:: 1.10.0
        The default value is now False.
    :attr:`always_release` is a :class:`~kivy.properties.BooleanProperty` and
    defaults to `False`.
    '''
    def __init__(self, **kwargs):
        self.register_event_type('on_press')
        self.register_event_type('on_release')
        if 'min_state_time' not in kwargs:
            # No explicit value given: use the configured minimum press time.
            self.min_state_time = float(Config.get('graphics',
                'min_state_time'))
        super(ButtonBehavior, self).__init__(**kwargs)
        # Pending Clock event used to delay the visual release (on_touch_up).
        self.__state_event = None
        # Timestamp of the touch that pressed the button.
        self.__touch_time = None
        # Any state change cancels a pending delayed release.
        self.fbind('state', self.cancel_event)
    def _do_press(self):
        self.state = 'down'
    def _do_release(self, *args):
        self.state = 'normal'
    def cancel_event(self, *args):
        # Cancel a scheduled delayed release, if any.
        if self.__state_event:
            self.__state_event.cancel()
            self.__state_event = None
    def on_touch_down(self, touch):
        if super(ButtonBehavior, self).on_touch_down(touch):
            return True
        if touch.is_mouse_scrolling:
            # Scroll-wheel events never press the button.
            return False
        if not self.collide_point(touch.x, touch.y):
            return False
        if self in touch.ud:
            # This touch already pressed us; ignore duplicates.
            return False
        # Grab the touch so we receive its move/up events even off-widget.
        touch.grab(self)
        touch.ud[self] = True
        self.last_touch = touch
        self.__touch_time = time()
        self._do_press()
        # NOTE(review): upstream Kivy dispatches unconditionally; this guard
        # skips the event when no App instance is currently running.
        if App._running_app:
            self.dispatch('on_press')
        return True
    def on_touch_move(self, touch):
        if touch.grab_current is self:
            # Grabbed replay of a touch we own: consume it.
            return True
        if super(ButtonBehavior, self).on_touch_move(touch):
            return True
        return self in touch.ud
    def on_touch_up(self, touch):
        if touch.grab_current is not self:
            return super(ButtonBehavior, self).on_touch_up(touch)
        assert(self in touch.ud)
        touch.ungrab(self)
        self.last_touch = touch
        if (not self.always_release and
                not self.collide_point(*touch.pos)):
            # Released outside the button: reset state, no on_release event.
            self._do_release()
            return
        # Keep the 'down' state visible for at least min_state_time.
        touchtime = time() - self.__touch_time
        if touchtime < self.min_state_time:
            self.__state_event = Clock.schedule_once(
                self._do_release, self.min_state_time - touchtime)
        else:
            self._do_release()
        # See NOTE(review) in on_touch_down about this guard.
        if App._running_app:
            self.dispatch('on_release')
        return True
    def on_press(self):
        pass
    def on_release(self):
        pass
    def trigger_action(self, duration=0.1):
        '''Trigger whatever action(s) have been bound to the button by calling
        both the on_press and on_release callbacks.
        This is similar to a quick button press without using any touch events,
        but note that like most kivy code, this is not guaranteed to be safe to
        call from external threads. If needed use
        :class:`Clock <kivy.clock.Clock>` to safely schedule this function and
        the resulting callbacks to be called from the main thread.
        Duration is the length of the press in seconds. Pass 0 if you want
        the action to happen instantly.
        .. versionadded:: 1.8.0
        '''
        self._do_press()
        self.dispatch('on_press')
        def trigger_release(dt):
            self._do_release()
            self.dispatch('on_release')
        if not duration:
            # Zero duration: release immediately and synchronously.
            trigger_release(0)
        else:
            Clock.schedule_once(trigger_release, duration)
class ToggleButtonBehavior(ButtonBehavior):
'''This `mixin <https://en.wikipedia.org/wiki/Mixin>`_ class provides
:mod:`~kivy.uix.togglebutton` behavior. Please see the
:mod:`togglebutton behaviors module <kivy.uix.behaviors.togglebutton>`
documentation for more information.
.. versionadded:: 1.8.0
'''
__groups = {}
group = ObjectProperty(None, allownone=True)
'''Group of the button. If `None`, no group will be used (the button will be
independent). If specified, :attr:`group` must be a hashable object, like
a string. Only one button in a group can be in a 'down' state.
:attr:`group` is a :class:`~kivy.properties.ObjectProperty` and defaults to
`None`.
'''
allow_no_selection = BooleanProperty(True)
'''This specifies whether the widgets in a group allow no selection i.e.
everything to be deselected.
.. versionadded:: 1.9.0
:attr:`allow_no_selection` is a :class:`BooleanProperty` and defaults to
`True`
'''
def __init__(self, **kwargs):
self._previous_group = None
super(ToggleButtonBehavior, self).__init__(**kwargs)
def on_group(self, *largs):
groups = ToggleButtonBehavior.__groups
if self._previous_group:
group = groups[self._previous_group]
for item in group[:]:
if item() is self:
group.remove(item)
break
group = self._previous_group = self.group
if group not in groups:
groups[group] | |
now look at all of the files, new and old, to order the diff right
# so we don't have to go seeking all over the changeset
configList = []
normalList = []
removeList = []
encapsulatedList = []
for job in jobs:
if self.hasNewTrove(job[0], job[2][0], job[2][1]):
trvCs = self.getNewTroveVersion(job[0], job[2][0], job[2][1])
for (pathId, path, fileId, fileVersion) in \
trvCs.getNewFileList():
fileStream = self.getFileChange(None, fileId)
if trvCs.hasCapsule():
encapsulatedList.append((pathId, fileId,
(None, None, None, None),
(path, fileId, fileStream)))
elif files.frozenFileFlags(fileStream).isConfig():
configList.append((pathId, fileId,
(None, None, None, None),
(path, fileId, fileStream)))
else:
normalList.append((pathId, fileId,
(None, None, None, None),
(path, fileId, fileStream)))
for (pathId, path, fileId, fileVersion) in \
trvCs.getChangedFileList():
oldFileObj = fileObjects.pop(0)
fileObj = oldFileObj.copy()
oldFileId, oldFileVersion, oldPath = filesNeeded.pop(0)[1:4]
diff = self.getFileChange(oldFileId, fileId)
# check if new and old files are of the same type
if fileObj.lsTag == diff[1]:
fileObj.twm(diff, fileObj)
else:
fileObj = troveSource.getFileVersion(
pathId, fileId, fileVersion)
if path is None:
path = oldPath
if trvCs.hasCapsule():
encapsulatedList.append((pathId, fileId,
(oldPath, oldFileId, oldFileVersion, oldFileObj),
(path, fileId, fileObj.freeze())))
elif fileObj.flags.isConfig():
configList.append((pathId, fileId,
(oldPath, oldFileId, oldFileVersion,
oldFileObj),
(path, fileId, fileObj.freeze())))
else:
normalList.append((pathId, fileId,
(oldPath, oldFileId, oldFileVersion,
oldFileObj),
(path, fileId, fileObj.freeze())))
for pathId in trvCs.getOldFileList():
oldFileObj = fileObjects.pop(0)
oldFileId, oldFileVersion, oldPath = filesNeeded.pop(0)[1:4]
removeList.append((oldPath, oldFileObj))
else:
for (pathId, fileId, version, path), fileObj in \
itertools.izip(filesNeeded, fileObjects):
removeList.append((path, fileObj))
for path, fileObj in removeList:
yield "diff --git a%s b%s\n" % (path, path)
yield "deleted file mode %o\n" % (fileObj.statType |
fileObj.inode.perms())
yield "Binary files %s and /dev/null differ\n" % path
configList.sort()
normalList.sort()
encapsulatedList.sort()
for (pathId, fileId, oldInfo, newInfo) in \
itertools.chain(configList, normalList):
newInfo = newInfo[0:2] + (files.ThawFile(newInfo[2], pathId),)
for x in self._makeFileGitDiff(troveSource, pathId,
oldInfo, newInfo, diffBinaries):
yield x
for (pathId, fileId, oldInfo, newInfo) in encapsulatedList:
newInfo = newInfo[0:2] + (files.ThawFile(newInfo[2], pathId),)
for x in self._makeFileGitDiffCapsule(troveSource, pathId,
oldInfo, newInfo, diffBinaries):
yield x
def __init__(self, data = None):
streams.StreamSet.__init__(self, data)
self.configCache = {}
self.fileContents = {}
self.absolute = False
self.local = 0
class ChangeSetFromAbsoluteChangeSet(ChangeSet):
    """An (initially empty) changeset that keeps a reference to the
    absolute changeset it is being derived from."""
    #streamDict = ChangeSet.streamDict
    def __init__(self, absCS):
        # Keep the source absolute changeset around for later use.
        self.absCS = absCS
        ChangeSet.__init__(self)
class ChangeSetKeyConflictError(Exception):
    """Raised when two changeset entries collide on the same contents key.

    The key is either a bare 16-byte pathId or a combined pathId+fileId;
    the conflicting (trove, file) pairs are recorded when available.
    """
    name = "ChangeSetKeyConflictError"

    def __init__(self, key, trove1=None, file1=None, trove2=None, file2=None):
        if len(key) == 16:
            # A bare pathId; there is no fileId component.
            self.pathId, self.fileId = key, None
        else:
            # Combined key: split it back into (pathId, fileId).
            self.pathId, self.fileId = parseKey(key)
        self.trove1, self.file1 = trove1, file1
        self.trove2, self.file2 = trove2, file2

    def getKey(self):
        # Reassemble the key in its original form.
        return self.pathId + self.fileId if self.fileId else self.pathId

    def getPathId(self):
        return self.pathId

    def getConflicts(self):
        return (self.trove1, self.file1), (self.trove2, self.file2)

    def getTroves(self):
        return self.trove1, self.trove2

    def getPaths(self):
        return self.file1[1], self.file2[1]

    def __str__(self):
        if self.trove1 is None:
            # No trove context recorded; show just the raw ids.
            return '%s: %s,%s' % (self.name,
                                  sha1helper.md5ToString(self.pathId),
                                  sha1helper.sha1ToString(self.fileId))
        path1, path2 = self.getPaths()
        trove1, trove2 = self.getTroves()
        rev1 = trove1.getNewVersion().trailingRevision()
        rev2 = trove2.getNewVersion().trailingRevision()
        trove1Info = '(%s %s)' % (trove1.getName(), rev1)
        trove2Info = '(%s %s)' % (trove2.getName(), rev2)
        if path1:
            trove1Info = path1 + ' ' + trove1Info
        if path2:
            trove2Info = path2 + ' ' + trove2Info
        return (('%s:\n'
                 '  %s\n'
                 '     conflicts with\n'
                 '  %s') % (self.name, trove1Info, trove2Info))
class PathIdsConflictError(ChangeSetKeyConflictError):
    """Raised when two changeset entries collide on a bare pathId."""
    name = "PathIdsConflictError"

    def __str__(self):
        # With trove context available, the base-class rendering applies.
        if self.trove1 is not None:
            return ChangeSetKeyConflictError.__str__(self)
        # Otherwise just show the conflicting pathId.
        return '%s: %s' % (self.name, sha1helper.md5ToString(self.pathId))
class ReadOnlyChangeSet(ChangeSet):
    def thawFromFile(self, f):
        """Thaw this stream set incrementally from open file *f*.

        The file is a sequence of entries, each a 5-byte header
        (1-byte tag + 4-byte big-endian size) followed by the payload.
        Unknown tags are skipped, mirroring ignoreUnknown = True.
        """
        while True:
            s = f.read(5)
            if not s:
                # Clean EOF between entries: done.
                break
            tag, size = struct.unpack("!BI", s)
            # The high bit of the size field is a flag; mask it off to get
            # the actual payload byte count.
            size &= ~(1 << 31)
            if tag not in self.streamDict:
                # this implements ignoreUnknown = True
                f.read(size)
                continue
            obj = getattr(self, self.streamDict[tag][2])
            if tag == _STREAM_CS_TROVES:
                # Trove entries thaw directly from the file handle.
                obj.thawFromFile(f, size)
            else:
                s = f.read(size)
                obj.thaw(s)
    def addFileContents(self, *args, **kw):
        """Read-only changesets cannot accept new file contents."""
        raise NotImplementedError
    def fileQueueCmp(a, b):
        """Comparator ordering the pending file-contents queue.

        Entries are (key, tagInfo, file, csf) tuples; tagInfo starts with
        "1" for config files and "0" otherwise.  Config files sort before
        everything else, then entries order by key.  Equal keys indicate a
        conflict: a bare 16-byte pathId always raises PathIdsConflictError,
        and a pathId+fileId key raises ChangeSetKeyConflictError when a
        diff is involved.
        """
        # Config files ("1") sort before non-config files ("0").
        if a[1][0] == "1" and b[1][0] == "0":
            return -1
        elif a[1][0] == "0" and b[1][0] == "1":
            return 1
        if a[0] < b[0]:
            return -1
        elif a[0] == b[0]:
            if len(a[0]) == 16:
                # Bare pathId keys may never legally collide.
                raise PathIdsConflictError(a[0])
            else:
                # this is an actual conflict if one of the files is a diff
                # (other file types conflicts are okay; replacing contents
                # with a ptr is okay, as is the opposite)
                # NOTE(review): a[2:]/b[2:] are tuple slices compared to a
                # string slice (ChangedFileTypes.diff[4:]) -- that can never
                # compare equal; the intended subscript looks like it should
                # be the tag element. Also note the equal-key non-diff path
                # falls through and implicitly returns None. Confirm both.
                if (a[2:] == ChangedFileTypes.diff[4:] or
                    b[2:] == ChangedFileTypes.diff[4:]):
                    raise ChangeSetKeyConflictError(a[0])
        else:
            return 1
    fileQueueCmp = staticmethod(fileQueueCmp)
def _nextFile(self):
if self.lastCsf:
next = self.lastCsf.getNextFile()
if next:
util.tupleListBsearchInsert(self.fileQueue,
next + (self.lastCsf,),
self.fileQueueCmp)
self.lastCsf = None
if not self.fileQueue:
return None
rc = self.fileQueue[0]
self.lastCsf = rc[3]
del self.fileQueue[0]
return rc
    def getFileContents(self, pathId, fileId, compressed = False):
        """Return (tag, contents) for the file identified by pathId/fileId.

        Config-file contents are served from self.configCache (keyed either
        by bare pathId for old formats or by pathId+fileId); anything else
        is found by scanning forward through the underlying changeset files
        via _nextFile().  Raises KeyError when the contents are absent.
        """
        name = None
        key = makeKey(pathId, fileId)
        if self.configCache.has_key(pathId):
            # Cached under the bare pathId (old changeset formats); these
            # entries are never served compressed.
            assert(not compressed)
            name = pathId
            (tag, contents, alreadyCompressed) = self.configCache[pathId]
            cont = contents
        elif self.configCache.has_key(key):
            name = key
            (tag, contents, alreadyCompressed) = self.configCache[key]
            cont = contents
            if compressed:
                # Cached config contents are stored uncompressed;
                # re-compress them on the fly for the caller.
                f = util.BoundedStringIO()
                compressor = util.DeterministicGzipFile(None, "w", fileobj = f)
                util.copyfileobj(cont.get(), compressor)
                compressor.close()
                f.seek(0)
                cont = filecontents.FromFile(f, compressed = True)
        else:
            # Not cached: scan forward through the contents stream.
            self.filesRead = True
            rc = self._nextFile()
            while rc:
                name, tagInfo, f, csf = rc
                if not compressed:
                    f = gzip.GzipFile(None, "r", fileobj = f)
                # if we found the key we're looking for, or the pathId
                # we got is a config file, cache or break out of the loop
                # accordingly
                #
                # we check for both the key and the pathId here for backwards
                # compatibility reading old change set formats
                if name == key or name == pathId or tagInfo[0] == '1':
                    tag = 'cft-' + tagInfo.split()[1]
                    cont = filecontents.FromFile(f, compressed = compressed)
                    # we found the one we're looking for, break out
                    if name == key or name == pathId:
                        self.lastCsf = csf
                        break
                rc = self._nextFile()
        if name != key and name != pathId:
            # Not found anywhere: report a readable pathId.
            if len(pathId) == 16:
                pathId = sha1helper.md5ToString(pathId)
            raise KeyError, 'pathId %s is not in the changeset' % pathId
        else:
            return (tag, cont)
def makeAbsolute(self, repos):
"""
Converts this (relative) change set to an abstract change set. File
streams and contents are omitted unless the file changed. This is fine
for changesets being committed, not so hot for changesets which are
being applied directly to a system. The absolute changeset is returned
as a new changeset; self is left unchanged.
"""
assert(not self.absolute)
absCs = ChangeSet()
absCs.setPrimaryTroveList(self.getPrimaryTroveList())
neededFiles = []
oldTroveList = [ (x.getName(), x.getOldVersion(),
x.getOldFlavor()) for x in self.newTroves.values() ]
oldTroves = repos.getTroves(oldTroveList)
# for each file find the old fileId for it so we can assemble the
# proper stream and contents
for trv, troveCs in itertools.izip(oldTroves,
self.newTroves.itervalues()):
if trv.troveInfo.incomplete():
raise errors.TroveError('''\
Cannot apply a relative changeset to an incomplete trove. Please upgrade conary and/or reinstall %s=%s[%s].''' % (trv.getName(), trv.getVersion(),
trv.getFlavor()))
troveName = troveCs.getName()
newVersion = troveCs.getNewVersion()
newFlavor = troveCs.getNewFlavor()
assert(troveCs.getOldVersion() == trv.getVersion())
assert(trv.getName() == troveName)
# XXX this is broken. makeAbsolute() is only used for
# committing local changesets, and they can't have new
# files, so we're OK at the moment.
for (pathId, path, fileId, version) in troveCs.getNewFileList():
filecs = self.files[(None, fileId)]
newFiles.append((None, fileId, filecs))
for (pathId, path, fileId, version) in troveCs.getChangedFileList():
(oldPath, oldFileId, oldVersion) = trv.getFile(pathId)
filecs = self.files[(oldFileId, fileId)]
neededFiles.append((pathId, oldFileId, fileId, oldVersion,
version, filecs))
# we've mucked around with this troveCs, it won't pass
# integrity checks
trv.applyChangeSet(troveCs, skipIntegrityChecks = True)
newCs = trv.diff(None, absolute = True)[0]
absCs.newTrove(newCs)
fileList = [ (x[0], x[1], x[3]) for x in neededFiles ]
fileObjs = repos.getFileVersions(fileList)
# XXX this would be markedly more efficient if we batched up getting
# file contents
for ((pathId, oldFileId, newFileId, oldVersion, newVersion, filecs),
fileObj) in itertools.izip(neededFiles, fileObjs):
fileObj.twm(filecs, fileObj)
(absFileCs, hash) = fileChangeSet(pathId, None, fileObj)
absCs.addFile(None, newFileId, absFileCs)
if newVersion != oldVersion and fileObj.hasContents:
# we need the contents as well
if files.contentsChanged(filecs):
if fileObj.flags.isConfig():
# config files aren't available compressed
(contType, cont) = self.getFileContents(
pathId, newFileId)
if contType == ChangedFileTypes.diff:
origCont = repos.getFileContents([(oldFileId,
oldVersion)])[0]
diff = cont.get().readlines()
oldLines = origCont.get().readlines()
(newLines, failures) = patch.patch(oldLines, diff)
assert(not failures)
fileContents = filecontents.FromString(
"".join(newLines))
absCs.addFileContents(pathId, newFileId,
ChangedFileTypes.file,
fileContents, True)
else:
absCs.addFileContents(pathId, newFileId,
ChangedFileTypes.file,
cont, True)
else:
(contType, cont) = self.getFileContents(pathId,
newFileId, compressed = True)
assert(contType == ChangedFileTypes.file)
absCs.addFileContents(pathId, newFileId,
ChangedFileTypes.file,
cont, False, compressed = True)
else:
# include the old contents; we might need them for
# a distributed branch
cont = repos.getFileContents([(oldFileId, oldVersion)])[0]
absCs.addFileContents(pathId, newFileId,
ChangedFileTypes.file, cont,
fileObj.flags.isConfig())
return absCs
def rootChangeSet(self, db, troveMap):
"""
Converts | |
parent dim is checked elsewhere : R505 or R404
if parent_varname in self._lis_vars:
typename = "location_index_set"
else:
typename = "mesh"
msg = (
f'has the element dimension "{data_meshdim}", which does '
f"not match the {parent_location} dimension of the "
f'"{parent_varname}" {typename}, which is "{parent_dim}".'
)
log_meshdata("R510", msg)
def check_lis_var(self, lis_var: NcVariableSummary) -> None:
    """
    Validity-check a location-index-set variable.

    Runs the requirement (R40x) and advisory (A40x) checks for a lis
    variable, logging each problem via ``self.state``.  Also records the
    lis element dimension in ``self._all_mesh_dims``.

    Parameters
    ----------
    lis_var : NcVariableSummary
        the putative location-index-set variable to check.
    """
    # Add the lis element dimension into self._all_mesh_dims
    dims = lis_var.dimensions
    if len(dims) == 1:
        # Lis has a valid location and single dim
        # So we can record 'our' dim as an element-dim
        (lis_dim,) = dims
        # Note: record this under **all** locations.
        # Since we want to recognise this as a 'mesh dim', even if the lis
        # has an invalid mesh or location, and we don't use this to check
        # it against the parent element dim.
        self._all_mesh_dims[lis_var.name] = {
            name: lis_dim for name in _VALID_UGRID_LOCATIONS
        }

    def log_lis(errcode, msg):
        # Shorthand for logging a problem against this lis variable.
        self.state(errcode, "location-index-set", lis_var.name, msg)

    # R401: must have cf_role="location_index_set".
    cf_role = lis_var.attributes.get("cf_role")
    if cf_role is None:
        log_lis("R401", "has no 'cf_role' attribute.")
    elif cf_role != "location_index_set":
        msg = f'has cf_role="{cf_role}", instead of "location_index_set".'
        log_lis("R401", msg)

    # R402: must have a 'mesh' attribute naming a valid mesh variable.
    mesh_var = None  # Used to skip additional checks when mesh is bad
    mesh_name = lis_var.attributes.get("mesh")
    if mesh_name is None:
        log_lis("R402", "has no 'mesh' attribute.")
    else:
        msg_ref = self.var_ref_problem(mesh_name)
        if msg_ref:
            msg = f'has mesh="{mesh_name}", which {msg_ref}.'
            log_lis("R402", msg)
        else:
            mesh_name = str(mesh_name)
            mesh_var = self._mesh_vars.get(mesh_name)
            if mesh_var is None:
                msg = (
                    f'has mesh="{mesh_name}", '
                    "which is not a valid mesh variable."
                )
                log_lis("R402", msg)

    # R403/R404: must have a valid 'location', present in the parent mesh.
    location = lis_var.attributes.get("location")
    parent_dim = None
    if location is None:
        log_lis("R403", "has no 'location' attribute.")
    elif str(location) not in _VALID_UGRID_LOCATIONS:
        msg = (
            f'has location="{location}", which is not one of '
            '"face", "edge" or "node".'
        )
        log_lis("R403", msg)
    elif mesh_var:
        # check the location exists in the parent mesh
        location = str(location)
        mesh_dims = self._all_mesh_dims[mesh_name]
        parent_dim = mesh_dims[location]
        if parent_dim is None:
            msg = (
                f'has location="{location}", which is a location '
                "that does not exist in the parent mesh, "
                f'"{mesh_name}".'
            )
            log_lis("R404", msg)
            # Don't attempt any further checks against the mesh
            mesh_var = None

    # R405: must have exactly one dimension.
    lis_dims = lis_var.dimensions
    n_lis_dims = len(lis_dims)
    if n_lis_dims != 1:
        msg = (
            f"has dimensions {lis_dims!r}, of which there are "
            f"{n_lis_dims} instead of 1."
        )
        log_lis("R405", msg)
        lis_dim = None
    else:
        (lis_dim,) = lis_dims

    # R406: 'start_index', when present, must be 0 or 1.
    index_value = lis_var.attributes.get("start_index")
    if index_value is not None:
        # Note: check value, converted to int.
        # This avoids an extra warning for strings like "0", "1",
        # since a non-integral type triggers an A407 warning anyway.
        if int(index_value) not in (0, 1):
            msg = (
                f'has start_index="{index_value}", which is not '
                "either 0 or 1."
            )
            log_lis("R406", msg)

    #
    # Advisory checks
    #
    # A401: should be an integer type.
    if lis_var.dtype.kind != "i":
        msg = f'has type "{lis_var.dtype}", which is not an integer type.'
        log_lis("A401", msg)

    # A402: should contain no missing indices.
    if self.do_data_checks:
        # TODO: data checks
        log_lis("A402", "contains missing indices.")

    # A403: should not have a '_FillValue'.
    if "_FillValue" in lis_var.attributes:
        msg = (
            "has a '_FillValue' attribute, which should not be present "
            "on a location-index-set."
        )
        log_lis("A403", msg)

    # A404-A406: compare against the parent dimension, when we have one.
    if mesh_var and lis_dim and parent_dim:
        len_lis = self.file_scan.dimensions[lis_dim].length
        len_parent = self.file_scan.dimensions[parent_dim].length
        # BUGFIX: was '>=' -- a lis of the *same* length as the parent
        # dimension is acceptable (it selects every element), and only a
        # strictly longer one matches the "longer than" message.
        if len_lis > len_parent:
            msg = (
                f'has dimension "{lis_dim}", length {len_lis}, which is '
                f"longer than the {location} dimension of the parent "
                f'mesh "{mesh_name}" : '
                f'"{parent_dim}", length {len_parent}.'
            )
            log_lis("A404", msg)

        if self.do_data_checks:
            # TODO: data checks
            msg = "contains repeated index values."
            # BUGFIX: the 'msg' argument was omitted from this call,
            # which raised a TypeError (log_lis requires 2 arguments)
            # and silently discarded the message built above.
            log_lis("A405", msg)
            if mesh_var:
                msg = (
                    "contains index values which are outside the range of the "
                    f'parent mesh "{mesh_name}" {location} dimension, '
                    f' : "{parent_dim}", range 1..{len_parent}.'
                )
                # BUGFIX: as for A405, 'msg' was omitted from the call.
                log_lis("A406", msg)

    # A407: 'start_index' should match the variable's own dtype.
    if index_value is not None and index_value.dtype != lis_var.dtype:
        msg = (
            f"has a 'start_index' of type \"{index_value.dtype}\", "
            "which is different from the variable type, "
            f'"{lis_var.dtype}".'
        )
        log_lis("A407", msg)
def dataset_identify_containers(self) -> None:
    """
    Find "mesh", "mesh data", and "location index set" variables,
    also including possibles due to mesh/lis references from data
    variables.

    Results set as self properties :
        self._meshdata_vars
        self._mesh_vars
        self._lis_vars
        self._mesh_referrers
        self._lis_referrers
    """
    # A location index set is any variable with
    # cf_role="location_index_set".
    self._lis_vars = vars_w_props(
        self._all_vars, cf_role="location_index_set"
    )

    # A mesh-data variable carries a 'mesh' or a 'location_index_set'
    # attribute -- but a lis variable itself does not count as one.
    self._meshdata_vars = {}
    for varname, var in self._all_vars.items():
        if varname in self._lis_vars:
            continue
        attrs = var.attributes
        if "mesh" in attrs or "location_index_set" in attrs:
            self._meshdata_vars[varname] = var

    # A mesh variable has cf_role="mesh_topology".
    self._mesh_vars = vars_w_props(self._all_vars, cf_role="mesh_topology")

    # Additionally, adopt any variable referenced by a 'mesh' or
    # 'location_index_set' attribute of a mesh-data var as a potential
    # mesh- / lis- variable, so that it gets detected + checked even
    # when its own cf_role is wrong or missing.
    self._mesh_referrers = {}
    self._lis_referrers = {}
    # Note: iterate over a snapshot, as _meshdata_vars may shrink below.
    for data_name, data_var in list(self._meshdata_vars.items()):
        ref_name = property_as_single_name(
            data_var.attributes.get("mesh")
        )
        if (
            ref_name is not None
            and ref_name in self._all_vars
            and ref_name not in self._mesh_vars
        ):
            # Treat the referenced variable as a mesh var, and remember
            # who referred to it.
            # N.B. a later referrer can overwrite an earlier one, but
            # "any one of several" is fine for our purposes.
            self._mesh_vars[ref_name] = self._all_vars[ref_name]
            self._mesh_referrers[ref_name] = data_name

        ref_name = property_as_single_name(
            data_var.attributes.get("location_index_set")
        )
        if (
            ref_name is not None
            and ref_name in self._all_vars
            and ref_name not in self._lis_vars
        ):
            # Treat the referenced variable as a lis var ...
            self._lis_vars[ref_name] = self._all_vars[ref_name]
            # ... and stop treating it as a mesh-data variable.
            # N.B. it can only appear there when its cf_role is wrong,
            # which is exactly the kind of error we are handling here.
            self._meshdata_vars.pop(ref_name, None)
            # Record the name of the referring variable.
            self._lis_referrers[ref_name] = data_name
def dataset_check_containers_and_map_dims(self) -> None:
    """
    Check all putative mesh + lis variables and collect dimension maps.

    Writes self._all_mesh_dims: {<mesh or lis name>: {location: dim-name}}

    Note: in checking the individual mesh variables, we also check all
    the coordinates and connectivities.
    This routine also sets self._allowed_cfrole_varnames.
    """
    # Dimension map for every mesh / lis, filled in by the per-variable
    # checks below : {meshname: {location: dimname}}.
    self._all_mesh_dims = {}

    # Names of variables entitled to carry a UGRID-style 'cf_role'.
    # 'dataset_global_checks' uses this list to find any vars with a
    # cf_role that should not have one.
    # N.B. mesh-data and coordinate variables are not included, as they
    # should *not* have a 'cf_role' anyway; every connectivity gets
    # appended later, by 'check_connectivity'.
    self._allowed_cfrole_varnames = [
        *self._mesh_vars,
        *self._lis_vars,
    ]

    # Start by treating *every* connectivity variable as an "orphan":
    # those attached to meshes are removed again while the meshes are
    # checked (next).
    self._orphan_connectivities = {}
    for conn_name, conn_var in self._all_vars.items():
        if "cf_role" not in conn_var.attributes:
            continue
        role = str(conn_var.attributes.get("cf_role"))
        if role in _VALID_CONNECTIVITY_ROLES:
            self._orphan_connectivities[conn_name] = conn_var

    # Check every mesh var.
    # Note: this also fills in 'self._all_mesh_dims', and checks each
    # mesh's attached coordinates and connectivities.
    for mesh_var in self._mesh_vars.values():
        self.check_mesh_var(mesh_var)

    # Check every lis var.
    # Note: this also fills in 'self._all_mesh_dims'.
    for lis_var in self._lis_vars.values():
        self.check_lis_var(lis_var)
def dataset_detect_shared_dims(self):
"""
Check for any dimensions shared between meshes - an advisory warning.
"""
# Convert all_meshes_dims: {meshname: {location: dimname}}
# .. to dim_meshes: {dimname: [meshnames]}
dim_meshes = {}
for mesh, location_dims in self._all_mesh_dims.items():
for location, dim in location_dims.items():
# Fetch list
meshnames = dim_meshes.get(dim, set())
if dim:
# TODO: what if a dim is used by 2 different locations of
# of the same mesh ?
meshnames.add(mesh)
# Write list back
dim_meshes[dim] = meshnames
# Check for any dims which are used more than once.
for dim, meshnames in dim_meshes.items():
if len(meshnames) > 1:
# TODO: what if a dim is used by 2 different locations of
# of the same mesh ?
# We would get a repeated meshname here...
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.